source: rtems/cpukit/score/src/mutex.c @ dafa5d88

Last change on this file since dafa5d88 was dafa5d88, checked in by Sebastian Huber <sebastian.huber@…>, on Sep 3, 2015 at 8:27:16 AM

score: Implement priority boosting

  • Property mode set to 100644
File size: 10.5 KB
Line 
1/*
2 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#if HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#if HAVE_STRUCT__THREAD_QUEUE_QUEUE
20
21#include <sys/lock.h>
22#include <errno.h>
23
24#include <rtems/score/assert.h>
25#include <rtems/score/threadimpl.h>
26#include <rtems/score/threadqimpl.h>
27#include <rtems/score/todimpl.h>
28
29#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority
30
31typedef struct {
32  Thread_queue_Syslock_queue Queue;
33  Thread_Control *owner;
34} Mutex_Control;
35
36RTEMS_STATIC_ASSERT(
37  offsetof( Mutex_Control, Queue )
38    == offsetof( struct _Mutex_Control, _Queue ),
39  MUTEX_CONTROL_QUEUE
40);
41
42RTEMS_STATIC_ASSERT(
43  offsetof( Mutex_Control, owner )
44    == offsetof( struct _Mutex_Control, _owner ),
45  MUTEX_CONTROL_OWNER
46);
47
48RTEMS_STATIC_ASSERT(
49  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
50  MUTEX_CONTROL_SIZE
51);
52
53typedef struct {
54  Mutex_Control Mutex;
55  unsigned int nest_level;
56} Mutex_recursive_Control;
57
58RTEMS_STATIC_ASSERT(
59  offsetof( Mutex_recursive_Control, Mutex )
60    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
61  MUTEX_RECURSIVE_CONTROL_MUTEX
62);
63
64RTEMS_STATIC_ASSERT(
65  offsetof( Mutex_recursive_Control, nest_level )
66    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
67  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
68);
69
70RTEMS_STATIC_ASSERT(
71  sizeof( Mutex_recursive_Control )
72    == sizeof( struct _Mutex_recursive_Control ),
73  MUTEX_RECURSIVE_CONTROL_SIZE
74);
75
76static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
77{
78  return (Mutex_Control *) _mutex;
79}
80
81static Thread_Control *_Mutex_Queue_acquire(
82  Mutex_Control    *mutex,
83  ISR_lock_Context *lock_context
84)
85{
86  Thread_Control *executing;
87
88  _ISR_lock_ISR_disable( lock_context );
89  executing = _Thread_Executing;
90  _Thread_queue_Queue_acquire_critical(
91    &mutex->Queue.Queue,
92    &executing->Potpourri_stats,
93    lock_context
94  );
95
96  return executing;
97}
98
99static void _Mutex_Queue_release(
100  Mutex_Control    *mutex,
101  ISR_lock_Context *lock_context
102)
103{
104  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
105}
106
107static void _Mutex_Acquire_slow(
108  Mutex_Control     *mutex,
109  Thread_Control    *owner,
110  Thread_Control    *executing,
111  Watchdog_Interval  timeout,
112  ISR_lock_Context  *lock_context
113)
114{
115  _Thread_Inherit_priority( owner, executing );
116  _Thread_queue_Enqueue_critical(
117    &mutex->Queue.Queue,
118    MUTEX_TQ_OPERATIONS,
119    executing,
120    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
121    timeout,
122    ETIMEDOUT,
123    lock_context
124  );
125}
126
127static void _Mutex_Release_slow(
128  Mutex_Control      *mutex,
129  Thread_Control     *executing,
130  Thread_queue_Heads *heads,
131  bool                keep_priority,
132  ISR_lock_Context   *lock_context
133)
134{
135  if (heads != NULL) {
136    const Thread_queue_Operations *operations;
137    Thread_Control                *first;
138    bool                           unblock;
139
140    operations = MUTEX_TQ_OPERATIONS;
141    first = ( *operations->first )( heads );
142
143    mutex->owner = first;
144    unblock = _Thread_queue_Extract_locked(
145      &mutex->Queue.Queue,
146      operations,
147      first
148    );
149    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
150    _Thread_queue_Unblock_critical(
151      unblock,
152      &mutex->Queue.Queue,
153      first,
154      lock_context
155    );
156  } else {
157    _Mutex_Queue_release( mutex, lock_context);
158  }
159
160  if ( !keep_priority ) {
161    Per_CPU_Control *cpu_self;
162
163    cpu_self = _Thread_Dispatch_disable();
164    _Thread_Restore_priority( executing );
165    _Thread_Dispatch_enable( cpu_self );
166  }
167}
168
169static void _Mutex_Release_critical(
170  Mutex_Control *mutex,
171  Thread_Control *executing,
172  ISR_lock_Context *lock_context
173)
174{
175  Thread_queue_Heads *heads;
176  bool keep_priority;
177
178  mutex->owner = NULL;
179
180  --executing->resource_count;
181
182  /*
183   * Ensure that the owner resource count is visible to all other
184   * processors and that we read the latest priority restore
185   * hint.
186   */
187  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
188
189  heads = mutex->Queue.Queue.heads;
190  keep_priority = _Thread_Owns_resources( executing )
191    || !executing->priority_restore_hint;
192
193  if ( __predict_true( heads == NULL && keep_priority ) ) {
194    _Mutex_Queue_release( mutex, lock_context );
195  } else {
196    _Mutex_Release_slow(
197      mutex,
198      executing,
199      heads,
200      keep_priority,
201      lock_context
202    );
203  }
204}
205
206void _Mutex_Acquire( struct _Mutex_Control *_mutex )
207{
208  Mutex_Control    *mutex;
209  ISR_lock_Context  lock_context;
210  Thread_Control   *executing;
211  Thread_Control   *owner;
212
213  mutex = _Mutex_Get( _mutex );
214  executing = _Mutex_Queue_acquire( mutex, &lock_context );
215
216  owner = mutex->owner;
217  ++executing->resource_count;
218
219  if ( __predict_true( owner == NULL ) ) {
220    mutex->owner = executing;
221    _Mutex_Queue_release( mutex, &lock_context );
222  } else {
223    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
224  }
225}
226
227int _Mutex_Acquire_timed(
228  struct _Mutex_Control *_mutex,
229  const struct timespec *abstime
230)
231{
232  Mutex_Control    *mutex;
233  ISR_lock_Context  lock_context;
234  Thread_Control   *executing;
235  Thread_Control   *owner;
236
237  mutex = _Mutex_Get( _mutex );
238  executing = _Mutex_Queue_acquire( mutex, &lock_context );
239
240  owner = mutex->owner;
241  ++executing->resource_count;
242
243  if ( __predict_true( owner == NULL ) ) {
244    mutex->owner = executing;
245    _Mutex_Queue_release( mutex, &lock_context );
246
247    return 0;
248  } else {
249    Watchdog_Interval ticks;
250
251    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
252      case TOD_ABSOLUTE_TIMEOUT_INVALID:
253        _Mutex_Queue_release( mutex, &lock_context );
254        return EINVAL;
255      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
256      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
257        _Mutex_Queue_release( mutex, &lock_context );
258        return ETIMEDOUT;
259      default:
260        break;
261    }
262
263    executing->Wait.return_code = 0;
264    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );
265
266    return (int) executing->Wait.return_code;
267  }
268}
269
270int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
271{
272  Mutex_Control    *mutex;
273  ISR_lock_Context  lock_context;
274  Thread_Control   *executing;
275  Thread_Control   *owner;
276  int               eno;
277
278  mutex = _Mutex_Get( _mutex );
279  executing = _Mutex_Queue_acquire( mutex, &lock_context );
280
281  owner = mutex->owner;
282
283  if ( __predict_true( owner == NULL ) ) {
284    mutex->owner = executing;
285    ++executing->resource_count;
286    eno = 0;
287  } else {
288    eno = EBUSY;
289  }
290
291  _Mutex_Queue_release( mutex, &lock_context );
292
293  return eno;
294}
295
296void _Mutex_Release( struct _Mutex_Control *_mutex )
297{
298  Mutex_Control    *mutex;
299  ISR_lock_Context  lock_context;
300  Thread_Control   *executing;
301
302  mutex = _Mutex_Get( _mutex );
303  executing = _Mutex_Queue_acquire( mutex, &lock_context );
304
305  _Assert( mutex->owner == executing );
306
307  _Mutex_Release_critical( mutex, executing, &lock_context );
308}
309
310static Mutex_recursive_Control *_Mutex_recursive_Get(
311  struct _Mutex_recursive_Control *_mutex
312)
313{
314  return (Mutex_recursive_Control *) _mutex;
315}
316
317void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
318{
319  Mutex_recursive_Control *mutex;
320  ISR_lock_Context         lock_context;
321  Thread_Control          *executing;
322  Thread_Control          *owner;
323
324  mutex = _Mutex_recursive_Get( _mutex );
325  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
326
327  owner = mutex->Mutex.owner;
328
329  if ( __predict_true( owner == NULL ) ) {
330    mutex->Mutex.owner = executing;
331    ++executing->resource_count;
332    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
333  } else if ( owner == executing ) {
334    ++mutex->nest_level;
335    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
336  } else {
337    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
338  }
339}
340
341int _Mutex_recursive_Acquire_timed(
342  struct _Mutex_recursive_Control *_mutex,
343  const struct timespec           *abstime
344)
345{
346  Mutex_recursive_Control *mutex;
347  ISR_lock_Context         lock_context;
348  Thread_Control          *executing;
349  Thread_Control          *owner;
350
351  mutex = _Mutex_recursive_Get( _mutex );
352  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
353
354  owner = mutex->Mutex.owner;
355
356  if ( __predict_true( owner == NULL ) ) {
357    mutex->Mutex.owner = executing;
358    ++executing->resource_count;
359    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
360
361    return 0;
362  } else if ( owner == executing ) {
363    ++mutex->nest_level;
364    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
365
366    return 0;
367  } else {
368    Watchdog_Interval ticks;
369
370    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
371      case TOD_ABSOLUTE_TIMEOUT_INVALID:
372        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
373        return EINVAL;
374      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
375      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
376        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
377        return ETIMEDOUT;
378      default:
379        break;
380    }
381
382    executing->Wait.return_code = 0;
383    _Mutex_Acquire_slow(
384      &mutex->Mutex,
385      owner,
386      executing,
387      ticks,
388      &lock_context
389    );
390
391    return (int) executing->Wait.return_code;
392  }
393}
394
395int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
396{
397  Mutex_recursive_Control *mutex;
398  ISR_lock_Context         lock_context;
399  Thread_Control          *executing;
400  Thread_Control          *owner;
401  int                      eno;
402
403  mutex = _Mutex_recursive_Get( _mutex );
404  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
405
406  owner = mutex->Mutex.owner;
407
408  if ( __predict_true( owner == NULL ) ) {
409    mutex->Mutex.owner = executing;
410    ++executing->resource_count;
411    eno = 0;
412  } else if ( owner == executing ) {
413    ++mutex->nest_level;
414    eno = 0;
415  } else {
416    eno = EBUSY;
417  }
418
419  _Mutex_Queue_release( &mutex->Mutex, &lock_context );
420
421  return eno;
422}
423
424void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
425{
426  Mutex_recursive_Control *mutex;
427  ISR_lock_Context         lock_context;
428  Thread_Control          *executing;
429  unsigned int             nest_level;
430
431  mutex = _Mutex_recursive_Get( _mutex );
432  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
433
434  _Assert( mutex->Mutex.owner == executing );
435
436  nest_level = mutex->nest_level;
437
438  if ( __predict_true( nest_level == 0 ) ) {
439    _Mutex_Release_critical( &mutex->Mutex, executing, &lock_context );
440  } else {
441    mutex->nest_level = nest_level - 1;
442
443    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
444  }
445}
446
447#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
Note: See TracBrowser for help on using the repository browser.