source: rtems/cpukit/score/src/mutex.c @ 631b3c8

Last change on this file was 631b3c8, checked in by Sebastian Huber <sebastian.huber@…>, on 05/23/16 at 09:40:18

score: Move thread queue MP callout to context

Drop the multiprocessing (MP) dependent callout parameter from the
thread queue extract, dequeue, flush and unblock methods. Merge this
parameter with the lock context into new structure Thread_queue_Context.
This helps to get rid of the conditionally compiled method call
helpers.
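
For orientation, a minimal sketch of the merged structure (an assumption
for illustration only: the real definition lives in the thread queue
implementation headers and carries more detail, and the MP callout member
exists only in multiprocessing configurations):

typedef struct {
  ISR_lock_Context Lock_context;      /* the former lock context parameter */
#if defined(RTEMS_MULTIPROCESSING)
  Thread_queue_MP_callout mp_callout; /* the former MP callout parameter */
#endif
} Thread_queue_Context;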

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>

#include <rtems/score/assert.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>

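/*
 * All mutexes in this file use the priority-ordered thread queue
 * operations, so threads wait for the mutex in priority order.
 */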
#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

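/*
 * Mutex_Control is the score-internal view of the Newlib-provided
 * struct _Mutex_Control from <sys/lock.h>.  The static assertions
 * below verify that both views have the same size and member offsets,
 * which makes the plain pointer cast in _Mutex_Get() valid.
 */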
typedef struct {
  Thread_queue_Syslock_queue Queue;
  Thread_Control *owner;
} Mutex_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, owner )
    == offsetof( struct _Mutex_Control, _owner ),
  MUTEX_CONTROL_OWNER
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);

typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);

static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
{
  return (Mutex_Control *) _mutex;
}

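/*
 * Disables interrupts via the lock context and acquires the thread
 * queue lock of the mutex.  Returns the executing thread, which every
 * caller needs and which can be read reliably only once interrupts are
 * disabled.
 */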
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}

static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}

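/*
 * Slow path for a contended acquire: the current owner inherits the
 * priority of the executing thread, which then blocks on the priority
 * thread queue.  A timeout of zero means wait without limit; otherwise
 * the wait return code is set to ETIMEDOUT on expiry.
 */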
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    ETIMEDOUT,
    lock_context
  );
}

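/*
 * Slow path for a release: if threads are waiting, ownership is handed
 * over directly to the first thread of the priority queue; its resource
 * count is updated and its priority is boosted before it is unblocked.
 * Afterwards the priority of the previous owner is restored in case it
 * no longer depends on an inherited priority.
 */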
static void _Mutex_Release_slow(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->owner = first;
    ++first->resource_count;
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      queue_context
    );
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      &queue_context->Lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}

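/*
 * Common release path: clears the owner and takes the fast path if no
 * thread waits and the priority of the executing thread needs no
 * restoration; everything else is handled by _Mutex_Release_slow().
 */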
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, &queue_context->Lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      queue_context
    );
  }
}

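/*
 * In the likely uncontended case the executing thread simply becomes
 * the owner; otherwise it blocks without a timeout in the slow path.
 */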
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}

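/*
 * Like _Mutex_Acquire(), except that on contention the absolute
 * timeout is converted to clock ticks first: an invalid timespec
 * yields EINVAL, an already expired one yields ETIMEDOUT without
 * blocking, and otherwise the thread blocks with a watchdog armed.
 */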
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    return (int) executing->Wait.return_code;
  }
}

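/*
 * Never blocks: returns 0 on success and EBUSY if the mutex is already
 * owned by some thread.
 */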
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;
  int               eno;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, &lock_context );

  return eno;
}

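/*
 * The caller must be the owner of the mutex; this is checked only via
 * _Assert() in debug configurations.
 */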
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );

  _Assert( mutex->owner == executing );

  _Mutex_Release_critical( mutex, executing, &queue_context );
}

static Mutex_recursive_Control *_Mutex_recursive_Get(
  struct _Mutex_recursive_Control *_mutex
)
{
  return (Mutex_recursive_Control *) _mutex;
}

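/*
 * The recursive variants mirror the plain ones, with one extra case:
 * if the executing thread is already the owner, only the nest level is
 * incremented.  The nest level counts the acquisitions beyond the
 * first one, so it stays zero for non-recursive use.
 */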
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}

int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    return (int) executing->Wait.return_code;
  }
}

int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return eno;
}

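/*
 * A nest level of zero means this is the last release and ownership is
 * really given up; otherwise only the nest level is decremented.
 */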
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context, NULL );
  executing = _Mutex_Queue_acquire(
    &mutex->Mutex,
    &queue_context.Lock_context
  );

  _Assert( mutex->Mutex.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
  }
}

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */