source: rtems/cpukit/score/src/mutex.c @ 469dc47

Last change on this file since 469dc47 was 8f96581, checked in by Sebastian Huber <sebastian.huber@…>, on 04/01/16 at 09:38:47

score: Rework MP thread queue callout support

The thread queue implementation was heavily reworked to support SMP.
This broke the multiprocessing support of the thread queues. This is
fixed by this patch.

A thread proxy is unblocked for one of three reasons:

1) timeout,
2) request satisfaction, and
3) extraction.

In case 1), no MPCI message must be sent. This is ensured via the
_Thread_queue_MP_callout_do_nothing() callout set during
_Thread_MP_Allocate_proxy().

In cases 2) and 3), an MPCI message must be sent. If we interrupt the
blocking operation during _Thread_queue_Enqueue_critical(), then
this message must be sent by the blocking thread. For this, the new
fields Thread_Proxy_control::thread_queue_callout and
Thread_Proxy_control::thread_queue_id are used.

Delete the individual API MP callout types and use
Thread_queue_MP_callout throughout. This type is only defined in
multiprocessing configurations. Prefix the multiprocessing parameters
with mp_ to ease code review. Multiprocessing-specific parameters are
optional due to the use of a similar macro pattern. There is no overhead
for non-multiprocessing configurations.
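
For reference, the unified callout type mentioned above looks roughly like
this in multiprocessing configurations. This is a sketch of the score
internals of this era; the exact declarations live in threadqimpl.h and the
MP support code, not in mutex.c:

#if defined(RTEMS_MULTIPROCESSING)
typedef void ( *Thread_queue_MP_callout )(
  Thread_Control *the_proxy,
  Objects_Id      mp_id
);

/* Installed for proxies by _Thread_MP_Allocate_proxy() so that a
   timeout (case 1 above) sends no MPCI message. */
void _Thread_queue_MP_callout_do_nothing(
  Thread_Control *the_proxy,
  Objects_Id      mp_id
)
{
  /* Do nothing */
}
#endif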

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>

#include <rtems/score/assert.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>

#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

typedef struct {
  Thread_queue_Syslock_queue Queue;
  Thread_Control *owner;
} Mutex_Control;

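/*
 * Mutex_Control must have exactly the same memory layout as the
 * struct _Mutex_Control declared in Newlib's <sys/lock.h>:
 * _Mutex_Get() below simply casts between the two types.  The static
 * asserts that follow verify the member offsets and the structure
 * sizes.
 */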
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, owner )
    == offsetof( struct _Mutex_Control, _owner ),
  MUTEX_CONTROL_OWNER
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);

typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);

static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
{
  return (Mutex_Control *) _mutex;
}

static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}

static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}

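/*
 * Slow path of a contended acquire: let the current owner inherit the
 * priority of the executing thread, then block the executing thread on
 * the thread queue.  A timeout of zero means wait forever; otherwise
 * the thread is unblocked with an ETIMEDOUT wait return code once the
 * watchdog interval expires.
 */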
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    ETIMEDOUT,
    lock_context
  );
}

static void _Mutex_Release_slow(
  Mutex_Control      *mutex,
  Thread_Control     *executing,
  Thread_queue_Heads *heads,
  bool                keep_priority,
  ISR_lock_Context   *lock_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->owner = first;
    ++first->resource_count;
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      NULL,
      0
    );
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      NULL,
      0,
      lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, lock_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}

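/*
 * Fast path release: if nobody waits on the mutex and the executing
 * thread need not restore its priority, only the queue lock has to be
 * released.  Otherwise _Mutex_Release_slow() hands the mutex over to
 * the first enqueued thread and restores the priority of the previous
 * owner if necessary.
 */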
static void _Mutex_Release_critical(
  Mutex_Control    *mutex,
  Thread_Control   *executing,
  ISR_lock_Context *lock_context
)
{
  Thread_queue_Heads *heads;
  bool                keep_priority;

  mutex->owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      lock_context
    );
  }
}

void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}

int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    return (int) executing->Wait.return_code;
  }
}

int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;
  int               eno;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, &lock_context );

  return eno;
}

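/*
 * Ownership is verified via _Assert() only, i.e. in debug
 * configurations.  Releasing a mutex that is not owned by the
 * executing thread is undefined behavior.
 */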
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  _Assert( mutex->owner == executing );

  _Mutex_Release_critical( mutex, executing, &lock_context );
}

static Mutex_recursive_Control *_Mutex_recursive_Get(
  struct _Mutex_recursive_Control *_mutex
)
{
  return (Mutex_recursive_Control *) _mutex;
}

void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}

int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    return (int) executing->Wait.return_code;
  }
}

int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return eno;
}

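/*
 * The nest level counts only the acquisitions nested beyond the first
 * one, so a nest level of zero means that this release actually gives
 * up the mutex.
 */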
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  _Assert( mutex->Mutex.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, &lock_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  }
}

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
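
For context, the functions in this file implement the self-contained mutex
API that Newlib declares for RTEMS in <sys/lock.h>. A minimal usage sketch
follows; the _MUTEX_INITIALIZER static initializer comes from that header,
and update_shared_state() is a hypothetical caller, neither being defined
in this file:

#include <sys/lock.h>
#include <time.h>

/* Self-contained mutex, statically initialized (assumed initializer
   from Newlib's <sys/lock.h>). */
static struct _Mutex_Control lock = _MUTEX_INITIALIZER;

void update_shared_state( void )
{
  struct timespec abstime;

  /* Blocking acquire: waits forever, with priority inheritance. */
  _Mutex_Acquire( &lock );
  /* ... critical section ... */
  _Mutex_Release( &lock );

  /* Acquire with an absolute CLOCK_REALTIME timeout one second from
     now; returns 0, EINVAL, or ETIMEDOUT. */
  clock_gettime( CLOCK_REALTIME, &abstime );
  ++abstime.tv_sec;

  if ( _Mutex_Acquire_timed( &lock, &abstime ) == 0 ) {
    /* ... critical section ... */
    _Mutex_Release( &lock );
  }
}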