source: rtems/cpukit/score/src/mutex.c @ ac8402dd

Last change on this file since ac8402dd was ac8402dd, checked in by Sebastian Huber <sebastian.huber@…>, on 06/27/16 at 08:20:34

score: Simplify _Thread_queue_Boost_priority()

Raise the priority under thread queue lock protection and omit the
superfluous thread queue priority change, since the thread is extracted
anyway. The unblock operation will pick up the new priority.

Update #2412.
Update #2556.
Update #2765.
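
For context, the resulting hand-off in _Mutex_Release_slow() below reduces
to the following sequence (condensed from the code in this file): the
priority boost and the extraction happen while the thread queue lock is
held; the unblock releases the lock and lets the thread pick up its new
priority.

    mutex->Queue.Queue.owner = first;   /* hand ownership to first waiter */
    ++first->resource_count;
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue, operations, first, queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock, &mutex->Queue.Queue, first, &queue_context->Lock_context
    );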

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>

#include <rtems/score/assert.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>

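/*
 * The self-contained mutexes use the priority thread queue discipline:
 * threads wait in priority order and the highest priority waiter is
 * dequeued first.
 */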
#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority

typedef struct {
  Thread_queue_Syslock_queue Queue;
} Mutex_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);

typedef struct {
  Mutex_Control Mutex;
  unsigned int nest_level;
} Mutex_recursive_Control;

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);
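
/*
 * The static asserts above pin down the binary layout: the score-level
 * Mutex_Control and Mutex_recursive_Control must have exactly the same
 * member offsets and sizes as the opaque _Mutex_Control and
 * _Mutex_recursive_Control types exposed by <sys/lock.h>, so that the
 * casts in _Mutex_Get() and _Mutex_recursive_Get() below are valid.
 */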

static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
{
  return (Mutex_Control *) _mutex;
}

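/*
 * Disables interrupts, determines the executing thread, and acquires the
 * thread queue lock of the mutex.  Every operation in this file begins
 * with this sequence; the lock is dropped again either directly via
 * _Mutex_Queue_release() or as a side effect of the enqueue and unblock
 * operations.
 */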
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control        *mutex,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( &queue_context->Lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    &queue_context->Lock_context
  );

  return executing;
}

static void _Mutex_Queue_release(
  Mutex_Control        *mutex,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Queue_release(
    &mutex->Queue.Queue,
    &queue_context->Lock_context
  );
}

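/*
 * Slow path of an acquire: the mutex is owned by another thread.  The
 * executing thread first lends its priority to the owner (priority
 * inheritance) and then blocks on the thread queue;
 * _Thread_queue_Enqueue_critical() releases the queue lock.
 */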
static void _Mutex_Acquire_slow(
  Mutex_Control        *mutex,
  Thread_Control       *owner,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Context_set_expected_level( queue_context, 1 );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    queue_context
  );
}

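/*
 * Slow path of a release: either threads are waiting on the mutex, or the
 * executing thread must restore its original priority, or both.  If there
 * is a waiter, ownership is handed over to the first thread on the queue
 * using the boost/extract/unblock sequence described in the change log
 * above.
 */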
static void _Mutex_Release_slow(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Heads   *heads,
  bool                  keep_priority,
  Thread_queue_Context *queue_context
)
{
  if ( heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    mutex->Queue.Queue.owner = first;
    ++first->resource_count;
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      &queue_context->Lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, queue_context );
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}

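/*
 * Common release path.  Clears the owner and drops the resource count of
 * the executing thread, then either takes the fast path (no waiters and no
 * priority to restore) or delegates to _Mutex_Release_slow().
 */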
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->Queue.Queue.owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, queue_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      queue_context
    );
  }
}

void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &queue_context );
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
  }
}

int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}

int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;
  Thread_Control       *owner;
  int                   eno;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, &queue_context );

  return eno;
}

void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( mutex, &queue_context );

  _Assert( mutex->Queue.Queue.owner == executing );

  _Mutex_Release_critical( mutex, executing, &queue_context );
}
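
/*
 * A minimal usage sketch for the non-recursive self-contained mutex.  It
 * assumes the _MUTEX_INITIALIZER macro provided by the Newlib <sys/lock.h>
 * used by RTEMS; counter and update_counter() are hypothetical names.
 *
 *   #include <sys/lock.h>
 *
 *   static struct _Mutex_Control lock = _MUTEX_INITIALIZER;
 *   static int counter;
 *
 *   static void update_counter( void )
 *   {
 *     _Mutex_Acquire( &lock );
 *     ++counter;
 *     _Mutex_Release( &lock );
 *   }
 */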

static Mutex_recursive_Control *_Mutex_recursive_Get(
  struct _Mutex_recursive_Control *_mutex
)
{
  return (Mutex_recursive_Control *) _mutex;
}

void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
  }
}

int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );

    return 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}

int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &queue_context );

  return eno;
}

void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );

  _Assert( mutex->Mutex.Queue.Queue.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &queue_context );
  }
}
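
/*
 * Note that nest_level counts only the additional acquisitions by the
 * owner, so a nest level of zero in _Mutex_recursive_Release() means the
 * final release.  A minimal usage sketch, again assuming the Newlib
 * _MUTEX_RECURSIVE_INITIALIZER macro; outer() and inner() are hypothetical:
 *
 *   static struct _Mutex_recursive_Control lock =
 *     _MUTEX_RECURSIVE_INITIALIZER;
 *
 *   static void inner( void )
 *   {
 *     _Mutex_recursive_Acquire( &lock );   nest level becomes one
 *     _Mutex_recursive_Release( &lock );   nest level back to zero
 *   }
 *
 *   static void outer( void )
 *   {
 *     _Mutex_recursive_Acquire( &lock );   first acquisition, nest level zero
 *     inner();
 *     _Mutex_recursive_Release( &lock );   final release, owner is cleared
 *   }
 */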

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */