1 | /* |
---|
2 | * Copyright (c) 2015 embedded brains GmbH. All rights reserved. |
---|
3 | * |
---|
4 | * embedded brains GmbH |
---|
5 | * Dornierstr. 4 |
---|
6 | * 82178 Puchheim |
---|
7 | * Germany |
---|
8 | * <rtems@embedded-brains.de> |
---|
9 | * |
---|
10 | * The license and distribution terms for this file may be |
---|
11 | * found in the file LICENSE in this distribution or at |
---|
12 | * http://www.rtems.org/license/LICENSE. |
---|
13 | */ |
---|
14 | |
---|
15 | #if HAVE_CONFIG_H |
---|
16 | #include "config.h" |
---|
17 | #endif |
---|
18 | |
---|
19 | #if HAVE_STRUCT__THREAD_QUEUE_QUEUE |
---|
20 | |
---|
21 | #include <sys/lock.h> |
---|
22 | #include <errno.h> |
---|
23 | |
---|
24 | #include <rtems/score/assert.h> |
---|
25 | #include <rtems/score/threadimpl.h> |
---|
26 | #include <rtems/score/threadqimpl.h> |
---|
27 | #include <rtems/score/todimpl.h> |
---|
28 | |
---|
/* All sys lock mutexes use the priority thread queue discipline. */
#define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority
---|
30 | |
---|
/*
 * Internal representation of the opaque struct _Mutex_Control from
 * <sys/lock.h>.  The layout equivalence is checked by the static
 * assertions below.
 */
typedef struct {
  Thread_queue_Syslock_queue Queue; /* Queue of threads waiting for the mutex */
  Thread_Control *owner;            /* Owning thread, NULL if the mutex is free */
} Mutex_Control;
---|
35 | |
---|
/*
 * Ensure that Mutex_Control and the opaque struct _Mutex_Control of
 * <sys/lock.h> have an identical memory layout, so the cast in
 * _Mutex_Get() is valid.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, owner )
    == offsetof( struct _Mutex_Control, _owner ),
  MUTEX_CONTROL_OWNER
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) == sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);
---|
52 | |
---|
/*
 * Internal representation of the opaque struct _Mutex_recursive_Control
 * from <sys/lock.h>.  The layout equivalence is checked by the static
 * assertions below.
 */
typedef struct {
  Mutex_Control Mutex;     /* The underlying non-recursive mutex */
  unsigned int nest_level; /* Recursion depth in excess of the first obtain */
} Mutex_recursive_Control;
---|
57 | |
---|
/*
 * Ensure that Mutex_recursive_Control and the opaque
 * struct _Mutex_recursive_Control of <sys/lock.h> have an identical
 * memory layout, so the cast in _Mutex_recursive_Get() is valid.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, Mutex )
    == offsetof( struct _Mutex_recursive_Control, _Mutex ),
  MUTEX_RECURSIVE_CONTROL_MUTEX
);

RTEMS_STATIC_ASSERT(
  offsetof( Mutex_recursive_Control, nest_level )
    == offsetof( struct _Mutex_recursive_Control, _nest_level ),
  MUTEX_RECURSIVE_CONTROL_NEST_LEVEL
);

RTEMS_STATIC_ASSERT(
  sizeof( Mutex_recursive_Control )
    == sizeof( struct _Mutex_recursive_Control ),
  MUTEX_RECURSIVE_CONTROL_SIZE
);
---|
75 | |
---|
76 | static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex ) |
---|
77 | { |
---|
78 | return (Mutex_Control *) _mutex; |
---|
79 | } |
---|
80 | |
---|
/*
 * Disable interrupts and acquire the mutex thread queue lock.
 *
 * The executing thread is read after interrupts are disabled, so it is
 * stable for the duration of the critical section, and is returned for
 * use by the caller.
 */
static Thread_Control *_Mutex_Queue_acquire(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  Thread_Control *executing;

  _ISR_lock_ISR_disable( lock_context );
  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}
---|
98 | |
---|
/*
 * Release the mutex thread queue lock and restore the interrupt state
 * saved in lock_context by _Mutex_Queue_acquire().
 */
static void _Mutex_Queue_release(
  Mutex_Control    *mutex,
  ISR_lock_Context *lock_context
)
{
  _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
}
---|
106 | |
---|
/*
 * Slow path of a mutex obtain: the mutex is owned by another thread.
 *
 * The owner inherits the priority of the executing thread, then the
 * executing thread is enqueued on the mutex thread queue and blocks.
 * On timeout the thread queue sets the wait return code to ETIMEDOUT;
 * a timeout of zero requests no timeout (presumably wait forever —
 * see _Thread_queue_Enqueue_critical()).
 *
 * Must be called with the mutex queue lock held (lock_context); the
 * enqueue operation releases it.
 */
static void _Mutex_Acquire_slow(
  Mutex_Control     *mutex,
  Thread_Control    *owner,
  Thread_Control    *executing,
  Watchdog_Interval  timeout,
  ISR_lock_Context  *lock_context
)
{
  _Thread_Inherit_priority( owner, executing );
  _Thread_queue_Enqueue_critical(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    timeout,
    ETIMEDOUT,
    lock_context
  );
}
---|
126 | |
---|
/*
 * Slow path of a mutex release: there are waiting threads, the executing
 * thread must have its priority restored, or both.
 *
 * If the thread queue is non-empty, ownership is handed over directly to
 * the first thread in queue order: it becomes the owner and gains a
 * resource before it is extracted and unblocked, so the mutex is never
 * observed as free while threads are waiting.  The queue lock
 * (lock_context) is released on all paths.
 */
static void _Mutex_Release_slow(
  Mutex_Control      *mutex,
  Thread_Control     *executing,
  Thread_queue_Heads *heads,
  bool                keep_priority,
  ISR_lock_Context   *lock_context
)
{
  if (heads != NULL) {
    const Thread_queue_Operations *operations;
    Thread_Control *first;
    bool unblock;

    operations = MUTEX_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    /* Pass ownership to the first waiting thread before it is unblocked */
    mutex->owner = first;
    ++first->resource_count;
    unblock = _Thread_queue_Extract_locked(
      &mutex->Queue.Queue,
      operations,
      first,
      NULL
    );
    _Thread_queue_Boost_priority( &mutex->Queue.Queue, first );
    _Thread_queue_Unblock_critical(
      unblock,
      &mutex->Queue.Queue,
      first,
      lock_context
    );
  } else {
    _Mutex_Queue_release( mutex, lock_context);
  }

  if ( !keep_priority ) {
    Per_CPU_Control *cpu_self;

    /* Undo priority inheritance with thread dispatching disabled */
    cpu_self = _Thread_Dispatch_disable();
    _Thread_Restore_priority( executing );
    _Thread_Dispatch_enable( cpu_self );
  }
}
---|
170 | |
---|
/*
 * Release the mutex owned by the executing thread.
 *
 * Clears the ownership and gives up one resource.  The fast path — no
 * waiting threads and no priority to restore — only releases the queue
 * lock; everything else is delegated to _Mutex_Release_slow().
 *
 * Must be called with the mutex queue lock held (lock_context); the
 * lock is released on all paths.
 */
static void _Mutex_Release_critical(
  Mutex_Control    *mutex,
  Thread_Control   *executing,
  ISR_lock_Context *lock_context
)
{
  Thread_queue_Heads *heads;
  bool keep_priority;

  mutex->owner = NULL;

  --executing->resource_count;

  /*
   * Ensure that the owner resource count is visible to all other
   * processors and that we read the latest priority restore
   * hint.
   */
  _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

  heads = mutex->Queue.Queue.heads;
  keep_priority = _Thread_Owns_resources( executing )
    || !executing->priority_restore_hint;

  if ( __predict_true( heads == NULL && keep_priority ) ) {
    _Mutex_Queue_release( mutex, lock_context );
  } else {
    _Mutex_Release_slow(
      mutex,
      executing,
      heads,
      keep_priority,
      lock_context
    );
  }
}
---|
207 | |
---|
/*
 * Obtain the mutex, blocking without timeout until it is available.
 *
 * Fast path: the mutex is free, so the executing thread becomes the
 * owner and gains one resource inside the queue critical section.
 * Otherwise the thread is enqueued via _Mutex_Acquire_slow().
 */
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );
  } else {
    /* Timeout of zero: block until the mutex is handed over */
    _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
  }
}
---|
228 | |
---|
/*
 * Obtain the mutex with an absolute timeout.
 *
 * Returns 0 on success, EINVAL if abstime is invalid, or ETIMEDOUT if
 * abstime is already in the past, is now, or expires while waiting.
 */
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;
  Thread_Control   *owner;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  owner = mutex->owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Fast path: the mutex is free, take ownership immediately */
    mutex->owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Validate the absolute timeout before blocking */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    /* Preset success; the thread queue overwrites this on timeout */
    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );

    return (int) executing->Wait.return_code;
  }
}
---|
271 | |
---|
272 | int _Mutex_Try_acquire( struct _Mutex_Control *_mutex ) |
---|
273 | { |
---|
274 | Mutex_Control *mutex; |
---|
275 | ISR_lock_Context lock_context; |
---|
276 | Thread_Control *executing; |
---|
277 | Thread_Control *owner; |
---|
278 | int eno; |
---|
279 | |
---|
280 | mutex = _Mutex_Get( _mutex ); |
---|
281 | executing = _Mutex_Queue_acquire( mutex, &lock_context ); |
---|
282 | |
---|
283 | owner = mutex->owner; |
---|
284 | |
---|
285 | if ( __predict_true( owner == NULL ) ) { |
---|
286 | mutex->owner = executing; |
---|
287 | ++executing->resource_count; |
---|
288 | eno = 0; |
---|
289 | } else { |
---|
290 | eno = EBUSY; |
---|
291 | } |
---|
292 | |
---|
293 | _Mutex_Queue_release( mutex, &lock_context ); |
---|
294 | |
---|
295 | return eno; |
---|
296 | } |
---|
297 | |
---|
/*
 * Release the mutex.  The executing thread must be the current owner
 * (checked by assertion only).
 */
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control    *mutex;
  ISR_lock_Context  lock_context;
  Thread_Control   *executing;

  mutex = _Mutex_Get( _mutex );
  executing = _Mutex_Queue_acquire( mutex, &lock_context );

  _Assert( mutex->owner == executing );

  _Mutex_Release_critical( mutex, executing, &lock_context );
}
---|
311 | |
---|
312 | static Mutex_recursive_Control *_Mutex_recursive_Get( |
---|
313 | struct _Mutex_recursive_Control *_mutex |
---|
314 | ) |
---|
315 | { |
---|
316 | return (Mutex_recursive_Control *) _mutex; |
---|
317 | } |
---|
318 | |
---|
/*
 * Obtain the recursive mutex, blocking without timeout.
 *
 * If the executing thread already owns the mutex, only the nest level is
 * incremented; the resource count is charged once, on the first obtain.
 */
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else if ( owner == executing ) {
    /* Recursive obtain by the owner */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  } else {
    /* Timeout of zero: block until the mutex is handed over */
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
  }
}
---|
342 | |
---|
/*
 * Obtain the recursive mutex with an absolute timeout.
 *
 * Returns 0 on success (including a recursive obtain by the owner),
 * EINVAL if abstime is invalid, or ETIMEDOUT if abstime is already in
 * the past, is now, or expires while waiting.
 */
int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Fast path: the mutex is free, take ownership immediately */
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else if ( owner == executing ) {
    /* Recursive obtain by the owner always succeeds without blocking */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, &lock_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Validate the absolute timeout before blocking */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, &lock_context );
        return ETIMEDOUT;
      default:
        break;
    }

    /* Preset success; the thread queue overwrites this on timeout */
    executing->Wait.return_code = 0;
    _Mutex_Acquire_slow(
      &mutex->Mutex,
      owner,
      executing,
      ticks,
      &lock_context
    );

    return (int) executing->Wait.return_code;
  }
}
---|
396 | |
---|
/*
 * Try to obtain the recursive mutex without blocking.
 *
 * Returns 0 when the mutex was free or already owned by the executing
 * thread (nest level incremented), otherwise EBUSY.
 */
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  owner = mutex->Mutex.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.owner = executing;
    ++executing->resource_count;
    eno = 0;
  } else if ( owner == executing ) {
    /* Recursive obtain by the owner */
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, &lock_context );

  return eno;
}
---|
425 | |
---|
/*
 * Release the recursive mutex.  The executing thread must be the current
 * owner (checked by assertion only).
 *
 * A nest level greater than zero only decrements the nest level; the
 * actual release happens when the outermost obtain is undone.
 */
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  ISR_lock_Context         lock_context;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );

  _Assert( mutex->Mutex.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    /* Outermost release: hand over or free the mutex */
    _Mutex_Release_critical( &mutex->Mutex, executing, &lock_context );
  } else {
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, &lock_context );
  }
}
---|
448 | |
---|
449 | #endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */ |
---|