1 | /* |
---|
2 | * Copyright (c) 2015, 2016 embedded brains GmbH. All rights reserved. |
---|
3 | * |
---|
4 | * embedded brains GmbH |
---|
5 | * Dornierstr. 4 |
---|
6 | * 82178 Puchheim |
---|
7 | * Germany |
---|
8 | * <rtems@embedded-brains.de> |
---|
9 | * |
---|
10 | * The license and distribution terms for this file may be |
---|
11 | * found in the file LICENSE in this distribution or at |
---|
12 | * http://www.rtems.org/license/LICENSE. |
---|
13 | */ |
---|
14 | |
---|
15 | #if HAVE_CONFIG_H |
---|
16 | #include "config.h" |
---|
17 | #endif |
---|
18 | |
---|
19 | #if HAVE_STRUCT__THREAD_QUEUE_QUEUE |
---|
20 | |
---|
21 | #include <sys/lock.h> |
---|
22 | #include <errno.h> |
---|
23 | |
---|
24 | #include <rtems/score/assert.h> |
---|
25 | #include <rtems/score/threadimpl.h> |
---|
26 | #include <rtems/score/threadqimpl.h> |
---|
27 | #include <rtems/score/todimpl.h> |
---|
28 | |
---|
29 | #define MUTEX_TQ_OPERATIONS &_Thread_queue_Operations_priority_inherit |
---|
30 | |
---|
/*
 * Internal view of the opaque <sys/lock.h> struct _Mutex_Control.  The
 * implementation overlays this structure on the public one via a pointer
 * cast in _Mutex_Get(), so the static asserts below pin the layout.
 */
typedef struct {
  Thread_queue_Syslock_queue Queue;
} Mutex_Control;

/* The queue member must sit at exactly the same offset as _Queue. */
RTEMS_STATIC_ASSERT(
  offsetof( Mutex_Control, Queue )
    == offsetof( struct _Mutex_Control, _Queue ),
  MUTEX_CONTROL_QUEUE
);

/* The internal object must fit into the storage of the public object. */
RTEMS_STATIC_ASSERT(
  sizeof( Mutex_Control ) <= sizeof( struct _Mutex_Control ),
  MUTEX_CONTROL_SIZE
);
---|
45 | |
---|
46 | typedef struct { |
---|
47 | Mutex_Control Mutex; |
---|
48 | unsigned int nest_level; |
---|
49 | } Mutex_recursive_Control; |
---|
50 | |
---|
51 | RTEMS_STATIC_ASSERT( |
---|
52 | offsetof( Mutex_recursive_Control, Mutex ) |
---|
53 | <= offsetof( struct _Mutex_recursive_Control, _Mutex ), |
---|
54 | MUTEX_RECURSIVE_CONTROL_MUTEX |
---|
55 | ); |
---|
56 | |
---|
57 | RTEMS_STATIC_ASSERT( |
---|
58 | offsetof( Mutex_recursive_Control, nest_level ) |
---|
59 | <= offsetof( struct _Mutex_recursive_Control, _nest_level ), |
---|
60 | MUTEX_RECURSIVE_CONTROL_NEST_LEVEL |
---|
61 | ); |
---|
62 | |
---|
63 | RTEMS_STATIC_ASSERT( |
---|
64 | sizeof( Mutex_recursive_Control ) |
---|
65 | <= sizeof( struct _Mutex_recursive_Control ), |
---|
66 | MUTEX_RECURSIVE_CONTROL_SIZE |
---|
67 | ); |
---|
68 | |
---|
69 | static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex ) |
---|
70 | { |
---|
71 | return (Mutex_Control *) _mutex; |
---|
72 | } |
---|
73 | |
---|
/*
 * Acquire the mutex thread queue lock and return the executing thread.
 *
 * Must be called with interrupts already disabled (the callers use
 * _Thread_queue_Context_ISR_disable() first).  The executing thread's
 * Potpourri_stats are used to account for the lock acquire.
 */
static Thread_Control *_Mutex_Queue_acquire_critical(
  Mutex_Control        *mutex,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *executing;

  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &mutex->Queue.Queue,
    &executing->Potpourri_stats,
    &queue_context->Lock_context.Lock_context
  );

  return executing;
}
---|
90 | |
---|
/*
 * Release the mutex thread queue lock and restore the interrupt level
 * saved by the matching _Thread_queue_Context_ISR_disable().
 */
static void _Mutex_Queue_release(
  Mutex_Control        *mutex,
  ISR_Level             level,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Queue_release_critical(
    &mutex->Queue.Queue,
    &queue_context->Lock_context.Lock_context
  );
  _ISR_Local_enable( level );
}
---|
103 | |
---|
/*
 * Block the executing thread on the mutex thread queue (contended path).
 *
 * Called with the queue lock held and interrupts disabled; the saved ISR
 * level is handed over to the enqueue operation via the queue context.
 * Uses the priority-inheritance thread queue operations (see
 * MUTEX_TQ_OPERATIONS) and treats a detected deadlock as a fatal error.
 * Timeout configuration (none/relative) is set up by the caller before
 * this function is invoked.
 */
static void _Mutex_Acquire_slow(
  Mutex_Control        *mutex,
  Thread_Control       *owner, /* current owner; unused here, kept for call symmetry */
  Thread_Control       *executing,
  ISR_Level             level,
  Thread_queue_Context *queue_context
)
{
  _Thread_queue_Context_set_thread_state(
    queue_context,
    STATES_WAITING_FOR_SYS_LOCK_MUTEX
  );
  _Thread_queue_Context_set_do_nothing_enqueue_callout( queue_context );
  _Thread_queue_Context_set_deadlock_callout(
    queue_context,
    _Thread_queue_Deadlock_fatal
  );
  _Thread_queue_Context_set_ISR_level( queue_context, level );
  _Thread_queue_Enqueue(
    &mutex->Queue.Queue,
    MUTEX_TQ_OPERATIONS,
    executing,
    queue_context
  );
}
---|
129 | |
---|
/*
 * Give up ownership of the mutex while holding the queue lock.
 *
 * Clears the owner and decrements the executing thread's resource count.
 * If no thread waits on the queue (the expected case), simply drop the
 * lock; otherwise surrender the mutex to the highest-priority waiter via
 * the priority-inheritance operations, which also undoes any priority
 * boost of the previous owner.
 */
static void _Mutex_Release_critical(
  Mutex_Control        *mutex,
  Thread_Control       *executing,
  ISR_Level             level,
  Thread_queue_Context *queue_context
)
{
  Thread_queue_Heads *heads;

  heads = mutex->Queue.Queue.heads;
  mutex->Queue.Queue.owner = NULL;
  _Thread_Resource_count_decrement( executing );

  if ( __predict_true( heads == NULL ) ) {
    /* No waiters: fast path, just release the queue lock. */
    _Mutex_Queue_release( mutex, level, queue_context );
  } else {
    /* Waiters present: hand the mutex over to the first one. */
    _Thread_queue_Context_set_ISR_level( queue_context, level );
    _Thread_queue_Surrender(
      &mutex->Queue.Queue,
      heads,
      executing,
      queue_context,
      MUTEX_TQ_OPERATIONS
    );
  }
}
---|
156 | |
---|
/*
 * Acquire the mutex, blocking without timeout until it is available.
 *
 * Fast path: if the mutex is unowned, claim it and bump the resource
 * count.  Otherwise block on the thread queue via _Mutex_Acquire_slow().
 * Note: this variant is not recursive; acquiring a mutex already owned by
 * the executing thread enters the slow path (deadlock is fatal).
 */
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  ISR_Level             level;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( mutex, level, &queue_context );
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
  }
}
---|
181 | |
---|
/*
 * Acquire the mutex with an absolute CLOCK_REALTIME timeout.
 *
 * Returns 0 on success, EINVAL for an invalid abstime, ETIMEDOUT if the
 * timeout is already in the past (or now), and otherwise the POSIX error
 * derived from the thread wait status after blocking.
 */
int _Mutex_Acquire_timed(
  struct _Mutex_Control *_mutex,
  const struct timespec *abstime
)
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  ISR_Level             level;
  Thread_Control       *executing;
  Thread_Control       *owner;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontended fast path: claim the mutex immediately. */
    mutex->Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( mutex, level, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Convert the absolute timeout to clock ticks before blocking. */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( mutex, level, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( mutex, level, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
---|
227 | |
---|
/*
 * Try to acquire the mutex without blocking.
 *
 * Returns 0 if the mutex was unowned and is now owned by the executing
 * thread, EBUSY otherwise (including when the executing thread already
 * owns it — this variant is not recursive).
 */
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  ISR_Level             level;
  Thread_Control       *executing;
  Thread_Control       *owner;
  int                   eno;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );

  owner = mutex->Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( mutex, level, &queue_context );

  return eno;
}
---|
256 | |
---|
/*
 * Release the mutex.
 *
 * The executing thread must own the mutex (checked only via _Assert in
 * debug builds).  Ownership transfer to a waiter, if any, happens in
 * _Mutex_Release_critical().
 */
void _Mutex_Release( struct _Mutex_Control *_mutex )
{
  Mutex_Control        *mutex;
  Thread_queue_Context  queue_context;
  ISR_Level             level;
  Thread_Control       *executing;

  mutex = _Mutex_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );

  _Assert( mutex->Queue.Queue.owner == executing );

  _Mutex_Release_critical( mutex, executing, level, &queue_context );
}
---|
273 | |
---|
274 | static Mutex_recursive_Control *_Mutex_recursive_Get( |
---|
275 | struct _Mutex_recursive_Control *_mutex |
---|
276 | ) |
---|
277 | { |
---|
278 | return (Mutex_recursive_Control *) _mutex; |
---|
279 | } |
---|
280 | |
---|
/*
 * Acquire the recursive mutex, blocking without timeout.
 *
 * Three cases: unowned — claim it; owned by the executing thread — bump
 * the nesting level; owned by another thread — block on the thread queue
 * via _Mutex_Acquire_slow().
 */
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else if ( owner == executing ) {
    /* Recursive acquire by the owner: no queue interaction needed. */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  } else {
    _Thread_queue_Context_set_no_timeout( &queue_context );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
  }
}
---|
308 | |
---|
/*
 * Acquire the recursive mutex with an absolute CLOCK_REALTIME timeout.
 *
 * Returns 0 on success (including a recursive acquire by the current
 * owner, which never blocks), EINVAL for an invalid abstime, ETIMEDOUT if
 * the timeout is already in the past (or now), and otherwise the POSIX
 * error derived from the thread wait status after blocking.
 */
int _Mutex_recursive_Acquire_timed(
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    /* Uncontended fast path: claim the mutex immediately. */
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );

    return 0;
  } else if ( owner == executing ) {
    /* Recursive acquire by the owner: never blocks, timeout irrelevant. */
    ++mutex->nest_level;
    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );

    return 0;
  } else {
    Watchdog_Interval ticks;

    /* Convert the absolute timeout to clock ticks before blocking. */
    switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
      case TOD_ABSOLUTE_TIMEOUT_INVALID:
        _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
        return EINVAL;
      case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
      case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
        _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
        return ETIMEDOUT;
      default:
        break;
    }

    _Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
    _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );

    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
  }
}
---|
359 | |
---|
/*
 * Try to acquire the recursive mutex without blocking.
 *
 * Returns 0 if the mutex was unowned or already owned by the executing
 * thread (in which case the nesting level is incremented), EBUSY if
 * another thread owns it.
 */
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  Thread_Control          *owner;
  int                      eno;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  owner = mutex->Mutex.Queue.Queue.owner;

  if ( __predict_true( owner == NULL ) ) {
    mutex->Mutex.Queue.Queue.owner = executing;
    _Thread_Resource_count_increment( executing );
    eno = 0;
  } else if ( owner == executing ) {
    ++mutex->nest_level;
    eno = 0;
  } else {
    eno = EBUSY;
  }

  _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );

  return eno;
}
---|
391 | |
---|
/*
 * Release the recursive mutex.
 *
 * The executing thread must own the mutex (checked only via _Assert in
 * debug builds).  A nesting level of zero means this is the outermost
 * release, which actually gives up ownership; otherwise only the nesting
 * level is decremented.
 */
void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
  Mutex_recursive_Control *mutex;
  Thread_queue_Context     queue_context;
  ISR_Level                level;
  Thread_Control          *executing;
  unsigned int             nest_level;

  mutex = _Mutex_recursive_Get( _mutex );
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_ISR_disable( &queue_context, level );
  executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );

  _Assert( mutex->Mutex.Queue.Queue.owner == executing );

  nest_level = mutex->nest_level;

  if ( __predict_true( nest_level == 0 ) ) {
    /* Outermost release: give up ownership, possibly wake a waiter. */
    _Mutex_Release_critical( &mutex->Mutex, executing, level, &queue_context );
  } else {
    /* Inner release: just unwind one nesting level. */
    mutex->nest_level = nest_level - 1;

    _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
  }
}
---|
417 | |
---|
418 | #endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */ |
---|