/**
 * @file
 *
 * @ingroup RTEMSScoreThreadQueue
 *
 * @brief This source file contains the implementation of
 *   _Thread_queue_Deadlock_fatal(), _Thread_queue_Deadlock_status(),
 *   _Thread_queue_Do_dequeue(), _Thread_queue_Enqueue(),
 *   _Thread_queue_Enqueue_do_nothing_extra(), _Thread_queue_Enqueue_sticky(),
 *   _Thread_queue_Extract(), _Thread_queue_Extract_critical(),
 *   _Thread_queue_Extract_locked(), _Thread_queue_Path_acquire_critical(),
 *   _Thread_queue_Path_release_critical(), _Thread_queue_Surrender(),
 *   _Thread_queue_Surrender_sticky(), and _Thread_queue_Unblock_critical().
 */

/*
 * COPYRIGHT (c) 1989-2014.
 * On-Line Applications Research Corporation (OAR).
 *
 * Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
26 | |
---|
27 | #ifdef HAVE_CONFIG_H |
---|
28 | #include "config.h" |
---|
29 | #endif |
---|
30 | |
---|
31 | #include <rtems/score/threadqimpl.h> |
---|
32 | #include <rtems/score/assert.h> |
---|
33 | #include <rtems/score/threaddispatch.h> |
---|
34 | #include <rtems/score/threadimpl.h> |
---|
35 | #include <rtems/score/status.h> |
---|
36 | #include <rtems/score/watchdogimpl.h> |
---|
37 | |
---|
/* Thread wait flags: the thread intends to block on a thread queue object */
#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

/* Thread wait flags: the thread is blocked on a thread queue object */
#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

/* Thread wait flags: the wait is over and the thread is ready again */
#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)
---|
#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  /* Protects the registry of active thread queue links */
  ISR_lock_Control Lock;

  /* Registry of active links, ordered by link source queue address */
  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};
---|
67 | |
---|
68 | static bool _Thread_queue_Link_equal( |
---|
69 | const void *left, |
---|
70 | const RBTree_Node *right |
---|
71 | ) |
---|
72 | { |
---|
73 | const Thread_queue_Queue *the_left; |
---|
74 | const Thread_queue_Link *the_right; |
---|
75 | |
---|
76 | the_left = left; |
---|
77 | the_right = (Thread_queue_Link *) right; |
---|
78 | |
---|
79 | return the_left == the_right->source; |
---|
80 | } |
---|
81 | |
---|
82 | static bool _Thread_queue_Link_less( |
---|
83 | const void *left, |
---|
84 | const RBTree_Node *right |
---|
85 | ) |
---|
86 | { |
---|
87 | const Thread_queue_Queue *the_left; |
---|
88 | const Thread_queue_Link *the_right; |
---|
89 | |
---|
90 | the_left = left; |
---|
91 | the_right = (Thread_queue_Link *) right; |
---|
92 | |
---|
93 | return (uintptr_t) the_left < (uintptr_t) the_right->source; |
---|
94 | } |
---|
95 | |
---|
/*
 * Map function for _RBTree_Find_inline(): the registry node is the first
 * member of Thread_queue_Link, so the node address is the link address.
 */
static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}
---|
100 | |
---|
/*
 * Looks up the registered link whose source is the specified thread queue.
 * Returns NULL if no such link is registered.  The registry lock must be
 * held by the caller.
 */
static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}
---|
114 | |
---|
/*
 * Tries to register a link from the source thread queue to the target thread
 * queue in the global registry.  Before the insert, the chain of already
 * registered links is followed starting at the target; if that chain leads
 * back to the source, adding the link would close a cycle, so the link is
 * not added and false is returned to indicate a deadlock.  Returns true if
 * the link was registered.
 */
static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  /* Walk the chain of registered links starting at the new target */
  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    /* The chain leads back to the source: the new link would close a cycle */
    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  /* No cycle detected: register the link keyed by its source queue */
  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}
---|
160 | |
---|
161 | static void _Thread_queue_Link_remove( Thread_queue_Link *link ) |
---|
162 | { |
---|
163 | Thread_queue_Links *links; |
---|
164 | ISR_lock_Context lock_context; |
---|
165 | |
---|
166 | links = &_Thread_queue_Links; |
---|
167 | |
---|
168 | _ISR_lock_Acquire( &links->Lock, &lock_context ); |
---|
169 | _RBTree_Extract( &links->Links, &link->Registry_node ); |
---|
170 | _ISR_lock_Release( &links->Lock, &lock_context ); |
---|
171 | } |
---|
172 | #endif |
---|
173 | |
---|
#if !defined(RTEMS_SMP)
static
#endif
/*
 * Releases the locks acquired along the thread queue path by
 * _Thread_queue_Path_acquire_critical().  On SMP configurations, the path
 * chain is walked from its tail back to its head so the locks are dropped in
 * reverse acquire order.  On non-SMP configurations, no locks were taken, so
 * this is a no-op.
 */
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      /*
       * The queue lock is held for this link: unregister it, release the
       * queue lock, and drop the pending request on the owner.
       */
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      /* Only the owner's default thread wait lock is held for this link */
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) queue_context;
#endif
}
---|
216 | |
---|
#if defined(RTEMS_SMP)
/*
 * Appends the dedicated deadlock link to the path in case a deadlock was
 * detected, so that the deadlock thread's default wait lock is held when the
 * deadlock is reported.
 */
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for the
   * first thread on the path that tries to enqueue on a thread queue.  This
   * thread can be identified by the thread wait operations.  This lock acquire
   * is necessary for the timeout and explicit thread priority changes, see
   * _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  /* Follow queue owners until a thread with default wait operations is found */
  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    /* No queue lock is associated with this link, only the default lock */
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
#endif
---|
258 | |
---|
#if !defined(RTEMS_SMP)
static
#endif
/*
 * Follows the chain of thread queue owners starting at the specified queue
 * and checks whether enqueueing the thread would produce a deadlock.  Returns
 * true if the enqueue is safe, and false if a deadlock was detected.  On SMP
 * configurations, the necessary thread wait locks along the path are acquired
 * and recorded in the queue context; they must be released later via
 * _Thread_queue_Path_release_critical().
 */
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  /* The thread already owns the queue it wants to block on: deadlock */
  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        /*
         * No deadlock at queue level.  Hand over from the owner's default
         * wait lock to the lock of the queue the owner waits on.  The gate
         * keeps the pending request alive across the hand-over.
         */
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          /* The owner stopped waiting in the meantime: the path ends here */
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        /* Registering the link would close a cycle: deadlock detected */
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  /*
   * Follow the chain of queue owners.  If it leads back to the enqueueing
   * thread, then blocking would produce a deadlock.
   */
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}
---|
366 | |
---|
367 | void _Thread_queue_Enqueue_do_nothing_extra( |
---|
368 | Thread_queue_Queue *queue, |
---|
369 | Thread_Control *the_thread, |
---|
370 | Per_CPU_Control *cpu_self, |
---|
371 | Thread_queue_Context *queue_context |
---|
372 | ) |
---|
373 | { |
---|
374 | /* Do nothing */ |
---|
375 | } |
---|
376 | |
---|
/*
 * Deadlock callout that reports the deadlock to the enqueueing thread via the
 * STATUS_DEADLOCK wait return code.
 */
void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}
---|
381 | |
---|
382 | void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread ) |
---|
383 | { |
---|
384 | _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK ); |
---|
385 | } |
---|
386 | |
---|
/*
 * Enqueues the thread on the thread queue and blocks it.  The caller must
 * hold the queue lock (via the queue context's lock context).  In case a
 * deadlock is detected, the deadlock callout of the queue context is invoked
 * instead and the thread is not blocked.
 */
void _Thread_queue_Enqueue(
  Thread_queue_Queue             *queue,
  const Thread_queue_Operations  *operations,
  Thread_Control                 *the_thread,
  Thread_queue_Context           *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );

#if defined(RTEMS_MULTIPROCESSING)
  /* The MPCI receive thread blocks on behalf of a remote node via a proxy */
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    /* Deadlock detected: undo the claim and report via the deadlock callout */
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may already changed our state with respect to the thread queue object.
   * The request could be satisfied or timed out.  This situation is indicated
   * by the thread wait flags.  Other parties must not modify our thread state
   * as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread wait state,
   * thus we have to cancel the blocking operation ourself if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    /* The request was already satisfied or timed out: cancel the block */
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
---|
461 | |
---|
#if defined(RTEMS_SMP)
/*
 * Enqueues the thread on the thread queue without blocking it in the
 * scheduler.  Instead of a real block, the thread busy waits for a change of
 * its thread wait flags, see the corresponding _Thread_queue_Surrender_sticky().
 * The caller must hold the queue lock.  Returns the thread wait status.
 *
 * Consistency fix: assert that the deadlock callout is set before invoking
 * it on the deadlock path, matching _Thread_queue_Enqueue().
 */
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue             *queue,
  const Thread_queue_Operations  *operations,
  Thread_Control                 *the_thread,
  Thread_queue_Context           *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Assert( queue_context->enqueue_callout != NULL );

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    /* Deadlock detected: undo the claim and report via the deadlock callout */
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  /* A sticky enqueue is only valid with exactly one disable level */
  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  /* Busy wait until the request is satisfied or timed out */
  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
#endif
---|
524 | |
---|
#if defined(RTEMS_MULTIPROCESSING)
/*
 * Stores the MP callout of the queue context in the thread proxy if the
 * thread is a proxy for a remote thread.  Returns true if the thread was a
 * proxy, and false for local threads (in which case nothing is done).
 */
static bool _Thread_queue_MP_set_callout(
  Thread_Control             *the_thread,
  const Thread_queue_Context *queue_context
)
{
  Thread_Proxy_control    *the_proxy;
  Thread_queue_MP_callout  mp_callout;

  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    return false;
  }

  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = queue_context->mp_callout;
  _Assert( mp_callout != NULL );
  the_proxy->thread_queue_callout = mp_callout;
  return true;
}
#endif
---|
545 | |
---|
/*
 * Moves the thread out of the blocked/intend-to-block wait state into the
 * ready-again state and restores the default wait state.  Returns true if
 * the thread was actually blocked and thus must be unblocked by the caller,
 * and false if it was still in the intend-to-block state.
 */
static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    /* The thread never actually blocked: no unblock necessary */
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}
---|
571 | |
---|
/*
 * Extracts the thread from the thread queue while the queue lock is held and
 * makes it ready again.  Returns true if the caller must unblock the thread,
 * see _Thread_queue_Make_ready_again().
 */
bool _Thread_queue_Extract_locked(
  Thread_queue_Queue             *queue,
  const Thread_queue_Operations  *operations,
  Thread_Control                 *the_thread,
  Thread_queue_Context           *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}
---|
585 | |
---|
586 | void _Thread_queue_Unblock_critical( |
---|
587 | bool unblock, |
---|
588 | Thread_queue_Queue *queue, |
---|
589 | Thread_Control *the_thread, |
---|
590 | ISR_lock_Context *lock_context |
---|
591 | ) |
---|
592 | { |
---|
593 | if ( unblock ) { |
---|
594 | Per_CPU_Control *cpu_self; |
---|
595 | |
---|
596 | cpu_self = _Thread_Dispatch_disable_critical( lock_context ); |
---|
597 | _Thread_queue_Queue_release( queue, lock_context ); |
---|
598 | |
---|
599 | _Thread_Remove_timer_and_unblock( the_thread, queue ); |
---|
600 | |
---|
601 | _Thread_Dispatch_enable( cpu_self ); |
---|
602 | } else { |
---|
603 | _Thread_queue_Queue_release( queue, lock_context ); |
---|
604 | } |
---|
605 | } |
---|
606 | |
---|
607 | void _Thread_queue_Extract_critical( |
---|
608 | Thread_queue_Queue *queue, |
---|
609 | const Thread_queue_Operations *operations, |
---|
610 | Thread_Control *the_thread, |
---|
611 | Thread_queue_Context *queue_context |
---|
612 | ) |
---|
613 | { |
---|
614 | bool unblock; |
---|
615 | |
---|
616 | unblock = _Thread_queue_Extract_locked( |
---|
617 | queue, |
---|
618 | operations, |
---|
619 | the_thread, |
---|
620 | queue_context |
---|
621 | ); |
---|
622 | |
---|
623 | _Thread_queue_Unblock_critical( |
---|
624 | unblock, |
---|
625 | queue, |
---|
626 | the_thread, |
---|
627 | &queue_context->Lock_context.Lock_context |
---|
628 | ); |
---|
629 | } |
---|
630 | |
---|
/*
 * Extracts the thread from the thread queue it currently waits on, if any,
 * and unblocks it if necessary.  Does nothing if the thread is not enqueued.
 */
void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context.Lock_context
    );
  } else {
    /* The thread does not wait on a thread queue: just drop the wait lock */
    _Thread_Wait_release( the_thread, &queue_context );
  }
}
---|
666 | |
---|
/*
 * Surrenders the thread queue to the first enqueued thread, which becomes
 * the new queue owner, and unblocks it if necessary.  The caller must hold
 * the queue lock and the queue must have at least one enqueued thread
 * (heads must not be NULL).
 */
void _Thread_queue_Surrender(
  Thread_queue_Queue             *queue,
  Thread_queue_Heads             *heads,
  Thread_Control                 *previous_owner,
  Thread_queue_Context           *queue_context,
  const Thread_queue_Operations  *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

#if defined(RTEMS_MULTIPROCESSING)
  /* Resource counts are only maintained for local threads, not proxies */
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}
---|
713 | |
---|
#if defined(RTEMS_SMP)
/*
 * Surrenders the thread queue to the first enqueued thread in the sticky
 * variant used by _Thread_queue_Enqueue_sticky().  The new owner is not
 * scheduler-blocked, so no unblock is performed; instead its change of wait
 * flags ends its busy wait.  The sticky levels of the previous and new owner
 * are updated accordingly.
 */
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue             *queue,
  Thread_queue_Heads             *heads,
  Thread_Control                 *previous_owner,
  Thread_queue_Context           *queue_context,
  const Thread_queue_Operations  *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

  /*
   * There is no need to check the unblock status, since in the corresponding
   * _Thread_queue_Enqueue_sticky() the thread is not blocked by the scheduler.
   * Instead, the thread busy waits for a change of its thread wait flags.
   */
  (void) _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
#endif
---|
754 | |
---|
#if defined(RTEMS_MULTIPROCESSING)
/*
 * Unblocks a thread proxy for a remote thread: invokes the MP callout stored
 * in the proxy with the object identifier of the thread queue object, then
 * frees the proxy.
 */
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif
---|