source: rtems/cpukit/score/src/threadqenqueue.c @ 8a8b95aa

Last change on this file since 8a8b95aa was 4c20da4b, checked in by Sebastian Huber <sebastian.huber@…>, on 04/04/19 at 07:18:11

doxygen: Rename Score* groups in RTEMSScore*

Update #3706

/**
 * @file
 *
 * @brief Thread Queue Operations
 * @ingroup RTEMSScoreThreadQ
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

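/*
 * The registry is an RB-tree ordered by the address of the source thread
 * queue of each link.  The following helpers provide the comparison and
 * mapping functions used by the inline RB-tree find and insert operations
 * below.
 */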
static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

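/*
 * Registers a new link from the source thread queue to the target thread
 * queue.  Before the insert, the chain of already registered links is
 * followed starting at the target.  If this walk leads back to the source,
 * adding the link would close a cycle, so the link is not added and false is
 * returned to indicate a deadlock.
 */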
static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

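/*
 * Releases the thread wait locks and registry links acquired by
 * _Thread_queue_Path_acquire_critical().  The path links are released in
 * reverse order, from the last acquired link back to the start of the path.
 */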
#if !defined(RTEMS_SMP)
static
#endif
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) queue_context;
#endif
}

#if defined(RTEMS_SMP)
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for the
   * first thread on the path that tries to enqueue on a thread queue.  This
   * thread can be identified by the thread wait operations.  This lock acquire
   * is necessary for the timeout and explicit thread priority changes, see
   * _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
#endif

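/*
 * Acquires the thread wait locks along the resource dependency path that
 * starts at the given thread queue and follows the chain of queue owners.
 * Returns true if the complete path could be acquired, and false if
 * following the path leads back to the starting queue or to the calling
 * thread, which indicates a deadlock.
 */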
#if !defined(RTEMS_SMP)
static
#endif
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control     *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

void _Thread_queue_Enqueue_do_nothing_extra(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Per_CPU_Control      *cpu_self,
  Thread_queue_Context *queue_context
)
{
  /* Do nothing */
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}

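/*
 * Enqueues the thread on the thread queue and blocks it.  The basic sequence
 * is: claim the thread wait queue, acquire the dependency path for deadlock
 * detection, perform the operation-specific enqueue, mark the thread as
 * intending to block, release the queue lock, set the thread state, and
 * finally either block the thread or cancel the blocking in case the request
 * was already satisfied or timed out in the meantime.
 */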
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may have already changed our state with respect to the thread queue
   * object.  The request could have been satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must not
   * modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}

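/*
 * Enqueues the thread on the thread queue, raises the sticky level of the
 * thread by one, and busy waits on the thread wait flags until the
 * intend-to-block state is left, instead of blocking the thread.  A fatal
 * error is raised if the thread dispatch disable level is not exactly one
 * after the queue lock was released.
 */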
#if defined(RTEMS_SMP)
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Assert( queue_context->enqueue_callout != NULL );

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
static bool _Thread_queue_MP_set_callout(
  Thread_Control             *the_thread,
  const Thread_queue_Context *queue_context
)
{
  Thread_Proxy_control    *the_proxy;
  Thread_queue_MP_callout  mp_callout;

  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    return false;
  }

  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = queue_context->mp_callout;
  _Assert( mp_callout != NULL );
  the_proxy->thread_queue_callout = queue_context->mp_callout;
  return true;
}
#endif

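/*
 * Moves the thread into the ready-again wait state and restores its default
 * thread wait information.  Returns true if the caller must unblock the
 * thread, which is the case if the thread was already blocked, and false if
 * the thread was still in the intend-to-block state.
 */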
static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}

bool _Thread_queue_Extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

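/*
 * Hands the thread queue over from the previous owner to the first enqueued
 * thread selected by the operation-specific surrender handler.  The new
 * owner is made ready again and unblocked if necessary, and pending priority
 * updates are carried out with thread dispatching disabled.
 */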
void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}

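/*
 * Sticky variant of the surrender operation.  The new owner is expected to
 * busy wait in _Thread_queue_Enqueue_sticky(), so it is only made ready
 * again and never unblocked here.  The sticky level of the previous owner is
 * lowered by one, while the sticky level of the new owner stays at the value
 * raised on enqueue.
 */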
#if defined(RTEMS_SMP)
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;
  _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
#endif

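/*
 * Dequeues the first thread, if any, from the thread queue and returns it.
 * The thread queue is acquired and released inside this function.
 */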
Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif