source: rtems/cpukit/score/src/threadqenqueue.c @ db3a3de

Last change on this file since db3a3de was db3a3de, checked in by Sebastian Huber <sebastian.huber@…>, on 10/10/17 at 08:03:48

score: Add _Thread_queue_Dispatch_disable()

/**
 * @file
 *
 * @brief Thread Queue Operations
 * @ingroup ScoreThreadQ
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

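/*
 * The following helpers are the lookup callbacks for the link registry
 * red-black tree.  The tree is keyed by the address of the source thread
 * queue of a link, so _Thread_queue_Link_find() can locate the registered
 * link originating from a given thread queue.
 */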
static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

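/*
 * Registers a link from the source thread queue to the target thread queue.
 * Before the link is inserted, the registry is searched by following the
 * already registered links starting at the target.  If this walk reaches the
 * source again, adding the link would close a cycle, i.e. a deadlock, and
 * false is returned without modifying the registry.
 */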
static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

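/*
 * Releases the thread wait locks and registry links acquired along the
 * enqueue path by _Thread_queue_Path_acquire_critical() below, walking the
 * recorded path in reverse order.  On non-SMP configurations no locks are
 * acquired along the path, so there is nothing to release.
 */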
#if !defined(RTEMS_SMP)
static
#endif
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) queue_context;
#endif
}

#if defined(RTEMS_SMP)
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for the
   * first thread on the path that tries to enqueue on a thread queue.  This
   * thread can be identified by the thread wait operations.  This lock acquire
   * is necessary for the timeout and explicit thread priority changes, see
   * _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
#endif

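/*
 * Determines whether the thread may block on the queue without creating a
 * deadlock.  The function follows the chain of thread queue owners starting
 * at the queue: if an owner is itself blocked, the walk continues with the
 * queue that owner waits on.  The walk ends at a queue without an owner
 * (true is returned, blocking is safe) or at the enqueuing thread itself
 * (false is returned, a deadlock was detected).  On SMP configurations the
 * wait locks acquired along the way are recorded in the queue context path so
 * that they can be released later by _Thread_queue_Path_release_critical().
 */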
#if !defined(RTEMS_SMP)
static
#endif
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control     *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

void _Thread_queue_Enqueue_do_nothing(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  /* Do nothing */
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}

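/*
 * Arms the thread timer according to the timeout discipline stored in the
 * queue context.  A relative timeout of zero and the default discipline both
 * mean that the thread waits without a timeout.
 */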
static void _Thread_queue_Timeout(
  Thread_Control       *the_thread,
  Per_CPU_Control      *cpu_self,
  Thread_queue_Context *queue_context
)
{
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case: an indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }
}

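/*
 * Blocks the thread on the thread queue.  The sequence is:
 *
 *  1. Claim the thread wait state for the queue.
 *  2. Acquire the path to detect deadlocks; on a deadlock, undo the claim,
 *     release the queue and invoke the deadlock callout.
 *  3. Enqueue the thread via the thread queue operations and release the
 *     path again.
 *  4. Set the wait flags to THREAD_QUEUE_INTEND_TO_BLOCK, disable thread
 *     dispatching and release the queue lock.
 *  5. Invoke the enqueue callout, set the thread state and arm the timeout.
 *  6. Try to change the wait flags from intend-to-block to blocked; if this
 *     fails, the request was already satisfied or timed out on another
 *     processor or by an interrupt, so the blocking is cancelled.
 */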
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );
  _Assert( (uint8_t) queue_context->timeout_discipline != 0x7f );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  ( *queue_context->enqueue_callout )( queue, the_thread, queue_context );

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   *  If the thread wants to timeout, then schedule its timer.
   */
  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );

463  /*
464   * At this point thread dispatching is disabled, however, we already released
465   * the thread queue lock.  Thus, interrupts or threads on other processors
466   * may already changed our state with respect to the thread queue object.
467   * The request could be satisfied or timed out.  This situation is indicated
468   * by the thread wait flags.  Other parties must not modify our thread state
469   * as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread wait state,
470   * thus we have to cancel the blocking operation ourself if necessary.
471   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}

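/*
 * The sticky variant of the enqueue operation.  The overall flow mirrors
 * _Thread_queue_Enqueue(), however, the executing thread does not block:
 * after the timeout is armed and the sticky level of the thread is raised, it
 * busy waits until the wait flags leave THREAD_QUEUE_INTEND_TO_BLOCK, i.e.
 * until the request was satisfied or timed out.  A thread dispatch disable
 * level other than one at this point is treated as a fatal error.
 */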
#if defined(RTEMS_SMP)
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
static bool _Thread_queue_MP_set_callout(
  Thread_Control             *the_thread,
  const Thread_queue_Context *queue_context
)
{
  Thread_Proxy_control    *the_proxy;
  Thread_queue_MP_callout  mp_callout;

  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    return false;
  }

  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = queue_context->mp_callout;
  _Assert( mp_callout != NULL );
  the_proxy->thread_queue_callout = queue_context->mp_callout;
  return true;
}
#endif

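/*
 * Transfers the thread into the THREAD_QUEUE_READY_AGAIN wait state and
 * restores the default thread wait operations.  Returns true if the thread
 * was already blocked and therefore must be unblocked by the caller, and
 * false if the thread was still in the intend-to-block state and will cancel
 * the blocking itself.
 */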
static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}

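/*
 * Extracts the thread from the thread queue while the caller holds the queue
 * lock.  Returns true if the caller must unblock the thread afterwards, see
 * _Thread_queue_Make_ready_again().
 */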
bool _Thread_queue_Extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  _Thread_queue_Context_clear_priority_updates( queue_context );
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

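/*
 * Hands the ownership of the thread queue over to the thread selected by the
 * surrender operation.  The new owner is made ready again and, if it was
 * already blocked, it is unblocked after the queue lock was released.  The
 * resource count of the new owner is incremented for local threads.
 */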
void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}

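/*
 * The sticky variant of the surrender operation.  Since the new owner busy
 * waits in _Thread_queue_Enqueue_sticky(), there is no need to unblock it.
 * Instead, the sticky level of the previous owner is decremented and the
 * priority of the new owner is updated.
 */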
#if defined(RTEMS_SMP)
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;
  _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
#endif

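/*
 * Acquires the thread queue and removes the first enqueued thread, if any.
 * The returned thread is made ready again and unblocked if necessary; NULL is
 * returned if the thread queue was empty.
 */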
Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif