source: rtems/cpukit/score/src/threadqenqueue.c @ 6117f29

Last change on this file since 6117f29 was 1c1e31f7, checked in by Sebastian Huber <sebastian.huber@…>, on 08/04/16 at 06:10:29

score: Optimize _Thread_queue_Path_release()

Update #2556.

/**
 * @file
 *
 * @brief Thread Queue Operations
 * @ingroup ScoreThreadQ
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}
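
/*
 * Illustrative sketch (not part of the RTEMS sources): _RBTree_Find_inline()
 * above is driven by an equality predicate, an ordering predicate and a map
 * function.  The minimal, self-contained example below shows the same
 * callback-driven lookup over a plain binary search tree.  All Demo_* names
 * are hypothetical and do not exist in RTEMS.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct Demo_tree_node {
  struct Demo_tree_node *left;
  struct Demo_tree_node *right;
} Demo_tree_node;

/* Return the first node for which equal() holds, descending via less(). */
static Demo_tree_node *Demo_tree_find(
  Demo_tree_node *root,
  const void     *key,
  bool         ( *equal )( const void *, const Demo_tree_node * ),
  bool         ( *less )( const void *, const Demo_tree_node * )
)
{
  while ( root != NULL ) {
    if ( ( *equal )( key, root ) ) {
      return root;
    }

    root = ( *less )( key, root ) ? root->left : root->right;
  }

  return NULL;
}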

static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  link->source = source;
  link->target = target;
  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}
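
/*
 * Illustrative sketch (not part of the RTEMS sources): the loop in
 * _Thread_queue_Link_add() above detects a deadlock by following the chain of
 * registered source/target links starting at the new target; if the walk gets
 * back to the new source, the new edge would close a cycle.  The
 * self-contained example below models the registry as a simple array; all
 * Demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

/* One wait-for edge: the owner of "source" currently waits on "target". */
typedef struct {
  const void *source;
  const void *target;
} Demo_link;

/* Linear lookup standing in for the red-black tree keyed by source. */
static const Demo_link *Demo_link_find(
  const Demo_link *links,
  size_t           link_count,
  const void      *source
)
{
  size_t i;

  for ( i = 0; i < link_count; ++i ) {
    if ( links[ i ].source == source ) {
      return &links[ i ];
    }
  }

  return NULL;
}

/* True if adding the edge source -> target would close a cycle (deadlock). */
static bool Demo_would_deadlock(
  const Demo_link *links,
  size_t           link_count,
  const void      *source,
  const void      *target
)
{
  const void *recursive_target = target;

  while ( true ) {
    const Demo_link *link;

    link = Demo_link_find( links, link_count, recursive_target );

    if ( link == NULL ) {
      return false;
    }

    recursive_target = link->target;

    if ( recursive_target == source ) {
      return true;
    }
  }
}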

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );
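
/*
 * Illustrative sketch (not part of the RTEMS sources): RTEMS_CONTAINER_OF()
 * recovers the enclosing structure from a pointer to one of its members, as
 * THREAD_QUEUE_LINK_OF_PATH_NODE() above does for the embedded Path_node.  A
 * minimal offsetof()-based equivalent for a hypothetical Demo_path_link type
 * looks like this.
 */
#include <stddef.h>

typedef struct {
  void *next;
  void *previous;
} Demo_chain_node;

typedef struct {
  int             payload;
  Demo_chain_node Path_node;
} Demo_path_link;

/* Map a pointer to the embedded Path_node member back to its Demo_path_link. */
#define DEMO_PATH_LINK_OF_NODE( node ) \
  ( (Demo_path_link *) \
      ( (char *) ( node ) - offsetof( Demo_path_link, Path_node ) ) )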

static void _Thread_queue_Path_release( Thread_queue_Path *path )
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &path->Links );
  node = _Chain_Last( &path->Links );

  if ( head != node ) {
    Thread_queue_Link *link;

    /* The terminal link has an owner which does not wait on a thread queue */
    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
    _Assert( link->Queue_context.Wait.queue == NULL );

    _Thread_Wait_release_default_critical(
      link->owner,
      &link->Queue_context.Lock_context
    );

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif

    while ( head != node ) {
      /* The other links have an owner which waits on a thread queue */
      link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
      _Assert( link->Queue_context.Wait.queue != NULL );

      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Queue_context.Wait.queue,
        &link->Queue_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Queue_context );

      node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
      _Chain_Set_off_chain( &link->Path_node );
#endif
    }
  }
#else
  (void) path;
#endif
}

static bool _Thread_queue_Path_acquire(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue,
  Thread_queue_Path  *path
)
{
  Thread_Control     *owner;

#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &path->Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node( &path->Start.Path_node );
  _Thread_queue_Context_initialize( &path->Start.Queue_context );
  link = &path->Start;

  do {
    _Chain_Append_unprotected( &path->Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Queue_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Queue_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Queue_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Queue_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Queue_context );

        if ( link->Queue_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Queue_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Queue_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Queue_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Queue_context.Wait.queue = NULL;
        _Thread_queue_Path_release( path );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Terminate(
    INTERNAL_ERROR_CORE,
    false,
    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
  );
}
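
/*
 * Illustrative sketch (not part of the RTEMS sources): a detected deadlock is
 * reported through the deadlock_callout stored in the thread queue context
 * (see _Thread_queue_Enqueue_critical() below), so a caller selects either
 * the status variant or the fatal variant above.  The self-contained example
 * below shows the same callout dispatch with hypothetical Demo_* names.
 */
#include <stdio.h>

typedef struct {
  int wait_return_code;
} Demo_thread;

typedef void ( *Demo_deadlock_callout )( Demo_thread * );

typedef struct {
  Demo_deadlock_callout deadlock_callout;
} Demo_queue_context;

/* "Status" flavour: record an error code and let the caller handle it. */
static void Demo_deadlock_status( Demo_thread *the_thread )
{
  the_thread->wait_return_code = -1;
}

/* "Fatal" flavour: treat the deadlock as an unrecoverable error. */
static void Demo_deadlock_fatal( Demo_thread *the_thread )
{
  (void) the_thread;
  fprintf( stderr, "thread queue deadlock detected\n" );
}

/* The enqueue path invokes whichever callout the caller configured. */
static void Demo_report_deadlock(
  const Demo_queue_context *queue_context,
  Demo_thread              *the_thread
)
{
  ( *queue_context->deadlock_callout )( the_thread );
}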

void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );

  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   *  If the thread wants a timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case meaning an indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may have already changed our state with respect to the thread queue
   * object.  The request could be satisfied or timed out.  This situation is
   * indicated by the thread wait flags.  Other parties must not modify our
   * thread state as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread
   * wait state, thus we have to cancel the blocking operation ourselves if
   * necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}
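
/*
 * Illustrative sketch (not part of the RTEMS sources): the wait flag protocol
 * used above by _Thread_Wait_flags_try_change_acquire() and, on the extract
 * side, by _Thread_queue_Do_extract_locked() below can be modelled with plain
 * C11 atomics.  The enqueuing thread tries INTEND_TO_BLOCK -> BLOCKED, while
 * a satisfying party races it with INTEND_TO_BLOCK -> READY_AGAIN.  All
 * Demo_* names are hypothetical and the memory orderings are simplified.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum {
  DEMO_INTEND_TO_BLOCK = 1,
  DEMO_BLOCKED         = 2,
  DEMO_READY_AGAIN     = 3
};

/* Enqueuing thread: after dropping the queue lock, commit to blocking only if
 * nobody satisfied the request in the meantime. */
static bool Demo_commit_to_block( atomic_int *wait_flags )
{
  int expected = DEMO_INTEND_TO_BLOCK;

  return atomic_compare_exchange_strong( wait_flags, &expected, DEMO_BLOCKED );
}

/* Satisfying party (extract or timeout): a real unblock is only needed if the
 * waiter already reached the blocked state. */
static bool Demo_satisfy_request( atomic_int *wait_flags )
{
  int expected = DEMO_INTEND_TO_BLOCK;

  if ( atomic_compare_exchange_strong( wait_flags, &expected, DEMO_READY_AGAIN ) ) {
    /* The waiter has not blocked yet; it will observe READY_AGAIN and cancel
     * the blocking operation itself. */
    return false;
  }

  /* The waiter is fully blocked; mark it ready again and unblock it. */
  atomic_store( wait_flags, DEMO_READY_AGAIN );
  return true;
}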

bool _Thread_queue_Do_extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread
#if defined(RTEMS_MULTIPROCESSING)
  ,
  const Thread_queue_Context    *queue_context
#endif
)
{
  bool success;
  bool unblock;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = queue_context->mp_callout;
  }
#endif

  ( *operations->extract )( queue, the_thread );

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );

  return unblock;
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context.Lock_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif