source: rtems/cpukit/score/src/threadqenqueue.c @ ff2e6c64

Last change on this file since ff2e6c64 was ff2e6c64, checked in by Sebastian Huber <sebastian.huber@…>, on 08/02/16 at 09:26:56

score: Fix and simplify thread wait locks

There was a subtle race condition in _Thread_queue_Do_extract_locked().
It must first update the thread wait flags and only then restore the default
thread wait state.  In the previous implementation this could, under rare
timing conditions, lead to an ineffective _Thread_Wait_tranquilize() and
thus to a corrupt system state.

Update #2556.
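
The required ordering, shown here as a simplified sketch of the corrected
_Thread_queue_Do_extract_locked() flow below (not the verbatim code, which
also covers the THREAD_QUEUE_INTEND_TO_BLOCK fast path):

    /* 1. Update the thread wait flags first ... */
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );

    /* 2. ... and only then restore the default thread wait state, so that a
     *    concurrent _Thread_Wait_tranquilize() observes the final flags. */
    _Thread_Wait_restore_default( the_thread );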

/**
 * @file
 *
 * @brief Thread Queue Operations
 * @ingroup ScoreThreadQ
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

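/*
 * Registers a link from the source thread queue to the target thread queue
 * in the global registry.  The existing links are first followed from the
 * target onwards: if they lead back to the source, adding the link would
 * close a cycle (a deadlock), so nothing is registered and false is returned.
 */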
static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  link->source = source;
  link->target = target;
  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

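/*
 * On SMP configurations this releases the thread wait locks acquired along
 * the path and removes the corresponding links from the registry.  On
 * uniprocessor configurations there is nothing to release.
 */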
static void _Thread_queue_Path_release( Thread_queue_Path *path )
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &path->Links );
  node = _Chain_Last( &path->Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );

    if ( link->Queue_context.Wait.queue != NULL ) {
      _Thread_queue_Link_remove( link );
    }

    _Thread_Wait_release_critical( link->owner, &link->Queue_context );

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) path;
#endif
}

static bool _Thread_queue_Path_acquire(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue,
  Thread_queue_Path  *path
)
{
  Thread_Control     *owner;

#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &path->Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node( &path->Start.Path_node );
  _Thread_queue_Context_initialize( &path->Start.Queue_context );
  link = &path->Start;

  do {
    _Chain_Append_unprotected( &path->Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Queue_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Queue_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Queue_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Queue_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Queue_context );

        if ( link->Queue_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Queue_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Queue_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Queue_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Queue_context.Wait.queue = NULL;
        _Thread_queue_Path_release( path );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
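  /*
   * On uniprocessor configurations it is sufficient to follow the chain of
   * thread queue owners.  A deadlock is reported if this chain leads back to
   * the enqueuing thread.
   */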
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

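/*
 * Deadlock reaction which reports the deadlock to the caller via the
 * STATUS_DEADLOCK return code.
 */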
void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

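/*
 * Deadlock reaction which terminates the system with a fatal error.
 */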
void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Terminate(
    INTERNAL_ERROR_CORE,
    false,
    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
  );
}

void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

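  /*
   * In multiprocessing configurations a request issued on behalf of a remote
   * thread is represented by a proxy, which is allocated here before the
   * thread is blocked.
   */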
#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );

  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   *  If the thread wants to timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case meaning an indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled, however, we have already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may have already changed our state with respect to the thread
   * queue object.  The request could be satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must not
   * modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}

bool _Thread_queue_Do_extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread
#if defined(RTEMS_MULTIPROCESSING)
  ,
  const Thread_queue_Context    *queue_context
#endif
)
{
  bool success;
  bool unblock;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = queue_context->mp_callout;
  }
#endif

  ( *operations->extract )( queue, the_thread );

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

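  /*
   * The thread wait flags must be fully updated before the default thread
   * wait state is restored.  Otherwise a concurrent _Thread_Wait_tranquilize()
   * may be ineffective and leave the system in a corrupt state.
   */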
  _Thread_Wait_restore_default( the_thread );

  return unblock;
}

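/*
 * Completes an extract operation: if the extract determined that the thread
 * must be unblocked, thread dispatching is disabled, the queue lock is
 * released, and the thread is unblocked with its timeout watchdog removed;
 * otherwise only the queue lock is released.
 */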
void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context
  );
}

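/*
 * Extracts the thread from the thread queue it currently waits on, if any,
 * and unblocks it if necessary.
 */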
void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

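/*
 * Removes the first thread from the thread queue according to the queue
 * operations and returns it, or returns NULL if the queue is empty.
 */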
Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context.Lock_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
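/*
 * Invokes the thread queue callout that was stored in the proxy when it was
 * extracted and then frees the proxy.
 */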
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif