source: rtems/cpukit/score/src/threadqenqueue.c @ ee0e4135

Last change on this file since ee0e4135 was ca783bbe, checked in by Sebastian Huber <sebastian.huber@…>, on 08/04/16 at 08:16:14

score: Fix _Thread_queue_Path_release()

It is possible that the owner of the terminal link of a thread queue
path waits on a thread queue. However, this thread queue has no owner,
e.g. a thread queue of a message queue.
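
As a minimal, hypothetical sketch of the situation described above (object creation, task start-up, and priorities are omitted; all names are illustrative and not part of this file): task A owns a priority inheritance mutex and blocks on an empty message queue, whose thread queue has no owner. When task B then blocks on the mutex, task A is the owner of the terminal link of task B's thread queue path, yet task A itself waits on an ownerless thread queue.

#include <rtems.h>

/*
 * Hypothetical objects, assumed to be created elsewhere with
 * rtems_semaphore_create( ..., RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY |
 * RTEMS_INHERIT_PRIORITY, ... ) and rtems_message_queue_create().
 */
extern rtems_id mutex_id;
extern rtems_id msgq_id;

static rtems_task task_a( rtems_task_argument arg )
{
  long   message;
  size_t size;

  (void) arg;

  /* Task A becomes the owner of the mutex ... */
  (void) rtems_semaphore_obtain( mutex_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  /*
   * ... and blocks on the empty message queue (its maximum message size is
   * assumed to fit into a long).  The thread queue of the message queue has
   * no owner.
   */
  (void) rtems_message_queue_receive(
    msgq_id,
    &message,
    &size,
    RTEMS_WAIT,
    RTEMS_NO_TIMEOUT
  );

  (void) rtems_task_delete( RTEMS_SELF );
}

static rtems_task task_b( rtems_task_argument arg )
{
  (void) arg;

  /*
   * Task B blocks on the mutex owned by task A.  The thread queue path of
   * task B ends in a link owned by task A, and task A waits on a thread
   * queue without an owner, which is the situation described in the commit
   * message above.
   */
  (void) rtems_semaphore_obtain( mutex_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT );

  (void) rtems_task_delete( RTEMS_SELF );
}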

/**
 * @file
 *
 * @brief Thread Queue Operations
 * @ingroup ScoreThreadQ
 */

/*
 *  COPYRIGHT (c) 1989-2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.  In
 * this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

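/*
 * Registers a link from source to target in the global registry used for
 * deadlock detection.  The registered links are searched first: if following
 * them from the target leads back to the source, adding this link would close
 * a cycle, so false is returned to indicate a deadlock.
 */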
static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  link->source = source;
  link->target = target;
  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

#define THREAD_QUEUE_LINK_OF_PATH_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Thread_queue_Link, Path_node );

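/*
 * Releases, in reverse order, the thread wait locks acquired along a thread
 * queue path by _Thread_queue_Path_acquire() and removes the links that were
 * registered for deadlock detection.  On non-SMP configurations the path is
 * unused.
 */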
static void _Thread_queue_Path_release( Thread_queue_Path *path )
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &path->Links );
  node = _Chain_Last( &path->Links );

  if ( head != node ) {
    Thread_queue_Link *link;

    /*
     * The terminal link may have an owner which does not wait on a thread
     * queue.
     */

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Queue_context.Wait.queue == NULL ) {
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Queue_context.Lock_context
      );

      node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
      _Chain_Set_off_chain( &link->Path_node );
#endif
    }

    while ( head != node ) {
      /* The other links have an owner which waits on a thread queue */
      link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
      _Assert( link->Queue_context.Wait.queue != NULL );

      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Queue_context.Wait.queue,
        &link->Queue_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Queue_context );

      node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
      _Chain_Set_off_chain( &link->Path_node );
#endif
    }
  }
#else
  (void) path;
#endif
}

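/*
 * Determines whether the thread may block on the thread queue without
 * creating a deadlock.  The chain of thread queue owners is followed starting
 * at the queue; on SMP configurations the thread wait locks along this path
 * are acquired and recorded in the path for a later release.  Returns false
 * if blocking would close a cycle of thread queue owners, i.e. result in a
 * deadlock.
 */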
static bool _Thread_queue_Path_acquire(
  Thread_Control     *the_thread,
  Thread_queue_Queue *queue,
  Thread_queue_Path  *path
)
{
  Thread_Control     *owner;

#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &path->Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node( &path->Start.Path_node );
  _Thread_queue_Context_initialize( &path->Start.Queue_context );
  link = &path->Start;

  do {
    _Chain_Append_unprotected( &path->Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Queue_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Queue_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Queue_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Queue_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Queue_context );

        if ( link->Queue_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Queue_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Queue_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Queue_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Queue_context.Wait.queue = NULL;
        _Thread_queue_Path_release( path );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

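/*
 * Deadlock callouts invoked via queue_context->deadlock_callout in
 * _Thread_queue_Enqueue_critical(): the status variant reports the deadlock
 * through the thread wait return code, the fatal variant terminates the
 * system.
 */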
void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Terminate(
    INTERNAL_ERROR_CORE,
    false,
    INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK
  );
}

void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );

  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   *  Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   *  If the thread wants a timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case: indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may have already changed our state with respect to the thread queue
   * object.  The request could have been satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must not
   * modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel the
   * blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}

bool _Thread_queue_Do_extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread
#if defined(RTEMS_MULTIPROCESSING)
  ,
  const Thread_queue_Context    *queue_context
#endif
)
{
  bool success;
  bool unblock;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = queue_context->mp_callout;
  }
#endif

  ( *operations->extract )( queue, the_thread );

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );

  return unblock;
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context.Lock_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif