source: rtems/cpukit/score/src/condition.c @ 03b900d

Last change on this file since 03b900d was 03b900d, checked in by Sebastian Huber <sebastian.huber@…>, on 02/18/16 at 07:36:26

score: Replace watchdog handler implementation

Use a red-black tree instead of delta chains.

Close #2344.
Update #2554.
Update #2555.
Close #2606.

/*
 * Copyright (c) 2015 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#if HAVE_STRUCT__THREAD_QUEUE_QUEUE

#include <sys/lock.h>
#include <errno.h>
#include <limits.h>

#include <rtems/score/atomic.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/watchdogimpl.h>

#define CONDITION_TQ_OPERATIONS &_Thread_queue_Operations_FIFO

typedef struct {
  Thread_queue_Syslock_queue Queue;
} Condition_Control;

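/*
 * Layout compatibility checks: the internal Condition_Control must match the
 * struct _Condition_Control defined in <sys/lock.h> in size and in the offset
 * of its thread queue, since the API structure is simply cast to the internal
 * one, see _Condition_Get().
 */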
RTEMS_STATIC_ASSERT(
  offsetof( Condition_Control, Queue )
    == offsetof( struct _Condition_Control, _Queue ),
  CONDITION_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  sizeof( Condition_Control ) == sizeof( struct _Condition_Control ),
  CONDITION_CONTROL_SIZE
);

static Condition_Control *_Condition_Get(
  struct _Condition_Control *_condition
)
{
  return (Condition_Control *) _condition;
}

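/*
 * Acquires the condition variable thread queue lock with interrupts already
 * disabled and returns the executing thread.
 */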
static Thread_Control *_Condition_Queue_acquire_critical(
  Condition_Control *condition,
  ISR_lock_Context  *lock_context
)
{
  Thread_Control *executing;

  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &condition->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}

static void _Condition_Queue_release(
  Condition_Control *condition,
  ISR_lock_Context  *lock_context
)
{
  _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
}

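/*
 * Common wait path: enqueues the executing thread on the condition variable
 * thread queue with the specified watchdog timeout (a timeout of zero
 * requests an untimed wait, see _Condition_Wait()).  Thread dispatching is
 * disabled and the corresponding per-CPU control is returned, so the caller
 * can release its mutex before dispatching is enabled again and the thread
 * actually blocks.
 */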
static Per_CPU_Control *_Condition_Do_wait(
  struct _Condition_Control *_condition,
  Watchdog_Interval          timeout,
  ISR_lock_Context          *lock_context
)
{
  Condition_Control *condition;
  Thread_Control    *executing;
  Per_CPU_Control   *cpu_self;

  condition = _Condition_Get( _condition );
  executing = _Condition_Queue_acquire_critical( condition, lock_context );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );

  executing->Wait.return_code = 0;
  _Thread_queue_Enqueue_critical(
    &condition->Queue.Queue,
    CONDITION_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_CONDITION,
    timeout,
    ETIMEDOUT,
    lock_context
  );

  return cpu_self;
}

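/*
 * Untimed condition wait: the mutex is released only after the executing
 * thread has been enqueued, so a signal issued between the release and the
 * block is not lost; the mutex is re-acquired once the thread is unblocked.
 */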
void _Condition_Wait(
  struct _Condition_Control *_condition,
  struct _Mutex_Control     *_mutex
)
{
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;

  _ISR_lock_ISR_disable( &lock_context );
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  _Mutex_Release( _mutex );
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_Acquire( _mutex );
}

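/*
 * Timed condition wait: the absolute timeout is converted to clock ticks
 * before the thread blocks.  Returns 0 on a wake-up, ETIMEDOUT if the timeout
 * expired or already lies in the past, and EINVAL for an invalid timeout
 * value.
 */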
int _Condition_Wait_timed(
  struct _Condition_Control *_condition,
  struct _Mutex_Control     *_mutex,
  const struct timespec     *abstime
)
{
  ISR_lock_Context   lock_context;
  Per_CPU_Control   *cpu_self;
  Thread_Control    *executing;
  int                eno;
  Watchdog_Interval  ticks;

  _ISR_lock_ISR_disable( &lock_context );

  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
    case TOD_ABSOLUTE_TIMEOUT_INVALID:
      _ISR_lock_ISR_enable( &lock_context );
      return EINVAL;
    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
      _ISR_lock_ISR_enable( &lock_context );
      return ETIMEDOUT;
    default:
      break;
  }

  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );

  _Mutex_Release( _mutex );
  executing = cpu_self->executing;
  _Thread_Dispatch_enable( cpu_self );
  eno = (int) executing->Wait.return_code;
  _Mutex_Acquire( _mutex );

  return eno;
}

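/*
 * Untimed wait variant for recursive mutexes: the nest level is saved and
 * reset to zero so that a single release gives up the mutex completely; it is
 * restored after the mutex has been re-acquired.
 */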
void _Condition_Wait_recursive(
  struct _Condition_Control       *_condition,
  struct _Mutex_recursive_Control *_mutex
)
{
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;
  unsigned int      nest_level;

  _ISR_lock_ISR_disable( &lock_context );
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  nest_level = _mutex->_nest_level;
  _mutex->_nest_level = 0;
  _Mutex_recursive_Release( _mutex );
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_recursive_Acquire( _mutex );
  _mutex->_nest_level = nest_level;
}

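/*
 * Timed wait variant for recursive mutexes: combines the absolute timeout
 * handling of _Condition_Wait_timed() with the nest level save and restore of
 * _Condition_Wait_recursive().
 */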
int _Condition_Wait_recursive_timed(
  struct _Condition_Control       *_condition,
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  ISR_lock_Context   lock_context;
  Per_CPU_Control   *cpu_self;
  Thread_Control    *executing;
  int                eno;
  unsigned int       nest_level;
  Watchdog_Interval  ticks;

  _ISR_lock_ISR_disable( &lock_context );

  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
    case TOD_ABSOLUTE_TIMEOUT_INVALID:
      _ISR_lock_ISR_enable( &lock_context );
      return EINVAL;
    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
      _ISR_lock_ISR_enable( &lock_context );
      return ETIMEDOUT;
    default:
      break;
  }

  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );

  nest_level = _mutex->_nest_level;
  _mutex->_nest_level = 0;
  _Mutex_recursive_Release( _mutex );
  executing = cpu_self->executing;
  _Thread_Dispatch_enable( cpu_self );
  eno = (int) executing->Wait.return_code;
  _Mutex_recursive_Acquire( _mutex );
  _mutex->_nest_level = nest_level;

  return eno;
}

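/*
 * Wakes up to count threads waiting on the condition variable.  Threads are
 * extracted from the queue in FIFO order while the queue lock is held; the
 * ones that still need to be unblocked are collected on a local chain and
 * unblocked afterwards with thread dispatching disabled.  Returns the number
 * of extracted threads.
 */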
static int _Condition_Wake( struct _Condition_Control *_condition, int count )
{
  Condition_Control  *condition;
  ISR_lock_Context    lock_context;
  Thread_queue_Heads *heads;
  Chain_Control       unblock;
  Chain_Node         *node;
  Chain_Node         *tail;
  int                 woken;

  condition = _Condition_Get( _condition );
  _ISR_lock_ISR_disable( &lock_context );
  _Condition_Queue_acquire_critical( condition, &lock_context );

  /*
   * In common use cases of condition variables there are normally no threads
   * on the queue, so check for an empty queue early.
   */
  heads = condition->Queue.Queue.heads;
  if ( __predict_true( heads == NULL ) ) {
    _Condition_Queue_release( condition, &lock_context );

    return 0;
  }

  woken = 0;
  _Chain_Initialize_empty( &unblock );
  while ( count > 0 && heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           do_unblock;

    operations = CONDITION_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    do_unblock = _Thread_queue_Extract_locked(
      &condition->Queue.Queue,
      operations,
      first
    );
    if ( do_unblock ) {
      _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
    }

    ++woken;
    --count;
    heads = condition->Queue.Queue.heads;
  }

  node = _Chain_First( &unblock );
  tail = _Chain_Tail( &unblock );
  if ( node != tail ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _Condition_Queue_release( condition, &lock_context );

    do {
      Thread_Control *thread;
      Chain_Node     *next;

      next = _Chain_Next( node );
      thread = THREAD_CHAIN_NODE_TO_THREAD( node );
      _Thread_Timer_remove( thread );
      _Thread_Unblock( thread );

      node = next;
    } while ( node != tail );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Condition_Queue_release( condition, &lock_context );
  }

  return woken;
}

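/* Wakes at most one waiting thread. */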
void _Condition_Signal( struct _Condition_Control *_condition )
{
  _Condition_Wake( _condition, 1 );
}

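/* Wakes all waiting threads by using INT_MAX as the wake count. */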
void _Condition_Broadcast( struct _Condition_Control *_condition )
{
  _Condition_Wake( _condition, INT_MAX );
}

#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */