source: rtems/cpukit/score/src/condition.c @ 469dc47

Last change on this file since 469dc47 was ba5ef37, checked in by Sebastian Huber <sebastian.huber@…>, on 04/20/16 at 09:12:53

score: Use _Thread_queue_Flush_critical() for cond

  • Property mode set to 100644
File size: 6.9 KB
Line 
1/*
2 * Copyright (c) 2015, 2016 embedded brains GmbH.  All rights reserved.
3 *
4 *  embedded brains GmbH
5 *  Dornierstr. 4
6 *  82178 Puchheim
7 *  Germany
8 *  <rtems@embedded-brains.de>
9 *
10 * The license and distribution terms for this file may be
11 * found in the file LICENSE in this distribution or at
12 * http://www.rtems.org/license/LICENSE.
13 */
14
15#if HAVE_CONFIG_H
16  #include "config.h"
17#endif
18
19#if HAVE_STRUCT__THREAD_QUEUE_QUEUE
20
21#include <sys/lock.h>
22#include <errno.h>
23
24#include <rtems/score/atomic.h>
25#include <rtems/score/chainimpl.h>
26#include <rtems/score/threadimpl.h>
27#include <rtems/score/threadqimpl.h>
28#include <rtems/score/todimpl.h>
29#include <rtems/score/watchdogimpl.h>
30
31#define CONDITION_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
32
/*
 * Internal representation of a self-contained condition variable.  It
 * overlays struct _Condition_Control from <sys/lock.h>; see
 * _Condition_Get() for the cast between the two.
 */
typedef struct {
  Thread_queue_Syslock_queue Queue;
} Condition_Control;

/*
 * The internal and public structures must be binary compatible, since
 * objects of the public type are reinterpreted as the internal type via a
 * plain cast.  These compile-time checks enforce matching member offset
 * and overall size.
 */
RTEMS_STATIC_ASSERT(
  offsetof( Condition_Control, Queue )
    == offsetof( struct _Condition_Control, _Queue ),
  CONDITION_CONTROL_QUEUE
);

RTEMS_STATIC_ASSERT(
  sizeof( Condition_Control ) == sizeof( struct _Condition_Control ),
  CONDITION_CONTROL_SIZE
);
47
/*
 * Maps the public <sys/lock.h> condition handle to the internal
 * representation.  Safe because the two layouts are verified identical by
 * the static assertions above.
 */
static Condition_Control *_Condition_Get(
  struct _Condition_Control *_condition
)
{
  return (Condition_Control *) _condition;
}
54
/*
 * Acquires the condition's thread queue lock and returns the executing
 * thread.  The _critical suffix follows the file's convention: the caller
 * must already have interrupts disabled (see the _ISR_lock_ISR_disable()
 * calls at every call site).  The executing thread's Potpourri_stats is
 * used as the lock statistics context.
 */
static Thread_Control *_Condition_Queue_acquire_critical(
  Condition_Control *condition,
  ISR_lock_Context  *lock_context
)
{
  Thread_Control *executing;

  executing = _Thread_Executing;
  _Thread_queue_Queue_acquire_critical(
    &condition->Queue.Queue,
    &executing->Potpourri_stats,
    lock_context
  );

  return executing;
}
71
/*
 * Releases the condition's thread queue lock acquired by
 * _Condition_Queue_acquire_critical(), restoring the interrupt state saved
 * in the lock context.
 */
static void _Condition_Queue_release(
  Condition_Control *condition,
  ISR_lock_Context  *lock_context
)
{
  _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
}
79
/*
 * Common wait preparation for all four wait variants.
 *
 * With interrupts already disabled by the caller, this acquires the
 * condition queue lock, disables thread dispatching and enqueues the
 * executing thread on the condition queue.  The thread does not actually
 * block here: blocking happens when the caller re-enables thread
 * dispatching, which it does only AFTER releasing the associated mutex.
 * Because the thread is already enqueued at mutex release time, a signal
 * issued by another thread in that window is not lost.
 *
 * A timeout of 0 presumably means wait without timeout (both untimed
 * variants pass 0) -- TODO confirm against _Thread_queue_Enqueue_critical().
 * On timeout expiry the thread's Wait.return_code is set to ETIMEDOUT;
 * it is pre-set to 0 here so a normal wakeup reports success.
 *
 * Returns the current processor with thread dispatching disabled; the
 * caller must pass it to _Thread_Dispatch_enable().
 */
static Per_CPU_Control *_Condition_Do_wait(
  struct _Condition_Control *_condition,
  Watchdog_Interval          timeout,
  ISR_lock_Context          *lock_context
)
{
  Condition_Control *condition;
  Thread_Control    *executing;
  Per_CPU_Control   *cpu_self;

  condition = _Condition_Get( _condition );
  executing = _Condition_Queue_acquire_critical( condition, lock_context );
  cpu_self = _Thread_Dispatch_disable_critical( lock_context );

  executing->Wait.return_code = 0;
  _Thread_queue_Enqueue_critical(
    &condition->Queue.Queue,
    CONDITION_TQ_OPERATIONS,
    executing,
    STATES_WAITING_FOR_SYS_LOCK_CONDITION,
    timeout,
    ETIMEDOUT,
    lock_context
  );

  return cpu_self;
}
107
/*
 * Waits on the condition variable without a timeout.
 *
 * The executing thread is enqueued first (inside _Condition_Do_wait()),
 * then the mutex is released, then thread dispatching is re-enabled,
 * which is the point where the thread actually blocks.  After wakeup the
 * mutex is re-acquired before returning, per the usual condition variable
 * contract.
 */
void _Condition_Wait(
  struct _Condition_Control *_condition,
  struct _Mutex_Control     *_mutex
)
{
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;

  _ISR_lock_ISR_disable( &lock_context );
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  _Mutex_Release( _mutex );
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_Acquire( _mutex );
}
123
/*
 * Waits on the condition variable with an absolute timeout.
 *
 * Returns 0 on a normal wakeup, EINVAL if the absolute timeout is invalid,
 * or ETIMEDOUT if the timeout is already in the past, is now, or expires
 * while blocked.
 */
int _Condition_Wait_timed(
  struct _Condition_Control *_condition,
  struct _Mutex_Control     *_mutex,
  const struct timespec     *abstime
)
{
  ISR_lock_Context   lock_context;
  Per_CPU_Control   *cpu_self;
  Thread_Control    *executing;
  int                eno;
  Watchdog_Interval  ticks;

  _ISR_lock_ISR_disable( &lock_context );

  /*
   * Convert the absolute timeout to clock ticks.  Invalid or already
   * expired timeouts are reported immediately without blocking or touching
   * the mutex.
   */
  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
    case TOD_ABSOLUTE_TIMEOUT_INVALID:
      _ISR_lock_ISR_enable( &lock_context );
      return EINVAL;
    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
      _ISR_lock_ISR_enable( &lock_context );
      return ETIMEDOUT;
    default:
      break;
  }

  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );

  _Mutex_Release( _mutex );
  /* Capture the executing thread while dispatching is still disabled. */
  executing = cpu_self->executing;
  _Thread_Dispatch_enable( cpu_self );
  /*
   * Read after the blocking wait finished: 0 on signal, ETIMEDOUT on
   * expiry (set by the thread queue timeout handling, see
   * _Condition_Do_wait()).
   */
  eno = (int) executing->Wait.return_code;
  _Mutex_Acquire( _mutex );

  return eno;
}
160
/*
 * Waits on the condition variable with a recursive mutex and no timeout.
 *
 * The mutex nest level is saved and forced to zero so that a single
 * _Mutex_recursive_Release() fully releases the mutex regardless of how
 * deeply the caller has nested its acquisitions; the saved level is
 * restored after the mutex is re-acquired on wakeup.
 */
void _Condition_Wait_recursive(
  struct _Condition_Control       *_condition,
  struct _Mutex_recursive_Control *_mutex
)
{
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;
  unsigned int      nest_level;

  _ISR_lock_ISR_disable( &lock_context );
  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );

  nest_level = _mutex->_nest_level;
  _mutex->_nest_level = 0;
  _Mutex_recursive_Release( _mutex );
  _Thread_Dispatch_enable( cpu_self );
  _Mutex_recursive_Acquire( _mutex );
  _mutex->_nest_level = nest_level;
}
180
/*
 * Waits on the condition variable with a recursive mutex and an absolute
 * timeout.  Combines the nest-level handling of _Condition_Wait_recursive()
 * with the timeout handling of _Condition_Wait_timed().
 *
 * Returns 0 on a normal wakeup, EINVAL for an invalid absolute timeout, or
 * ETIMEDOUT if the timeout is already expired or expires while blocked.
 */
int _Condition_Wait_recursive_timed(
  struct _Condition_Control       *_condition,
  struct _Mutex_recursive_Control *_mutex,
  const struct timespec           *abstime
)
{
  ISR_lock_Context   lock_context;
  Per_CPU_Control   *cpu_self;
  Thread_Control    *executing;
  int                eno;
  unsigned int       nest_level;
  Watchdog_Interval  ticks;

  _ISR_lock_ISR_disable( &lock_context );

  /* Report invalid or already expired timeouts without blocking. */
  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
    case TOD_ABSOLUTE_TIMEOUT_INVALID:
      _ISR_lock_ISR_enable( &lock_context );
      return EINVAL;
    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
      _ISR_lock_ISR_enable( &lock_context );
      return ETIMEDOUT;
    default:
      break;
  }

  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );

  /* Fully release the recursive mutex; restore the nest level on return. */
  nest_level = _mutex->_nest_level;
  _mutex->_nest_level = 0;
  _Mutex_recursive_Release( _mutex );
  /* Capture the executing thread while dispatching is still disabled. */
  executing = cpu_self->executing;
  _Thread_Dispatch_enable( cpu_self );
  /* 0 on signal, ETIMEDOUT on expiry (see _Condition_Do_wait()). */
  eno = (int) executing->Wait.return_code;
  _Mutex_recursive_Acquire( _mutex );
  _mutex->_nest_level = nest_level;

  return eno;
}
221
/*
 * Lock context extended with the number of threads still to be woken.
 * _Condition_Wake() passes it as a plain ISR_lock_Context to
 * _Thread_queue_Flush_critical(); _Condition_Flush_filter() casts it back
 * to read and decrement the count.
 */
typedef struct {
  ISR_lock_Context Base;
  int              count;  /* remaining threads to extract from the queue */
} Condition_Lock_context;
226
227static Thread_Control *_Condition_Flush_filter(
228  Thread_Control     *the_thread,
229  Thread_queue_Queue *queue,
230  ISR_lock_Context   *lock_context
231)
232{
233  Condition_Lock_context *condition_lock_context;
234
235  condition_lock_context = (Condition_Lock_context *) lock_context;
236
237  if ( condition_lock_context->count <= 0 ) {
238    return NULL;
239  }
240
241  --condition_lock_context->count;
242
243  return the_thread;
244}
245
/*
 * Wakes up to count threads waiting on the condition variable.  Backs both
 * _Condition_Signal() (count == 1) and _Condition_Broadcast()
 * (count == INT_MAX).
 */
static void _Condition_Wake( struct _Condition_Control *_condition, int count )
{
  Condition_Control      *condition;
  Condition_Lock_context  lock_context;

  condition = _Condition_Get( _condition );
  _ISR_lock_ISR_disable( &lock_context.Base );
  _Condition_Queue_acquire_critical( condition, &lock_context.Base );

  /*
   * In common use cases of condition variables there are normally no threads
   * on the queue, so check this condition early.
   */
  if ( __predict_true( _Thread_queue_Is_empty( &condition->Queue.Queue ) ) ) {
    _Condition_Queue_release( condition, &lock_context.Base );
    return;
  }

  /*
   * The filter reads the count via the embedded lock context; the flush
   * releases the queue lock on our behalf.
   */
  lock_context.count = count;
  _Thread_queue_Flush_critical(
    &condition->Queue.Queue,
    CONDITION_TQ_OPERATIONS,
    _Condition_Flush_filter,
    NULL,
    0,
    &lock_context.Base
  );
}
274
/* Wakes at most one thread waiting on the condition variable. */
void _Condition_Signal( struct _Condition_Control *_condition )
{
  _Condition_Wake( _condition, 1 );
}
279
/* Wakes all threads waiting on the condition variable (INT_MAX as "all"). */
void _Condition_Broadcast( struct _Condition_Control *_condition )
{
  _Condition_Wake( _condition, INT_MAX );
}
284
285#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
Note: See TracBrowser for help on using the repository browser.