/**
 * @file
 *
 * @ingroup rtems_bsd_rtems
 *
 * @brief Simple taskqueue(9) work-alike for the RTEMS BSD layer.
 */

/*
 * COPYRIGHT (c) 1989-2012.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

---|
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems.h>
#include <rtems/error.h>
#include <freebsd/machine/rtems-bsd-taskqueue.h>

---|
/*
#define STATIC static
*/
#undef DEBUG

#ifdef DEBUG
#include <stdio.h>
#ifndef STATIC
#define STATIC
#endif
#else
#ifndef STATIC
#define STATIC static
#endif
#endif

#define TQ_WAKE_EVENT RTEMS_EVENT_0

---|
/* This implementation is extremely simple; we assume
 * that all taskqueues (and as a matter of fact there is
 * only a single one) are manipulated with the rtems
 * bsdnet semaphore held. I.e.,
 *   taskqueue_enqueue()
 *   taskqueue_drain()
 *   etc.
 * are called from an environment that holds the
 * bsdnet semaphore.
 * Likewise, the thread that works the taskqueue
 * holds the semaphore while doing so.
 */

/* Use a single-linked list; 'drain', which would benefit from a
 * double-linked list, is seldom used and performance doesn't
 * matter much there. OTOH, the frequent case of working
 * the list + enqueueing is more efficient for the single-linked
 * list.
struct task {
	struct task *ta_next;
	int ta_pending;
	int ta_priority;
	task_fn ta_fn;
	void *ta_fn_arg;
};
*/

---|
/* Task queue: a singly-linked list of 'struct task' records.
 * 'anchor' is a dummy head element so that list manipulation
 * never has to special-case an empty queue; 'tail' points at the
 * last element and equals &anchor when the queue is empty.
 */
struct taskqueue {
	struct task anchor;	/* dummy head; anchor.ta_next is the first pending task */
	struct task *tail;	/* last list element; == &anchor when the queue is empty */
	tq_enq_fn enq_fn;	/* notifier run by taskqueue_enqueue() after linking a task */
	void *enq_fn_arg;	/* opaque argument passed to enq_fn */
	rtems_id tid;		/* id of the worker task that drains the queue */
};


---|
80 | STATIC struct taskqueue the_taskqueue = { |
---|
81 | { 0, 0, 0, 0, 0 }, |
---|
82 | &the_taskqueue.anchor, |
---|
83 | taskqueue_thread_enqueue, |
---|
84 | &taskqueue_fast, |
---|
85 | 0 |
---|
86 | }; |
---|
87 | |
---|
88 | struct taskqueue *taskqueue_fast = &the_taskqueue; |
---|
89 | struct taskqueue *taskqueue_swi = NULL; |
---|
90 | |
---|
91 | struct taskqueue * |
---|
92 | taskqueue_create(const char *name, int mflags, tq_enq_fn enq_fn, void *arg) |
---|
93 | { |
---|
94 | if ( enq_fn != taskqueue_thread_enqueue ) |
---|
95 | rtems_panic("rtems_taskqueue: attempt to create non-standard TQ; implementation needs to be modified\n"); |
---|
96 | return &the_taskqueue; |
---|
97 | } |
---|
98 | |
---|
99 | struct taskqueue * |
---|
100 | taskqueue_create_fast(const char *name, int mflags, tq_enq_fn enq_fn, void *arg) |
---|
101 | { |
---|
102 | return taskqueue_create(name, mflags, enq_fn, arg); |
---|
103 | } |
---|
104 | |
---|
105 | /* taskqueue_enqueue must be allowed from an ISR; |
---|
106 | * hence, all critical list manipulation must lock out |
---|
107 | * interrupts... |
---|
108 | */ |
---|
109 | int |
---|
110 | taskqueue_enqueue(struct taskqueue *tq, struct task *ta) |
---|
111 | { |
---|
112 | rtems_interrupt_level l; |
---|
113 | |
---|
114 | rtems_interrupt_disable(l); |
---|
115 | if ( 0 == ta->ta_pending ++ ) { |
---|
116 | /* hook into list */ |
---|
117 | ta->ta_next = 0; |
---|
118 | tq->tail->ta_next = ta; |
---|
119 | tq->tail = ta; |
---|
120 | } |
---|
121 | tq->enq_fn(tq->enq_fn_arg); |
---|
122 | rtems_interrupt_enable(l); |
---|
123 | return 0; |
---|
124 | } |
---|
125 | |
---|
126 | void |
---|
127 | taskqueue_thread_enqueue(void *ctxt) |
---|
128 | { |
---|
129 | int dopost; |
---|
130 | /* pointer-to-pointer is what bsd provides; we currently |
---|
131 | * follow the scheme even we don't directly use the argument |
---|
132 | * passed to taskqueue_create... |
---|
133 | */ |
---|
134 | struct taskqueue *tq = *(struct taskqueue **)ctxt; |
---|
135 | /* If this is the first entry on the list then the |
---|
136 | * task needs to be notified... |
---|
137 | */ |
---|
138 | dopost = ( tq->anchor.ta_next == tq->tail && 1 == tq->tail->ta_pending ); |
---|
139 | |
---|
140 | if ( dopost ) |
---|
141 | rtems_event_send(tq->tid, TQ_WAKE_EVENT); |
---|
142 | } |
---|
143 | |
---|
144 | /* Returns 0 on success */ |
---|
145 | int |
---|
146 | taskqueue_start_threads(struct taskqueue **ptq, int count, int prio, const char *fmt, ...) |
---|
147 | { |
---|
148 | if ( count != 1 ) |
---|
149 | rtems_panic("rtems_taskqueue: taskqueue_start_threads cannot currently deal with count != 1\n"); |
---|
150 | |
---|
151 | /* Do (non thread-safe) lazy init as a fallback */ |
---|
152 | if ( ! the_taskqueue.tid ) |
---|
153 | rtems_taskqueue_initialize(); |
---|
154 | return 0; |
---|
155 | } |
---|
156 | |
---|
/* Unlink 'ta' from the queue (if it is linked) and run its callback
 * once per outstanding 'pending' reference, counting down to 1 —
 * the same convention taskqueue_work() uses.
 *
 * NOTE(review): this executes the handler in the *caller's* context
 * rather than waiting for the worker thread to finish it; callers
 * expecting FreeBSD taskqueue_drain() semantics should verify this
 * is acceptable.
 */
void
taskqueue_drain(struct taskqueue *tq, struct task *ta)
{
	rtems_interrupt_level l;
	struct task *p, *q;
	int i;

	/* find predecessor; searching the list should be
	 * safe; an ISR might append a new record to the tail
	 * while we are working but that should be OK.
	 */
	for ( p = &tq->anchor; (q = p->ta_next); p=q ) {
		if ( q == ta ) {
			/* lock out ISRs for the actual unlink */
			rtems_interrupt_disable(l);
			/* found; do work */
			/* remember 'pending' count and extract */
			i = ta->ta_pending;
			ta->ta_pending = 0;
			p->ta_next = ta->ta_next;
			ta->ta_next = 0;
			/* adjust tail */
			if ( tq->tail == q )
				tq->tail = p;
			rtems_interrupt_enable(l);
			/* run the callback once per pending reference */
			for ( ; i>0; i-- ) {
				ta->ta_fn(ta->ta_fn_arg, i);
			}
			return;
		}
	}
}

---|
/* work the task queue and return
 * nonzero if the list is not empty
 * (which means that some callback has
 * rescheduled itself)
 */
static void *
taskqueue_work(struct taskqueue *tq)
{
	rtems_interrupt_level l;
	struct task *p, *q;
	task_fn f;
	void *arg;
	int i;

	/* work off a temporary list in case any callback reschedules
	 * itself or if new tasks are queued from an ISR.
	 */
	rtems_interrupt_disable(l);
	p = tq->anchor.ta_next;

	/* reset the queue to 'empty' before running any callbacks */
	tq->anchor.ta_next = 0;
	tq->tail = &tq->anchor;
	rtems_interrupt_enable(l);

	while ( (q=p) ) {
		/* snapshot and detach the element with interrupts off so
		 * a concurrent ISR enqueue cannot interfere
		 */
		rtems_interrupt_disable(l);
		i = q->ta_pending;
		q->ta_pending = 0;
		/* extract */
		p = q->ta_next;
		q->ta_next = 0;
		f = q->ta_fn;
		arg = q->ta_fn_arg;
		rtems_interrupt_enable(l);
		/* invoke the callback once per pending reference,
		 * counting down to 1
		 */
		for ( ; i>0; i-- ) {
			f(arg, i);
		}
	}
	/* NOTE(review): read without interrupt protection — a benign
	 * race, since the value is only a hint that work was queued
	 * (by a callback or an ISR) while we were draining.
	 */
	return tq->anchor.ta_next;
}

---|
/* The queue is statically allocated, so there is nothing to release;
 * "freeing" merely works off anything still pending.
 */
void
taskqueue_free(struct taskqueue *tq)
{
	(void)taskqueue_work(tq);
}

---|
236 | static void |
---|
237 | taskqueueDoWork(void *arg) |
---|
238 | { |
---|
239 | struct taskqueue *tq = arg; |
---|
240 | rtems_event_set evs; |
---|
241 | rtems_status_code sc; |
---|
242 | while ( 1 ) { |
---|
243 | sc = rtems_event_receive(TQ_WAKE_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT, RTEMS_NO_TIMEOUT, &evs); |
---|
244 | if ( RTEMS_SUCCESSFUL != sc ) { |
---|
245 | rtems_error(sc,"rtems_taskqueue: taskqueueDoWork() unable to receive wakup event\n"); |
---|
246 | rtems_panic("Can't proceed\n"); |
---|
247 | } |
---|
248 | if ( taskqueue_work(tq) ) { |
---|
249 | #if 0 |
---|
250 | /* chance to reschedule */ |
---|
251 | rtems_bsdnet_semaphore_release(); |
---|
252 | rtems_task_wake_after(0); |
---|
253 | rtems_bsdnet_semaphore_obtain(); |
---|
254 | #else |
---|
255 | /* hopefully, releasing the semaphore (as part of bsdnet_event_receive) |
---|
256 | * and obtaining the event (which has been posted already) |
---|
257 | * yields the CPU if necessary... |
---|
258 | */ |
---|
259 | #endif |
---|
260 | } |
---|
261 | } |
---|
262 | } |
---|
263 | |
---|
#ifdef DEBUG
/* Debug-only wrapper that pairs a task with a printable name. */
struct task_dbg {
	struct task t;
	char *nm;
};

/* Three statically allocated test tasks worked by the_task_fn(). */
struct task_dbg taskA = {
	{0},
	"taskA"
};

struct task_dbg taskB = {
	{0},
	"taskB"
};

struct task_dbg taskC = {
	{0},
	"taskC"
};

---|
/* Debug callback: print the task's name and pending count; while
 * 'pending' exceeds 3 it re-enqueues itself to exercise rescheduling
 * from within a callback.
 */
static void the_task_fn(void *arg, int pending)
{
	struct task_dbg *td = arg;
	printf("%s (pending: %i)\n", td->nm, pending);
	/* Test rescheduling */
	if ( pending > 3 )
		taskqueue_enqueue(&the_taskqueue,&td->t);
}

---|
/* Debug aid: print the anchor/tail pointers and every linked task
 * with its pending count.
 */
void taskqueue_dump()
{
	struct task *p;
	printf("Anchor %p, Tail %p\n", &the_taskqueue.anchor, the_taskqueue.tail);
	for ( p = the_taskqueue.anchor.ta_next; p; p=p->ta_next ) {
		printf("%p: (pending %2i, next %p)\n",
			p, p->ta_pending, p->ta_next);
	}
}
#endif

---|
305 | rtems_id |
---|
306 | rtems_taskqueue_initialize() |
---|
307 | { |
---|
308 | #ifdef DEBUG |
---|
309 | TASK_INIT( &taskA.t, 0, the_task_fn, &taskA ); |
---|
310 | TASK_INIT( &taskB.t, 0, the_task_fn, &taskB ); |
---|
311 | TASK_INIT( &taskC.t, 0, the_task_fn, &taskC ); |
---|
312 | #endif |
---|
313 | if ( ! the_taskqueue.tid ) |
---|
314 | the_taskqueue.tid = rtems_bsdnet_newproc("tskq", 10000, taskqueueDoWork, &the_taskqueue); |
---|
315 | return the_taskqueue.tid; |
---|
316 | } |
---|
317 | |
---|
#ifdef DEBUG
/* Cexp module hook (debug builds only): bring up the network stack
 * and then the taskqueue worker when this object is loaded.
 */
void
_cexpModuleInitialize(void *u)
{
	rtems_bsdnet_initialize_network();
	the_taskqueue.tid = rtems_taskqueue_initialize();
}
#endif
---|