1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief RPC Multiplexor for a Multitasking Environment |
---|
5 | * @ingroup libfs |
---|
6 | * |
---|
7 | * This code funnels arbitrary task's UDP/RPC requests |
---|
8 | * through one socket to arbitrary servers. |
---|
9 | * The replies are gathered and dispatched to the |
---|
10 | * requestors. |
---|
11 | * One task handles all the sending and receiving |
---|
12 | * work including retries. |
---|
13 | * It is up to the requestor, however, to do |
---|
14 | * the XDR encoding of the arguments / decoding |
---|
15 | * of the results (except for the RPC header which |
---|
16 | * is handled by the daemon). |
---|
17 | */ |
---|
18 | |
---|
19 | /* |
---|
20 | * Author: Till Straumann <strauman@slac.stanford.edu>, 2002 |
---|
21 | * |
---|
22 | * Authorship |
---|
23 | * ---------- |
---|
24 | * This software (NFS-2 client implementation for RTEMS) was created by |
---|
25 | * Till Straumann <strauman@slac.stanford.edu>, 2002-2007, |
---|
26 | * Stanford Linear Accelerator Center, Stanford University. |
---|
27 | * |
---|
28 | * Acknowledgement of sponsorship |
---|
29 | * ------------------------------ |
---|
30 | * The NFS-2 client implementation for RTEMS was produced by |
---|
31 | * the Stanford Linear Accelerator Center, Stanford University, |
---|
32 | * under Contract DE-AC03-76SFO0515 with the Department of Energy. |
---|
33 | * |
---|
34 | * Government disclaimer of liability |
---|
35 | * ---------------------------------- |
---|
36 | * Neither the United States nor the United States Department of Energy, |
---|
37 | * nor any of their employees, makes any warranty, express or implied, or |
---|
38 | * assumes any legal liability or responsibility for the accuracy, |
---|
39 | * completeness, or usefulness of any data, apparatus, product, or process |
---|
40 | * disclosed, or represents that its use would not infringe privately owned |
---|
41 | * rights. |
---|
42 | * |
---|
43 | * Stanford disclaimer of liability |
---|
44 | * -------------------------------- |
---|
45 | * Stanford University makes no representations or warranties, express or |
---|
46 | * implied, nor assumes any liability for the use of this software. |
---|
47 | * |
---|
48 | * Stanford disclaimer of copyright |
---|
49 | * -------------------------------- |
---|
50 | * Stanford University, owner of the copyright, hereby disclaims its |
---|
51 | * copyright and all other rights in this software. Hence, anyone may |
---|
52 | * freely use it for any purpose without restriction. |
---|
53 | * |
---|
54 | * Maintenance of notices |
---|
55 | * ---------------------- |
---|
56 | * In the interest of clarity regarding the origin and status of this |
---|
57 | * SLAC software, this and all the preceding Stanford University notices |
---|
58 | * are to remain affixed to any copy or derivative of this software made |
---|
59 | * or distributed by the recipient and are to be affixed to any copy of |
---|
60 | * software made or distributed by the recipient that contains a copy or |
---|
61 | * derivative of this software. |
---|
62 | * |
---|
63 | * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03 |
---|
64 | */ |
---|
65 | |
---|
66 | #if HAVE_CONFIG_H |
---|
67 | #include "config.h" |
---|
68 | #endif |
---|
69 | |
---|
70 | #include <inttypes.h> |
---|
71 | |
---|
72 | #include <rtems.h> |
---|
73 | #include <rtems/error.h> |
---|
74 | #include <rtems/rtems_bsdnet.h> |
---|
75 | #include <stdlib.h> |
---|
76 | #include <time.h> |
---|
77 | #include <rpc/rpc.h> |
---|
78 | #include <rpc/pmap_prot.h> |
---|
79 | #include <errno.h> |
---|
80 | #include <sys/ioctl.h> |
---|
81 | #include <assert.h> |
---|
82 | #include <stdio.h> |
---|
83 | #include <errno.h> |
---|
84 | #include <string.h> |
---|
85 | #include <netinet/in.h> |
---|
86 | #include <arpa/inet.h> |
---|
87 | |
---|
88 | #include "rpcio.h" |
---|
89 | |
---|
90 | /****************************************************************/ |
---|
91 | /* CONFIGURABLE PARAMETERS */ |
---|
92 | /****************************************************************/ |
---|
93 | |
---|
94 | #define MBUF_RX /* If defined: use mbuf XDR stream for |
---|
95 | * decoding directly out of mbufs |
---|
96 | * Otherwise, the regular 'recvfrom()' |
---|
97 | * interface will be used involving an |
---|
98 | * extra buffer allocation + copy step. |
---|
99 | */ |
---|
100 | |
---|
101 | #define MBUF_TX /* If defined: avoid copying data when |
---|
102 | * sending. Instead, use a wrapper to |
---|
103 | * 'sosend()' which will point an MBUF |
---|
104 | * directly to our buffer space. |
---|
105 | * Note that the BSD stack does not copy |
---|
106 | * data when fragmenting packets - it |
---|
107 | * merely uses an mbuf chain pointing |
---|
108 | * into different areas of the data. |
---|
109 | * |
---|
110 | * If undefined, the regular 'sendto()' |
---|
111 | * interface is used. |
---|
112 | */ |
---|
113 | |
---|
114 | #undef REJECT_SERVERIP_MISMATCH |
---|
115 | /* If defined, RPC replies must come from the server |
---|
116 | * that was queried. Eric Norum has reported problems |
---|
117 | * with clustered NFS servers. So we disable this |
---|
118 | * reducing paranoia... |
---|
119 | */ |
---|
120 | |
---|
121 | /* daemon task parameters */ |
---|
122 | #define RPCIOD_STACK 10000 |
---|
123 | #define RPCIOD_PRIO 100 /* *fallback* priority */ |
---|
124 | |
---|
125 | /* depth of the message queue for sending |
---|
126 | * RPC requests to the daemon |
---|
127 | */ |
---|
128 | #define RPCIOD_QDEPTH 20 |
---|
129 | |
---|
130 | /* Maximum retry limit for retransmission */ |
---|
131 | #define RPCIOD_RETX_CAP_S 3 /* seconds */ |
---|
132 | |
---|
133 | /* Default timeout for RPC calls */ |
---|
134 | #define RPCIOD_DEFAULT_TIMEOUT (&_rpc_default_timeout) |
---|
135 | static struct timeval _rpc_default_timeout = { 10 /* secs */, 0 /* usecs */ }; |
---|
136 | |
---|
137 | /* how many times should we try to resend a failed |
---|
138 | * transaction with refreshed AUTHs |
---|
139 | */ |
---|
140 | #define RPCIOD_REFRESH 2 |
---|
141 | |
---|
142 | /* Events we are using; the RPC_EVENT |
---|
143 | * MUST NOT be used by any application |
---|
144 | * thread doing RPC IO (e.g. NFS) |
---|
145 | */ |
---|
146 | #define RTEMS_RPC_EVENT RTEMS_EVENT_30 /* THE event used by RPCIO. Every task doing |
---|
147 | * RPC IO will receive this - hence it is |
---|
148 | * RESERVED |
---|
149 | */ |
---|
150 | #define RPCIOD_RX_EVENT RTEMS_EVENT_1 /* Events the RPCIOD is using/waiting for */ |
---|
151 | #define RPCIOD_TX_EVENT RTEMS_EVENT_2 |
---|
152 | #define RPCIOD_KILL_EVENT RTEMS_EVENT_3 /* send to the daemon to kill it */ |
---|
153 | |
---|
154 | #define LD_XACT_HASH 8 /* ld of the size of the transaction hash table */ |
---|
155 | |
---|
156 | |
---|
157 | /* Debugging Flags */ |
---|
158 | |
---|
159 | /* NOTE: defining DEBUG 0 leaves some 'assert()' paranoia checks |
---|
160 | * but produces no output |
---|
161 | */ |
---|
162 | |
---|
163 | #define DEBUG_TRACE_XACT (1<<0) |
---|
164 | #define DEBUG_EVENTS (1<<1) |
---|
165 | #define DEBUG_MALLOC (1<<2) |
---|
166 | #define DEBUG_TIMEOUT (1<<3) |
---|
167 | #define DEBUG_PACKLOSS (1<<4) /* This introduces random, artificial packet losses to test retransmission */ |
---|
168 | |
---|
169 | #define DEBUG_PACKLOSS_FRACT (0xffffffff/10) |
---|
170 | |
---|
171 | /* USE PARENTHESIS WHEN 'or'ing MULTIPLE FLAGS: (DEBUG_XX | DEBUG_YY) */ |
---|
172 | #define DEBUG (0) |
---|
173 | |
---|
174 | /****************************************************************/ |
---|
175 | /* END OF CONFIGURABLE SECTION */ |
---|
176 | /****************************************************************/ |
---|
177 | |
---|
178 | /* prevent rollover of our timers by readjusting the epoch on the fly */ |
---|
179 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
180 | #define RPCIOD_EPOCH_SECS 10 |
---|
181 | #else |
---|
182 | #define RPCIOD_EPOCH_SECS 10000 |
---|
183 | #endif |
---|
184 | |
---|
185 | #ifdef DEBUG |
---|
186 | #define ASSERT(arg) assert(arg) |
---|
187 | #else |
---|
188 | #define ASSERT(arg) if (arg) |
---|
189 | #endif |
---|
190 | |
---|
191 | /****************************************************************/ |
---|
192 | /* MACROS */ |
---|
193 | /****************************************************************/ |
---|
194 | |
---|
195 | |
---|
196 | #define XACT_HASHS (1<<(LD_XACT_HASH)) /* the hash table size derived from the ld */ |
---|
197 | #define XACT_HASH_MSK ((XACT_HASHS)-1) /* mask to extract the hash index from a RPC-XID */ |
---|
198 | |
---|
199 | |
---|
200 | #define MU_LOCK(mutex) do { \ |
---|
201 | assert( \ |
---|
202 | RTEMS_SUCCESSFUL == \ |
---|
203 | rtems_semaphore_obtain( \ |
---|
204 | (mutex), \ |
---|
205 | RTEMS_WAIT, \ |
---|
206 | RTEMS_NO_TIMEOUT \ |
---|
207 | ) ); \ |
---|
208 | } while(0) |
---|
209 | |
---|
210 | #define MU_UNLOCK(mutex) do { \ |
---|
211 | assert( \ |
---|
212 | RTEMS_SUCCESSFUL == \ |
---|
213 | rtems_semaphore_release( \ |
---|
214 | (mutex) \ |
---|
215 | ) ); \ |
---|
216 | } while(0) |
---|
217 | |
---|
218 | #define MU_CREAT(pmutex) do { \ |
---|
219 | assert( \ |
---|
220 | RTEMS_SUCCESSFUL == \ |
---|
221 | rtems_semaphore_create( \ |
---|
222 | rtems_build_name( \ |
---|
223 | 'R','P','C','l' \ |
---|
224 | ), \ |
---|
225 | 1, \ |
---|
226 | MUTEX_ATTRIBUTES, \ |
---|
227 | 0, \ |
---|
228 | (pmutex)) ); \ |
---|
229 | } while (0) |
---|
230 | |
---|
231 | |
---|
232 | #define MU_DESTROY(mutex) do { \ |
---|
233 | assert( \ |
---|
234 | RTEMS_SUCCESSFUL == \ |
---|
235 | rtems_semaphore_delete( \ |
---|
236 | mutex \ |
---|
237 | ) ); \ |
---|
238 | } while (0) |
---|
239 | |
---|
240 | #define MUTEX_ATTRIBUTES (RTEMS_LOCAL | \ |
---|
241 | RTEMS_PRIORITY | \ |
---|
242 | RTEMS_INHERIT_PRIORITY | \ |
---|
243 | RTEMS_BINARY_SEMAPHORE) |
---|
244 | |
---|
245 | #define FIRST_ATTEMPT 0x88888888 /* some time that is never reached */ |
---|
246 | |
---|
247 | /****************************************************************/ |
---|
248 | /* TYPE DEFINITIONS */ |
---|
249 | /****************************************************************/ |
---|
250 | |
---|
251 | typedef rtems_interval TimeoutT; |
---|
252 | |
---|
253 | /* 100000th implementation of a doubly linked list; |
---|
254 | * since only one thread is looking at these, |
---|
255 | * we need no locking |
---|
256 | */ |
---|
/* A list node is embedded as the first member of records the daemon
 * chains up (e.g. RpcUdpXactRec), so a record pointer doubles as a
 * list node pointer.
 */
typedef struct ListNodeRec_ {
	struct ListNodeRec_ *next, *prev;
} ListNodeRec, *ListNode;
---|
260 | |
---|
261 | |
---|
262 | /* Structure representing an RPC server */ |
---|
typedef struct RpcUdpServerRec_ {
	RpcUdpServer		next;			/* linked list of all servers; protected by llock
										 * (the original comment said 'hlock', but every list
										 * manipulation in this file - create/destroy/stats -
										 * actually takes 'llock')
										 */
	union {
	struct sockaddr_in	sin;			/* address as IPv4 sockaddr */
	struct sockaddr		sa;				/* same storage, generic view for sendto() & friends */
	}					addr;
	AUTH				*auth;			/* credentials presented to this server */
	rtems_id			authlock;		/* must MUTEX the auth object - it's not clear
										 * what is better:
										 *   1 having one (MUTEXed) auth per server
										 *     who is shared among all transactions
										 *     using that server
										 *   2 maintaining an AUTH per transaction
										 *     (there are then other options: manage
										 *     XACT pools on a per-server basis instead
										 *     of associating a server with a XACT when
										 *     sending)
										 * experience will show if the current (1)
										 * approach has to be changed.
										 */
	TimeoutT			retry_period;	/* dynamically adjusted retry period
										 * (based on packet roundtrip time)
										 */
	/* STATISTICS (reported by rpcUdpStats()) */
	unsigned long		retrans;		/* how many retries were issued by this server */
	unsigned long		requests;		/* how many requests have been sent */
	unsigned long		timeouts;		/* how many requests have timed out */
	unsigned long		errors;			/* how many errors have occurred (other than timeouts) */
	char				name[20];		/* server's address in IP 'dot' notation
										 * (filled by inet_ntop(); 20 bytes fits any IPv4 dot-quad)
										 */
} RpcUdpServerRec;
---|
293 | |
---|
/* Overlay of a raw RPC message: the first 4 bytes of any RPC message
 * hold the XID; 'buf' views the same storage as raw bytes.
 * NOTE: 'buf[1]' is the pre-C99 trailing-array idiom - the real buffer
 * space is appended when the enclosing XACT is allocated (see
 * rpcUdpXactCreate()).
 */
typedef union RpcBufU_ {
	uint32_t	xid;
	char		buf[1];
} RpcBufU, *RpcBuf;
---|
298 | |
---|
299 | /* RX Buffer implementation; this is either |
---|
300 | * an MBUF chain (MBUF_RX configuration) |
---|
301 | * or a buffer allocated from the heap |
---|
302 | * where recvfrom copies the (encoded) reply |
---|
303 | * to. The XDR routines the copy/decode |
---|
304 | * it into the user's data structures. |
---|
305 | */ |
---|
306 | #ifdef MBUF_RX |
---|
307 | typedef struct mbuf * RxBuf; /* an MBUF chain */ |
---|
308 | static void bufFree(struct mbuf **m); |
---|
309 | #define XID(ibuf) (*(mtod((ibuf), u_long *))) |
---|
310 | extern void xdrmbuf_create(XDR *, struct mbuf *, enum xdr_op); |
---|
311 | #else |
---|
312 | typedef RpcBuf RxBuf; |
---|
313 | #define bufFree(b) do { MY_FREE(*(b)); *(b)=0; } while(0) |
---|
314 | #define XID(ibuf) ((ibuf)->xid) |
---|
315 | #endif |
---|
316 | |
---|
317 | /* A RPC 'transaction' consisting |
---|
318 | * of server and requestor information, |
---|
319 | * buffer space and an XDR object |
---|
320 | * (for encoding arguments). |
---|
321 | */ |
---|
/* NOTE: 'obuf' MUST remain the LAST member - rpcUdpXactCreate()
 * allocates this record with 'size' extra bytes appended so the
 * encoded arguments live directly behind the struct
 * (MY_CALLOC(1, sizeof(*rval) - sizeof(rval->obuf) + size)).
 */
typedef struct RpcUdpXactRec_ {
	ListNodeRec		node;		/* so we can put XACTs on a list */
	RpcUdpServer	server;		/* server this XACT goes to */
	long			lifetime;	/* during the lifetime, retry attempts are made */
	long			tolive;		/* lifetime timer */
	struct rpc_err	status;		/* RPC reply error status */
	long			age;		/* age info; needed to manage retransmission */
	long			trip;		/* record round trip time in ticks */
	rtems_id		requestor;	/* the task waiting for this XACT to complete */
	RpcUdpXactPool	pool;		/* if this XACT belong to a pool, this is it */
	XDR				xdrs;		/* argument encoder stream */
	int				xdrpos;		/* stream position after the (permanent) header */
	xdrproc_t		xres;		/* reply decoder proc - TODO needn't be here */
	caddr_t			pres;		/* reply decoded obj  - TODO needn't be here */
#ifndef MBUF_RX
	int				ibufsize;	/* size of the ibuf (bytes) */
#endif
#ifdef MBUF_TX
	int				refcnt;		/* mbuf external storage reference count */
#endif
	int				obufsize;	/* size of the obuf (bytes) */
	RxBuf			ibuf;		/* pointer to input buffer assigned by daemon */
	RpcBufU			obuf;		/* output buffer (encoded args) APPENDED HERE */
} RpcUdpXactRec;
---|
346 | |
---|
/* A pool of pre-created transactions sharing the same RPC
 * program/version and buffer size.
 * NOTE(review): 'box' is an rtems_id, presumably a message queue used
 * as the container of idle XACTs - the pool routines are not visible
 * in this chunk; confirm there.
 */
typedef struct RpcUdpXactPoolRec_ {
	rtems_id	box;		/* holder of idle transactions (see note above) */
	int			prog;		/* RPC program number the pooled XACTs are set up for */
	int			version;	/* RPC program version */
	int			xactSize;	/* output buffer size of the pooled XACTs */
} RpcUdpXactPoolRec;
---|
353 | |
---|
354 | /* a global hash table where all 'living' transaction |
---|
355 | * objects are registered. |
---|
356 | * A number of bits in a transaction's XID maps 1:1 to |
---|
357 | * an index in this table. Hence, the XACT matching |
---|
358 | * an RPC/UDP reply packet can quickly be found |
---|
359 | * The size of this table imposes a hard limit on the |
---|
360 | * number of all created transactions in the system. |
---|
361 | */ |
---|
static RpcUdpXact	xactHashTbl[XACT_HASHS]={0};	/* slot i holds the live XACT whose XID maps to i;
													 * protected by hlock */
static u_long		xidUpper  [XACT_HASHS]={0};		/* upper XID bits last used for slot i, saved on
													 * destroy so the next user of the slot avoids
													 * reusing a recent XID; protected by hlock */
static unsigned		xidHashSeed = 0 ;				/* seed for picking a starting slot in
													 * rpcUdpXactCreate(); protected by hlock */
---|
365 | |
---|
366 | /* forward declarations */ |
---|
367 | static RpcUdpXact |
---|
368 | sockRcv(void); |
---|
369 | |
---|
370 | static void |
---|
371 | rpcio_daemon(rtems_task_argument); |
---|
372 | |
---|
373 | #ifdef MBUF_TX |
---|
374 | ssize_t |
---|
375 | sendto_nocpy ( |
---|
376 | int s, |
---|
377 | const void *buf, size_t buflen, |
---|
378 | int flags, |
---|
379 | const struct sockaddr *toaddr, int tolen, |
---|
380 | void *closure, |
---|
381 | void (*freeproc)(caddr_t, u_int), |
---|
382 | void (*refproc)(caddr_t, u_int) |
---|
383 | ); |
---|
384 | static void paranoia_free(caddr_t closure, u_int size); |
---|
385 | static void paranoia_ref (caddr_t closure, u_int size); |
---|
386 | #define SENDTO sendto_nocpy |
---|
387 | #else |
---|
388 | #define SENDTO sendto |
---|
389 | #endif |
---|
390 | |
---|
static RpcUdpServer		rpcUdpServers = 0;	/* linked list of all servers; protected by llock */

static int				ourSock = -1;		/* the socket we are using for communication */
static rtems_id			rpciod  = 0;		/* task id of the RPC daemon */
static rtems_id			msgQ    = 0;		/* message queue where the daemon picks up
											 * requests
											 */
/* NOTE(review): with NDEBUG defined, assert() discards its argument,
 * so the MU_LOCK/MU_UNLOCK/MU_CREAT macros (which wrap the semaphore
 * calls in assert()) compile away entirely - that is why the lock ids
 * below are only defined in debug builds.  Whether running with no
 * locking at all in release builds is intended deserves confirmation.
 */
#ifndef NDEBUG
static rtems_id			llock = 0;			/* MUTEX protecting the server list */
static rtems_id			hlock = 0;			/* MUTEX protecting the hash table and the list of servers */
#endif
static rtems_id			fini  = 0;			/* a synchronization semaphore we use during
											 * module cleanup / driver unloading
											 */
static rtems_interval	ticksPerSec;		/* cached system clock rate (WHO IS ASSUMED NOT
											 * TO CHANGE)
											 */

/* priority the daemon task is created with; 0 selects a default
 * (non-static: may be set by the application before startup)
 */
rtems_task_priority rpciodPriority = 0;
---|
410 | |
---|
411 | #if (DEBUG) & DEBUG_MALLOC |
---|
412 | /* malloc wrappers for debugging */ |
---|
413 | static int nibufs = 0; |
---|
414 | |
---|
415 | static inline void *MY_MALLOC(int s) |
---|
416 | { |
---|
417 | if (s) { |
---|
418 | void *rval; |
---|
419 | MU_LOCK(hlock); |
---|
420 | assert(nibufs++ < 2000); |
---|
421 | MU_UNLOCK(hlock); |
---|
422 | assert((rval = malloc(s)) != 0); |
---|
423 | return rval; |
---|
424 | } |
---|
425 | return 0; |
---|
426 | } |
---|
427 | |
---|
428 | static inline void *MY_CALLOC(int n, int s) |
---|
429 | { |
---|
430 | if (s) { |
---|
431 | void *rval; |
---|
432 | MU_LOCK(hlock); |
---|
433 | assert(nibufs++ < 2000); |
---|
434 | MU_UNLOCK(hlock); |
---|
435 | assert((rval = calloc(n,s)) != 0); |
---|
436 | return rval; |
---|
437 | } |
---|
438 | return 0; |
---|
439 | } |
---|
440 | |
---|
441 | |
---|
442 | static inline void MY_FREE(void *p) |
---|
443 | { |
---|
444 | if (p) { |
---|
445 | MU_LOCK(hlock); |
---|
446 | nibufs--; |
---|
447 | MU_UNLOCK(hlock); |
---|
448 | free(p); |
---|
449 | } |
---|
450 | } |
---|
451 | #else |
---|
452 | #define MY_MALLOC malloc |
---|
453 | #define MY_CALLOC calloc |
---|
454 | #define MY_FREE free |
---|
455 | #endif |
---|
456 | |
---|
457 | static inline bool_t |
---|
458 | locked_marshal(RpcUdpServer s, XDR *xdrs) |
---|
459 | { |
---|
460 | bool_t rval; |
---|
461 | MU_LOCK(s->authlock); |
---|
462 | rval = AUTH_MARSHALL(s->auth, xdrs); |
---|
463 | MU_UNLOCK(s->authlock); |
---|
464 | return rval; |
---|
465 | } |
---|
466 | |
---|
467 | /* Locked operations on a server's auth object */ |
---|
468 | static inline bool_t |
---|
469 | locked_validate(RpcUdpServer s, struct opaque_auth *v) |
---|
470 | { |
---|
471 | bool_t rval; |
---|
472 | MU_LOCK(s->authlock); |
---|
473 | rval = AUTH_VALIDATE(s->auth, v); |
---|
474 | MU_UNLOCK(s->authlock); |
---|
475 | return rval; |
---|
476 | } |
---|
477 | |
---|
478 | static inline bool_t |
---|
479 | locked_refresh(RpcUdpServer s) |
---|
480 | { |
---|
481 | bool_t rval; |
---|
482 | MU_LOCK(s->authlock); |
---|
483 | rval = AUTH_REFRESH(s->auth); |
---|
484 | MU_UNLOCK(s->authlock); |
---|
485 | return rval; |
---|
486 | } |
---|
487 | |
---|
488 | /* Create a server object |
---|
489 | * |
---|
490 | */ |
---|
491 | enum clnt_stat |
---|
492 | rpcUdpServerCreate( |
---|
493 | struct sockaddr_in *paddr, |
---|
494 | rpcprog_t prog, |
---|
495 | rpcvers_t vers, |
---|
496 | u_long uid, |
---|
497 | u_long gid, |
---|
498 | RpcUdpServer *psrv |
---|
499 | ) |
---|
500 | { |
---|
501 | RpcUdpServer rval; |
---|
502 | u_short port; |
---|
503 | char hname[MAX_MACHINE_NAME + 1]; |
---|
504 | int theuid, thegid; |
---|
505 | int thegids[NGRPS]; |
---|
506 | gid_t gids[NGROUPS]; |
---|
507 | int len,i; |
---|
508 | AUTH *auth; |
---|
509 | enum clnt_stat pmap_err; |
---|
510 | struct pmap pmaparg; |
---|
511 | |
---|
512 | if ( gethostname(hname, MAX_MACHINE_NAME) ) { |
---|
513 | fprintf(stderr, |
---|
514 | "RPCIO - error: I have no hostname ?? (%s)\n", |
---|
515 | strerror(errno)); |
---|
516 | return RPC_UNKNOWNHOST; |
---|
517 | } |
---|
518 | |
---|
519 | if ( (len = getgroups(NGROUPS, gids) < 0 ) ) { |
---|
520 | fprintf(stderr, |
---|
521 | "RPCIO - error: I unable to get group ids (%s)\n", |
---|
522 | strerror(errno)); |
---|
523 | return RPC_FAILED; |
---|
524 | } |
---|
525 | |
---|
526 | if ( len > NGRPS ) |
---|
527 | len = NGRPS; |
---|
528 | |
---|
529 | for (i=0; i<len; i++) |
---|
530 | thegids[i] = (int)gids[i]; |
---|
531 | |
---|
532 | theuid = (int) ((RPCIOD_DEFAULT_ID == uid) ? geteuid() : uid); |
---|
533 | thegid = (int) ((RPCIOD_DEFAULT_ID == gid) ? getegid() : gid); |
---|
534 | |
---|
535 | if ( !(auth = authunix_create(hname, theuid, thegid, len, thegids)) ) { |
---|
536 | fprintf(stderr, |
---|
537 | "RPCIO - error: unable to create RPC AUTH\n"); |
---|
538 | return RPC_FAILED; |
---|
539 | } |
---|
540 | |
---|
541 | /* if they specified no port try to ask the portmapper */ |
---|
542 | if (!paddr->sin_port) { |
---|
543 | |
---|
544 | paddr->sin_port = htons(PMAPPORT); |
---|
545 | |
---|
546 | pmaparg.pm_prog = prog; |
---|
547 | pmaparg.pm_vers = vers; |
---|
548 | pmaparg.pm_prot = IPPROTO_UDP; |
---|
549 | pmaparg.pm_port = 0; /* not needed or used */ |
---|
550 | |
---|
551 | |
---|
552 | /* dont use non-reentrant pmap_getport ! */ |
---|
553 | |
---|
554 | pmap_err = rpcUdpCallRp( |
---|
555 | paddr, |
---|
556 | PMAPPROG, |
---|
557 | PMAPVERS, |
---|
558 | PMAPPROC_GETPORT, |
---|
559 | xdr_pmap, |
---|
560 | &pmaparg, |
---|
561 | xdr_u_short, |
---|
562 | &port, |
---|
563 | uid, |
---|
564 | gid, |
---|
565 | 0); |
---|
566 | |
---|
567 | if ( RPC_SUCCESS != pmap_err ) { |
---|
568 | paddr->sin_port = 0; |
---|
569 | return pmap_err; |
---|
570 | } |
---|
571 | |
---|
572 | paddr->sin_port = htons(port); |
---|
573 | } |
---|
574 | |
---|
575 | if (0==paddr->sin_port) { |
---|
576 | return RPC_PROGNOTREGISTERED; |
---|
577 | } |
---|
578 | |
---|
579 | rval = (RpcUdpServer)MY_MALLOC(sizeof(*rval)); |
---|
580 | memset(rval, 0, sizeof(*rval)); |
---|
581 | |
---|
582 | if (!inet_ntop(AF_INET, &paddr->sin_addr, rval->name, sizeof(rval->name))) |
---|
583 | sprintf(rval->name,"?.?.?.?"); |
---|
584 | rval->addr.sin = *paddr; |
---|
585 | |
---|
586 | /* start with a long retransmission interval - it |
---|
587 | * will be adapted dynamically |
---|
588 | */ |
---|
589 | rval->retry_period = RPCIOD_RETX_CAP_S * ticksPerSec; |
---|
590 | |
---|
591 | rval->auth = auth; |
---|
592 | |
---|
593 | MU_CREAT( &rval->authlock ); |
---|
594 | |
---|
595 | /* link into list */ |
---|
596 | MU_LOCK( llock ); |
---|
597 | rval->next = rpcUdpServers; |
---|
598 | rpcUdpServers = rval; |
---|
599 | MU_UNLOCK( llock ); |
---|
600 | |
---|
601 | *psrv = rval; |
---|
602 | return RPC_SUCCESS; |
---|
603 | } |
---|
604 | |
---|
605 | void |
---|
606 | rpcUdpServerDestroy(RpcUdpServer s) |
---|
607 | { |
---|
608 | RpcUdpServer prev; |
---|
609 | if (!s) |
---|
610 | return; |
---|
611 | /* we should probably verify (but how?) that nobody |
---|
612 | * (at least: no outstanding XACTs) is using this |
---|
613 | * server; |
---|
614 | */ |
---|
615 | |
---|
616 | /* remove from server list */ |
---|
617 | MU_LOCK(llock); |
---|
618 | prev = rpcUdpServers; |
---|
619 | if ( s == prev ) { |
---|
620 | rpcUdpServers = s->next; |
---|
621 | } else { |
---|
622 | for ( ; prev ; prev = prev->next) { |
---|
623 | if (prev->next == s) { |
---|
624 | prev->next = s->next; |
---|
625 | break; |
---|
626 | } |
---|
627 | } |
---|
628 | } |
---|
629 | MU_UNLOCK(llock); |
---|
630 | |
---|
631 | /* MUST have found it */ |
---|
632 | assert(prev); |
---|
633 | |
---|
634 | auth_destroy(s->auth); |
---|
635 | |
---|
636 | MU_DESTROY(s->authlock); |
---|
637 | MY_FREE(s); |
---|
638 | } |
---|
639 | |
---|
640 | int |
---|
641 | rpcUdpStats(FILE *f) |
---|
642 | { |
---|
643 | RpcUdpServer s; |
---|
644 | |
---|
645 | if (!f) f = stdout; |
---|
646 | |
---|
647 | fprintf(f,"RPCIOD statistics:\n"); |
---|
648 | |
---|
649 | MU_LOCK(llock); |
---|
650 | for (s = rpcUdpServers; s; s=s->next) { |
---|
651 | fprintf(f,"\nServer -- %s:\n", s->name); |
---|
652 | fprintf(f," requests sent: %10ld, retransmitted: %10ld\n", |
---|
653 | s->requests, s->retrans); |
---|
654 | fprintf(f," timed out: %10ld, send errors: %10ld\n", |
---|
655 | s->timeouts, s->errors); |
---|
656 | fprintf(f," current retransmission interval: %dms\n", |
---|
657 | (unsigned)(s->retry_period * 1000 / ticksPerSec) ); |
---|
658 | } |
---|
659 | MU_UNLOCK(llock); |
---|
660 | |
---|
661 | return 0; |
---|
662 | } |
---|
663 | |
---|
/* Create a transaction object for (program, version) with an output
 * buffer of at least 'size' bytes (0 selects UDPMSGSIZE; rounded up
 * to a word multiple).  The RPC call header is pre-encoded into the
 * buffer; its end position is remembered in 'xdrpos' so rpcUdpSend()
 * can rewind and append per-call arguments.
 * Also claims a free slot in the global XID hash table; returns NULL
 * if allocation fails, the header can't be encoded, the table is
 * full, or the daemon is shutting down (msgQ == 0).
 */
RpcUdpXact
rpcUdpXactCreate(
	u_long	program,
	u_long	version,
	u_long	size
	)
{
RpcUdpXact		rval=0;
struct rpc_msg	header;
register int	i,j;

	if (!size)
		size = UDPMSGSIZE;
	/* word align */
	size = (size + 3) & ~3;

	/* 'obuf' is the last member; allocate 'size' bytes in its place */
	rval = (RpcUdpXact)MY_CALLOC(1,sizeof(*rval) - sizeof(rval->obuf) + size);

	if (rval) {

		header.rm_xid             = 0;
		header.rm_direction       = CALL;
		header.rm_call.cb_rpcvers = RPC_MSG_VERSION;
		header.rm_call.cb_prog    = program;
		header.rm_call.cb_vers    = version;
		xdrmem_create(&(rval->xdrs), rval->obuf.buf, size, XDR_ENCODE);

		if (!xdr_callhdr(&(rval->xdrs), &header)) {
			MY_FREE(rval);
			return 0;
		}
		/* pick a free table slot and initialize the XID */
		/* NOTE(review): this first assignment is dead - it is
		 * unconditionally overwritten two lines below; presumably a
		 * leftover from an earlier XID scheme.
		 */
		rval->obuf.xid = time(0) ^ (uintptr_t)rval;
		MU_LOCK(hlock);
		rval->obuf.xid = (xidHashSeed++ ^ ((uintptr_t)rval>>10)) & XACT_HASH_MSK;
		i=j=(rval->obuf.xid & XACT_HASH_MSK);
		if (msgQ) {
			/* if there's no message queue, refuse to
			 * give them transactions; we might be in the process to
			 * go away...
			 */
			/* linear-probe for a free slot, starting after the seeded
			 * index; 'j' flags success by being set to -1
			 */
			do {
				i=(i+1) & XACT_HASH_MSK; /* cheap modulo */
				if (!xactHashTbl[i]) {
#if (DEBUG) & DEBUG_TRACE_XACT
					/* NOTE(review): '%x' with a pointer argument - should be %p (debug-only) */
					fprintf(stderr,"RPCIO: entering index %i, val %x\n",i,rval);
#endif
					xactHashTbl[i]=rval;
					j=-1;
					break;
				}
			} while (i!=j);
		}
		MU_UNLOCK(hlock);
		if (i==j) {
			/* wrapped around (table full) or msgQ was 0 */
			XDR_DESTROY(&rval->xdrs);
			MY_FREE(rval);
			return 0;
		}
		/* final XID: slot index in the low bits, last-used upper bits
		 * for this slot (saved by rpcUdpXactDestroy()) in the rest
		 */
		rval->obuf.xid = xidUpper[i] | i;
		rval->xdrpos   = XDR_GETPOS(&(rval->xdrs));
		rval->obufsize = size;
	}
	return rval;
}
---|
729 | |
---|
730 | void |
---|
731 | rpcUdpXactDestroy(RpcUdpXact xact) |
---|
732 | { |
---|
733 | int i = xact->obuf.xid & XACT_HASH_MSK; |
---|
734 | |
---|
735 | #if (DEBUG) & DEBUG_TRACE_XACT |
---|
736 | fprintf(stderr,"RPCIO: removing index %i, val %x\n",i,xact); |
---|
737 | #endif |
---|
738 | |
---|
739 | ASSERT( xactHashTbl[i]==xact ); |
---|
740 | |
---|
741 | MU_LOCK(hlock); |
---|
742 | xactHashTbl[i]=0; |
---|
743 | /* remember XID we used last time so we can avoid |
---|
744 | * reusing the same one (incremented by rpcUdpSend routine) |
---|
745 | */ |
---|
746 | xidUpper[i] = xact->obuf.xid & ~XACT_HASH_MSK; |
---|
747 | MU_UNLOCK(hlock); |
---|
748 | |
---|
749 | bufFree(&xact->ibuf); |
---|
750 | |
---|
751 | XDR_DESTROY(&xact->xdrs); |
---|
752 | MY_FREE(xact); |
---|
753 | } |
---|
754 | |
---|
755 | |
---|
756 | |
---|
757 | /* Send a transaction, i.e. enqueue it to the |
---|
758 | * RPC daemon who will actually send it. |
---|
759 | */ |
---|
760 | enum clnt_stat |
---|
761 | rpcUdpSend( |
---|
762 | RpcUdpXact xact, |
---|
763 | RpcUdpServer srvr, |
---|
764 | struct timeval *timeout, |
---|
765 | u_long proc, |
---|
766 | xdrproc_t xres, caddr_t pres, |
---|
767 | xdrproc_t xargs, caddr_t pargs, |
---|
768 | ... |
---|
769 | ) |
---|
770 | { |
---|
771 | register XDR *xdrs; |
---|
772 | unsigned long ms; |
---|
773 | va_list ap; |
---|
774 | |
---|
775 | va_start(ap,pargs); |
---|
776 | |
---|
777 | if (!timeout) |
---|
778 | timeout = RPCIOD_DEFAULT_TIMEOUT; |
---|
779 | |
---|
780 | ms = 1000 * timeout->tv_sec + timeout->tv_usec/1000; |
---|
781 | |
---|
782 | /* round lifetime to closest # of ticks */ |
---|
783 | xact->lifetime = (ms * ticksPerSec + 500) / 1000; |
---|
784 | if ( 0 == xact->lifetime ) |
---|
785 | xact->lifetime = 1; |
---|
786 | |
---|
787 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
788 | { |
---|
789 | static int once=0; |
---|
790 | if (!once++) { |
---|
791 | fprintf(stderr, |
---|
792 | "Initial lifetime: %i (ticks)\n", |
---|
793 | xact->lifetime); |
---|
794 | } |
---|
795 | } |
---|
796 | #endif |
---|
797 | |
---|
798 | xact->tolive = xact->lifetime; |
---|
799 | |
---|
800 | xact->xres = xres; |
---|
801 | xact->pres = pres; |
---|
802 | xact->server = srvr; |
---|
803 | |
---|
804 | xdrs = &xact->xdrs; |
---|
805 | xdrs->x_op = XDR_ENCODE; |
---|
806 | /* increment transaction ID */ |
---|
807 | xact->obuf.xid += XACT_HASHS; |
---|
808 | XDR_SETPOS(xdrs, xact->xdrpos); |
---|
809 | if ( !XDR_PUTLONG(xdrs,(long*)&proc) || !locked_marshal(srvr, xdrs) || |
---|
810 | !xargs(xdrs, pargs) ) { |
---|
811 | va_end(ap); |
---|
812 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
813 | } |
---|
814 | while ((xargs=va_arg(ap,xdrproc_t))) { |
---|
815 | if (!xargs(xdrs, va_arg(ap,caddr_t))) |
---|
816 | va_end(ap); |
---|
817 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
818 | } |
---|
819 | |
---|
820 | va_end(ap); |
---|
821 | |
---|
822 | rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor); |
---|
823 | if ( rtems_message_queue_send( msgQ, &xact, sizeof(xact)) ) { |
---|
824 | return RPC_CANTSEND; |
---|
825 | } |
---|
826 | /* wakeup the rpciod */ |
---|
827 | ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) ); |
---|
828 | |
---|
829 | return RPC_SUCCESS; |
---|
830 | } |
---|
831 | |
---|
832 | /* Block for the RPC reply to an outstanding |
---|
833 | * transaction. |
---|
834 | * The caller is woken by the RPC daemon either |
---|
835 | * upon reception of the reply or on timeout. |
---|
836 | */ |
---|
/* Block for the RPC reply to an outstanding transaction.
 * The caller is woken by the RPC daemon (via RTEMS_RPC_EVENT) either
 * upon reception of the reply or on timeout; the daemon has by then
 * filled in 'xact->status' and, on success, 'xact->ibuf'.
 * Decodes the reply into xact->pres using xact->xres and validates
 * the server's verifier.  Returns the final RPC status.
 * NOTE(review): must be called by the task that issued rpcUdpSend()
 * for this XACT - the daemon signals 'xact->requestor'; confirm.
 */
enum clnt_stat
rpcUdpRcv(RpcUdpXact xact)
{
int					refresh;
XDR					reply_xdrs;
struct rpc_msg		reply_msg;
rtems_status_code	status;
rtems_event_set		gotEvents;

	refresh = 0;

	do {

	/* block for the reply */
	status = rtems_event_receive(
			RTEMS_RPC_EVENT,
			RTEMS_WAIT | RTEMS_EVENT_ANY,
			RTEMS_NO_TIMEOUT,
			&gotEvents);
	ASSERT( status == RTEMS_SUCCESSFUL );

	/* non-zero status was set by the daemon (timeout / send error);
	 * there is no reply buffer to decode in that case
	 */
	if (xact->status.re_status) {
#ifdef MBUF_RX
		/* add paranoia */
		ASSERT( !xact->ibuf );
#endif
		return xact->status.re_status;
	}

	/* attach a decoder stream to the received reply */
#ifdef MBUF_RX
	xdrmbuf_create(&reply_xdrs, xact->ibuf, XDR_DECODE);
#else
	xdrmem_create(&reply_xdrs, xact->ibuf->buf, xact->ibufsize, XDR_DECODE);
#endif

	reply_msg.acpted_rply.ar_verf          = _null_auth;
	reply_msg.acpted_rply.ar_results.where = xact->pres;
	reply_msg.acpted_rply.ar_results.proc  = xact->xres;

	if (xdr_replymsg(&reply_xdrs, &reply_msg)) {
		/* OK */
		_seterr_reply(&reply_msg, &xact->status);
		if (RPC_SUCCESS == xact->status.re_status) {
			/* check the verifier the server sent back */
			if ( !locked_validate(xact->server,
								&reply_msg.acpted_rply.ar_verf) ) {
				xact->status.re_status = RPC_AUTHERROR;
				xact->status.re_why    = AUTH_INVALIDRESP;
			}
			/* free storage the decoder may have allocated for the verifier */
			if (reply_msg.acpted_rply.ar_verf.oa_base) {
				reply_xdrs.x_op = XDR_FREE;
				xdr_opaque_auth(&reply_xdrs, &reply_msg.acpted_rply.ar_verf);
			}
			refresh = 0;
		} else {
			/* should we try to refresh our credentials ? */
			if ( !refresh ) {
				/* had never tried before */
				refresh = RPCIOD_REFRESH;
			}
		}
	} else {
		/* decode failed; run the decoder in FREE mode to release
		 * anything partially allocated
		 */
		reply_xdrs.x_op        = XDR_FREE;
		xdr_replymsg(&reply_xdrs, &reply_msg);
		xact->status.re_status = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&reply_xdrs);

	bufFree(&xact->ibuf);

#ifndef MBUF_RX
	xact->ibufsize = 0;
#endif

	if (refresh && locked_refresh(xact->server)) {
		/* re-enqueue the XACT with refreshed credentials */
		rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor);
		if ( rtems_message_queue_send(msgQ, &xact, sizeof(xact)) ) {
			return RPC_CANTSEND;
		}
		/* wakeup the rpciod */
		fprintf(stderr,"RPCIO INFO: refreshing my AUTH\n");
		ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) );
	}

	/* NOTE(review): the '0 &&' short-circuit makes this loop body run
	 * exactly once - so the refreshed-AUTH retransmission just above
	 * is enqueued, but this routine returns the (failed) status
	 * without waiting for the retried reply.  Looks like the retry
	 * loop was deliberately or accidentally disabled; confirm intent
	 * before changing.
	 */
	} while ( 0 && refresh-- > 0 );

	return xact->status.re_status;
}
---|
924 | |
---|
925 | |
---|
926 | /* On RTEMS, I'm told to avoid select(); this seems to |
---|
927 | * be more efficient |
---|
928 | */ |
---|
929 | static void |
---|
930 | rxWakeupCB(struct socket *sock, void *arg) |
---|
931 | { |
---|
932 | rtems_id *rpciod = (rtems_id*) arg; |
---|
933 | rtems_event_send(*rpciod, RPCIOD_RX_EVENT); |
---|
934 | } |
---|
935 | |
---|
936 | int |
---|
937 | rpcUdpInit(void) |
---|
938 | { |
---|
939 | int s; |
---|
940 | rtems_status_code status; |
---|
941 | int noblock = 1; |
---|
942 | struct sockwakeup wkup; |
---|
943 | |
---|
944 | if (ourSock < 0) { |
---|
945 | fprintf(stderr,"RTEMS-RPCIOD $Release$, " \ |
---|
946 | "Till Straumann, Stanford/SLAC/SSRL 2002, " \ |
---|
947 | "See LICENSE file for licensing info.\n"); |
---|
948 | |
---|
949 | ourSock=socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); |
---|
950 | if (ourSock>=0) { |
---|
951 | bindresvport(ourSock,(struct sockaddr_in*)0); |
---|
952 | s = ioctl(ourSock, FIONBIO, (char*)&noblock); |
---|
953 | assert( s == 0 ); |
---|
954 | /* assume nobody tampers with the clock !! */ |
---|
955 | ticksPerSec = rtems_clock_get_ticks_per_second(); |
---|
956 | MU_CREAT( &hlock ); |
---|
957 | MU_CREAT( &llock ); |
---|
958 | |
---|
959 | if ( !rpciodPriority ) { |
---|
960 | /* use configured networking priority */ |
---|
961 | if ( ! (rpciodPriority = rtems_bsdnet_config.network_task_priority) ) |
---|
962 | rpciodPriority = RPCIOD_PRIO; /* fallback value */ |
---|
963 | } |
---|
964 | |
---|
965 | status = rtems_task_create( |
---|
966 | rtems_build_name('R','P','C','d'), |
---|
967 | rpciodPriority, |
---|
968 | RPCIOD_STACK, |
---|
969 | RTEMS_DEFAULT_MODES, |
---|
970 | /* fprintf saves/restores FP registers on PPC :-( */ |
---|
971 | RTEMS_DEFAULT_ATTRIBUTES | RTEMS_FLOATING_POINT, |
---|
972 | &rpciod); |
---|
973 | assert( status == RTEMS_SUCCESSFUL ); |
---|
974 | |
---|
975 | wkup.sw_pfn = rxWakeupCB; |
---|
976 | wkup.sw_arg = &rpciod; |
---|
977 | assert( 0==setsockopt(ourSock, SOL_SOCKET, SO_RCVWAKEUP, &wkup, sizeof(wkup)) ); |
---|
978 | status = rtems_message_queue_create( |
---|
979 | rtems_build_name('R','P','C','q'), |
---|
980 | RPCIOD_QDEPTH, |
---|
981 | sizeof(RpcUdpXact), |
---|
982 | RTEMS_DEFAULT_ATTRIBUTES, |
---|
983 | &msgQ); |
---|
984 | assert( status == RTEMS_SUCCESSFUL ); |
---|
985 | status = rtems_task_start( rpciod, rpcio_daemon, 0 ); |
---|
986 | assert( status == RTEMS_SUCCESSFUL ); |
---|
987 | |
---|
988 | } else { |
---|
989 | return -1; |
---|
990 | } |
---|
991 | } |
---|
992 | return 0; |
---|
993 | } |
---|
994 | |
---|
/* Shut down the RPCIO layer.
 *
 * Sends the KILL event to the daemon and waits (up to 5 seconds) on a
 * temporary semaphore for it to acknowledge. The daemon only agrees to
 * die when no transactions are outstanding; in that case it clears the
 * global msgQ before releasing 'fini'.
 *
 * Returns 0 on success; nonzero (msgQ still set) means the daemon
 * refused to terminate because transactions are still circulating.
 */
int
rpcUdpCleanup(void)
{
	rtems_semaphore_create(
			rtems_build_name('R','P','C','f'),
			0,
			RTEMS_DEFAULT_ATTRIBUTES,
			0,
			&fini);
	rtems_event_send(rpciod, RPCIOD_KILL_EVENT);
	/* synchronize with daemon */
	rtems_semaphore_obtain(fini, RTEMS_WAIT, 5*ticksPerSec);
	/* if the message queue is still there, something went wrong;
	 * msgQ == 0 is the success indication (daemon cleaned up and
	 * suspended itself) - only then is it safe to delete the task
	 */
	if (!msgQ) {
		rtems_task_delete(rpciod);
	}
	rtems_semaphore_delete(fini);
	return (msgQ !=0);
}
---|
1014 | |
---|
1015 | /* Another API - simpler but less efficient. |
---|
1016 | * For each RPCall, a server and a Xact |
---|
1017 | * are created and destroyed on the fly. |
---|
1018 | * |
---|
1019 | * This should be used for infrequent calls |
---|
1020 | * (e.g. a NFS mount request). |
---|
1021 | * |
---|
1022 | * This is roughly compatible with the original |
---|
1023 | * clnt_call() etc. API - but it uses our |
---|
1024 | * daemon and is fully reentrant. |
---|
1025 | */ |
---|
1026 | enum clnt_stat |
---|
1027 | rpcUdpClntCreate( |
---|
1028 | struct sockaddr_in *psaddr, |
---|
1029 | rpcprog_t prog, |
---|
1030 | rpcvers_t vers, |
---|
1031 | u_long uid, |
---|
1032 | u_long gid, |
---|
1033 | RpcUdpClnt *pclnt |
---|
1034 | ) |
---|
1035 | { |
---|
1036 | RpcUdpXact x; |
---|
1037 | RpcUdpServer s; |
---|
1038 | enum clnt_stat err; |
---|
1039 | |
---|
1040 | if ( RPC_SUCCESS != (err=rpcUdpServerCreate(psaddr, prog, vers, uid, gid, &s)) ) |
---|
1041 | return err; |
---|
1042 | |
---|
1043 | if ( !(x=rpcUdpXactCreate(prog, vers, UDPMSGSIZE)) ) { |
---|
1044 | rpcUdpServerDestroy(s); |
---|
1045 | return RPC_FAILED; |
---|
1046 | } |
---|
1047 | /* TODO: could maintain a server cache */ |
---|
1048 | |
---|
1049 | x->server = s; |
---|
1050 | |
---|
1051 | *pclnt = x; |
---|
1052 | |
---|
1053 | return RPC_SUCCESS; |
---|
1054 | } |
---|
1055 | |
---|
1056 | void |
---|
1057 | rpcUdpClntDestroy(RpcUdpClnt xact) |
---|
1058 | { |
---|
1059 | rpcUdpServerDestroy(xact->server); |
---|
1060 | rpcUdpXactDestroy(xact); |
---|
1061 | } |
---|
1062 | |
---|
1063 | enum clnt_stat |
---|
1064 | rpcUdpClntCall( |
---|
1065 | RpcUdpClnt xact, |
---|
1066 | u_long proc, |
---|
1067 | XdrProcT xargs, |
---|
1068 | CaddrT pargs, |
---|
1069 | XdrProcT xres, |
---|
1070 | CaddrT pres, |
---|
1071 | struct timeval *timeout |
---|
1072 | ) |
---|
1073 | { |
---|
1074 | enum clnt_stat stat; |
---|
1075 | |
---|
1076 | if ( (stat = rpcUdpSend(xact, xact->server, timeout, proc, |
---|
1077 | xres, pres, |
---|
1078 | xargs, pargs, |
---|
1079 | 0)) ) { |
---|
1080 | fprintf(stderr,"RPCIO Send failed: %i\n",stat); |
---|
1081 | return stat; |
---|
1082 | } |
---|
1083 | return rpcUdpRcv(xact); |
---|
1084 | } |
---|
1085 | |
---|
1086 | /* a yet simpler interface */ |
---|
1087 | enum clnt_stat |
---|
1088 | rpcUdpCallRp( |
---|
1089 | struct sockaddr_in *psrvr, |
---|
1090 | u_long prog, |
---|
1091 | u_long vers, |
---|
1092 | u_long proc, |
---|
1093 | XdrProcT xargs, |
---|
1094 | CaddrT pargs, |
---|
1095 | XdrProcT xres, |
---|
1096 | CaddrT pres, |
---|
1097 | u_long uid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1098 | u_long gid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1099 | struct timeval *timeout /* NULL picks default */ |
---|
1100 | ) |
---|
1101 | { |
---|
1102 | RpcUdpClnt clp; |
---|
1103 | enum clnt_stat stat; |
---|
1104 | |
---|
1105 | stat = rpcUdpClntCreate( |
---|
1106 | psrvr, |
---|
1107 | prog, |
---|
1108 | vers, |
---|
1109 | uid, |
---|
1110 | gid, |
---|
1111 | &clp); |
---|
1112 | |
---|
1113 | if ( RPC_SUCCESS != stat ) |
---|
1114 | return stat; |
---|
1115 | |
---|
1116 | stat = rpcUdpClntCall( |
---|
1117 | clp, |
---|
1118 | proc, |
---|
1119 | xargs, pargs, |
---|
1120 | xres, pres, |
---|
1121 | timeout); |
---|
1122 | |
---|
1123 | rpcUdpClntDestroy(clp); |
---|
1124 | |
---|
1125 | return stat; |
---|
1126 | } |
---|
1127 | |
---|
1128 | /* linked list primitives */ |
---|
1129 | static void |
---|
1130 | nodeXtract(ListNode n) |
---|
1131 | { |
---|
1132 | if (n->prev) |
---|
1133 | n->prev->next = n->next; |
---|
1134 | if (n->next) |
---|
1135 | n->next->prev = n->prev; |
---|
1136 | n->next = n->prev = 0; |
---|
1137 | } |
---|
1138 | |
---|
1139 | static void |
---|
1140 | nodeAppend(ListNode l, ListNode n) |
---|
1141 | { |
---|
1142 | if ( (n->next = l->next) ) |
---|
1143 | n->next->prev = n; |
---|
1144 | l->next = n; |
---|
1145 | n->prev = l; |
---|
1146 | |
---|
1147 | } |
---|
1148 | |
---|
/* this code does the work:
 * the daemon owns the shared UDP socket. It sends newly enqueued
 * transactions, retransmits outstanding ones on timeout (per-server
 * exponential backoff capped at RPCIOD_RETX_CAP_S), matches incoming
 * replies to transactions and wakes up the requestors.
 * Terminates when it receives RPCIOD_KILL_EVENT and no transactions
 * are outstanding.
 */
static void
rpcio_daemon(rtems_task_argument arg)
{
rtems_status_code stat;
RpcUdpXact        xact;
RpcUdpServer      srv;
rtems_interval    next_retrans, then, unow;
long              now;	/* need to do signed comparison with age! */
rtems_event_set   events;
ListNode          newList;
size_t            size;
rtems_id          q = 0;
ListNodeRec       listHead = {0, 0};
unsigned long     epoch      = RPCIOD_EPOCH_SECS  * ticksPerSec;
unsigned long     max_period = RPCIOD_RETX_CAP_S * ticksPerSec;
rtems_status_code status;


	then = rtems_clock_get_ticks_since_boot();

	for (next_retrans = epoch;;) {

		/* sleep until work arrives or the next retransmission is due */
		if ( RTEMS_SUCCESSFUL !=
			 (stat = rtems_event_receive(
						RPCIOD_RX_EVENT | RPCIOD_TX_EVENT | RPCIOD_KILL_EVENT,
						RTEMS_WAIT | RTEMS_EVENT_ANY,
						next_retrans,
						&events)) ) {
			ASSERT( RTEMS_TIMEOUT == stat );
			events = 0;
		}

		if (events & RPCIOD_KILL_EVENT) {
			int i;

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got KILL event\n");
#endif

			/* only die if the transaction hash table is completely empty */
			MU_LOCK(hlock);
			for (i=XACT_HASHS-1; i>=0; i--) {
				if (xactHashTbl[i]) {
					break;
				}
			}
			if (i<0) {
				/* prevent them from creating and enqueueing more messages */
				q=msgQ;
				/* messages queued after we executed this assignment will fail */
				msgQ=0;
			}
			MU_UNLOCK(hlock);
			if (i>=0) {
				fprintf(stderr,"RPCIO There are still transactions circulating; I refuse to go away\n");
				fprintf(stderr,"(1st in slot %i)\n",i);
				/* unblock rpcUdpCleanup(); it will see msgQ != 0 and fail */
				rtems_semaphore_release(fini);
			} else {
				break;
			}
		}

		unow = rtems_clock_get_ticks_since_boot();

		/* measure everything relative to then to protect against
		 * rollover
		 */
		now = unow - then;

		/* NOTE: we don't lock the hash table while we are operating
		 * on transactions; the paradigm is that we 'own' a particular
		 * transaction (and hence it's hash table slot) from the
		 * time the xact was put into the message queue until we
		 * wake up the requestor.
		 */

		if (RPCIOD_RX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got RX event\n");
#endif

			/* drain the socket; sockRcv() returns matched transactions */
			while ((xact=sockRcv())) {

				/* extract from the retransmission list */
				nodeXtract(&xact->node);

				/* change the ID - there might already be
				 * a retransmission on the way. When it's
				 * reply arrives we must not find it's ID
				 * in the hashtable
				 */
				xact->obuf.xid        += XACT_HASHS;

				xact->status.re_status = RPC_SUCCESS;

				/* calculate roundtrip ticks */
				xact->trip             = now - xact->trip;

				srv                    = xact->server;

				/* adjust the server's retry period */
				{
				register TimeoutT rtry = srv->retry_period;
				register TimeoutT trip = xact->trip;

					ASSERT( trip >= 0 );

					if ( 0==trip )
						trip = 1;

					/* retry_new = 0.75*retry_old + 0.25 * 8 * roundrip */
					rtry = (3*rtry + (trip << 3)) >> 2;

					if ( rtry > max_period )
						rtry = max_period;

					srv->retry_period = rtry;
				}

				/* wakeup requestor */
				rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
			}
		}

		if (RPCIOD_TX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got TX event\n");
#endif

			/* move freshly enqueued transactions onto the timeout queue;
			 * age == now makes them due for (first) transmission below
			 */
			while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
											msgQ,
											&xact,
											&size,
											RTEMS_NO_WAIT,
											RTEMS_NO_TIMEOUT)) {
				/* put to the head of timeout q */
				nodeAppend(&listHead, &xact->node);

				xact->age  = now;
				xact->trip = FIRST_ATTEMPT;
			}
		}


		/* work the timeout q: listHead is sorted by 'age', so we only
		 * have to look at the front
		 */
		newList = 0;
		for ( xact=(RpcUdpXact)listHead.next;
			  xact && xact->age <= now;
			  xact=(RpcUdpXact)listHead.next ) {

			/* extract from the list */
			nodeXtract(&xact->node);

			srv = xact->server;

			if (xact->tolive < 0) {
				/* this one timed out */
				xact->status.re_errno  = ETIMEDOUT;
				xact->status.re_status = RPC_TIMEDOUT;

				srv->timeouts++;

				/* Change the ID - there might still be
				 * a reply on the way. When it arrives we
				 * must not find it's ID in the hash table
				 *
				 * Thanks to Steven Johnson for hunting this
				 * one down.
				 */
				xact->obuf.xid        += XACT_HASHS;

#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,"RPCIO XACT timed out; waking up requestor\n");
#endif
				if ( rtems_event_send(xact->requestor, RTEMS_RPC_EVENT) ) {
					rtems_panic("RPCIO PANIC file %s line: %i, requestor id was 0x%08x",
								__FILE__,
								__LINE__,
								xact->requestor);
				}

			} else {
				int len;

				len = (int)XDR_GETPOS(&xact->xdrs);

#ifdef MBUF_TX
				xact->refcnt = 1;	/* sendto itself */
#endif
				if ( len != SENDTO( ourSock,
									xact->obuf.buf,
									len,
									0,
									&srv->addr.sa,
									sizeof(srv->addr.sin)
#ifdef MBUF_TX
									, xact,
									paranoia_free,
									paranoia_ref
#endif
									) ) {

					xact->status.re_errno  = errno;
					xact->status.re_status = RPC_CANTSEND;
					srv->errors++;

					/* wakeup requestor */
					fprintf(stderr,"RPCIO: SEND failure\n");
					status = rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
					assert( status == RTEMS_SUCCESSFUL );

				} else {
					/* send successful; calculate retransmission time
					 * and enqueue to temporary list
					 */
					if (FIRST_ATTEMPT != xact->trip) {
#if (DEBUG) & DEBUG_TIMEOUT
						fprintf(stderr,
							"timed out; tolive is %i (ticks), retry period is %i (ticks)\n",
							xact->tolive,
							srv->retry_period);
#endif
						/* this is a real retry; we backup
						 * the server's retry interval
						 */
						if ( srv->retry_period < max_period ) {

							/* If multiple transactions for this server
							 * fail (e.g. because it died) this will
							 * back-off very agressively (doubling
							 * the retransmission period for every
							 * timed out transaction up to the CAP limit)
							 * which is desirable - single packet failure
							 * is treated more gracefully by this algorithm.
							 */

							srv->retry_period<<=1;
#if (DEBUG) & DEBUG_TIMEOUT
							fprintf(stderr,
								"adjusted to; retry period %i\n",
								srv->retry_period);
#endif
						} else {
							/* never wait longer than RPCIOD_RETX_CAP_S seconds */
							fprintf(stderr,
								"RPCIO: server '%s' not responding - still trying\n",
								srv->name);
						}
						if ( 0 == ++srv->retrans % 1000) {
							fprintf(stderr,
								"RPCIO - statistics: already %li retries to server %s\n",
								srv->retrans,
								srv->name);
						}
					} else {
						srv->requests++;
					}
					xact->trip = now;
					{
					long capped_period = srv->retry_period;
						/* never outlive the transaction's remaining lifetime */
						if ( xact->lifetime < capped_period )
							capped_period = xact->lifetime;
						xact->age     = now + capped_period;
						xact->tolive -= capped_period;
					}
					/* enqueue to the list of newly sent transactions */
					xact->node.next = newList;
					newList         = &xact->node;
#if (DEBUG) & DEBUG_TIMEOUT
					fprintf(stderr,
						"XACT (0x%08x) age is 0x%x, now: 0x%x\n",
						xact,
						xact->age,
						now);
#endif
				}
			}
		}

		/* insert the newly sent transactions into the
		 * sorted retransmission list
		 */
		for (; (xact = (RpcUdpXact)newList); ) {
			register ListNode p,n;
			newList = newList->next;
			/* linear scan for the insertion point (list kept sorted by age) */
			for ( p=&listHead; (n=p->next) && xact->age > ((RpcUdpXact)n)->age; p=n )
				/* nothing else to do */;
			nodeAppend(p, &xact->node);
		}

		if (now > epoch) {
			/* every now and then, readjust the epoch */
			register ListNode n;
			then += now;
			for (n=listHead.next; n; n=n->next) {
				/* readjust outstanding time intervals subject to the
				 * condition that the 'absolute' time must remain
				 * the same. 'age' and 'trip' are measured with
				 * respect to 'then' - hence:
				 *
				 * abs_age == old_age + old_then == new_age + new_then
				 *
				 * ==> new_age = old_age + old_then - new_then == old_age - 'now'
				 */
				((RpcUdpXact)n)->age  -= now;
				((RpcUdpXact)n)->trip -= now;
#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,
					"readjusted XACT (0x%08x); age is 0x%x, trip: 0x%x now: 0x%x\n",
					(RpcUdpXact)n,
					((RpcUdpXact)n)->trip,
					((RpcUdpXact)n)->age,
					now);
#endif
			}
			now = 0;
		}

		/* sleep until the head of the sorted list is due */
		next_retrans = listHead.next ?
							((RpcUdpXact)listHead.next)->age - now :
							epoch;	/* make sure we don't miss updating the epoch */
#if (DEBUG) & DEBUG_TIMEOUT
		fprintf(stderr,"RPCIO: next timeout is %x\n",next_retrans);
#endif
	}
	/* close our socket; shut down the receiver */
	close(ourSock);

#if 0 /* if we get here, no transactions exist, hence there can be none
	   * in the queue whatsoever
	   */
	/* flush the message queue */
	while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
										q,
										&xact,
										&size,
										RTEMS_NO_WAIT,
										RTEMS_NO_TIMEOUT)) {
		/* TODO enque xact */
	}

	/* flush all outstanding transactions */

	for (xact=((RpcUdpXact)listHead.next); xact; xact=((RpcUdpXact)xact->node.next)) {
			xact->status.re_status = RPC_TIMEDOUT;
			rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
	}
#endif

	/* 'q' holds the queue id saved before msgQ was cleared above */
	rtems_message_queue_delete(q);

	MU_DESTROY(hlock);

	fprintf(stderr,"RPC daemon exited...\n");

	/* let rpcUdpCleanup() proceed, then park this task forever */
	rtems_semaphore_release(fini);
	rtems_task_suspend(RTEMS_SELF);
}
---|
1509 | |
---|
1510 | |
---|
1511 | /* support for transaction 'pools'. A number of XACT objects |
---|
1512 | * is always kept around. The initial number is 0 but it |
---|
1513 | * is allowed to grow up to a maximum. |
---|
1514 | * If the need grows beyond the maximum, behavior depends: |
---|
1515 | * Users can either block until a transaction becomes available, |
---|
1516 | * they can create a new XACT on the fly or get an error |
---|
1517 | * if no free XACT is available from the pool. |
---|
1518 | */ |
---|
1519 | |
---|
1520 | RpcUdpXactPool |
---|
1521 | rpcUdpXactPoolCreate( |
---|
1522 | rpcprog_t prog, rpcvers_t version, |
---|
1523 | int xactsize, int poolsize) |
---|
1524 | { |
---|
1525 | RpcUdpXactPool rval = MY_MALLOC(sizeof(*rval)); |
---|
1526 | rtems_status_code status; |
---|
1527 | |
---|
1528 | ASSERT( rval ); |
---|
1529 | status = rtems_message_queue_create( |
---|
1530 | rtems_build_name('R','P','C','p'), |
---|
1531 | poolsize, |
---|
1532 | sizeof(RpcUdpXact), |
---|
1533 | RTEMS_DEFAULT_ATTRIBUTES, |
---|
1534 | &rval->box); |
---|
1535 | assert( status == RTEMS_SUCCESSFUL ); |
---|
1536 | |
---|
1537 | rval->prog = prog; |
---|
1538 | rval->version = version; |
---|
1539 | rval->xactSize = xactsize; |
---|
1540 | return rval; |
---|
1541 | } |
---|
1542 | |
---|
1543 | void |
---|
1544 | rpcUdpXactPoolDestroy(RpcUdpXactPool pool) |
---|
1545 | { |
---|
1546 | RpcUdpXact xact; |
---|
1547 | |
---|
1548 | while ((xact = rpcUdpXactPoolGet(pool, XactGetFail))) { |
---|
1549 | rpcUdpXactDestroy(xact); |
---|
1550 | } |
---|
1551 | rtems_message_queue_delete(pool->box); |
---|
1552 | MY_FREE(pool); |
---|
1553 | } |
---|
1554 | |
---|
1555 | RpcUdpXact |
---|
1556 | rpcUdpXactPoolGet(RpcUdpXactPool pool, XactPoolGetMode mode) |
---|
1557 | { |
---|
1558 | RpcUdpXact xact = 0; |
---|
1559 | size_t size; |
---|
1560 | |
---|
1561 | if (RTEMS_SUCCESSFUL != rtems_message_queue_receive( |
---|
1562 | pool->box, |
---|
1563 | &xact, |
---|
1564 | &size, |
---|
1565 | XactGetWait == mode ? |
---|
1566 | RTEMS_WAIT : RTEMS_NO_WAIT, |
---|
1567 | RTEMS_NO_TIMEOUT)) { |
---|
1568 | |
---|
1569 | /* nothing found in box; should we create a new one ? */ |
---|
1570 | |
---|
1571 | xact = (XactGetCreate == mode) ? |
---|
1572 | rpcUdpXactCreate( |
---|
1573 | pool->prog, |
---|
1574 | pool->version, |
---|
1575 | pool->xactSize) : 0 ; |
---|
1576 | if (xact) |
---|
1577 | xact->pool = pool; |
---|
1578 | |
---|
1579 | } |
---|
1580 | return xact; |
---|
1581 | } |
---|
1582 | |
---|
1583 | void |
---|
1584 | rpcUdpXactPoolPut(RpcUdpXact xact) |
---|
1585 | { |
---|
1586 | RpcUdpXactPool pool; |
---|
1587 | |
---|
1588 | pool = xact->pool; |
---|
1589 | ASSERT( pool ); |
---|
1590 | |
---|
1591 | if (RTEMS_SUCCESSFUL != rtems_message_queue_send( |
---|
1592 | pool->box, |
---|
1593 | &xact, |
---|
1594 | sizeof(xact))) |
---|
1595 | rpcUdpXactDestroy(xact); |
---|
1596 | } |
---|
1597 | |
---|
#ifdef MBUF_RX

/* WORKAROUND: include sys/mbuf.h (or other bsdnet headers) only
 * _after_ using malloc()/free() & friends because
 * the RTEMS/BSDNET headers redefine those :-(
 */

#define _KERNEL
#include <sys/mbuf.h>

/* kernel-internal receive routine; not declared in public headers */
ssize_t
recv_mbuf_from(int s, struct mbuf **ppm, long len, struct sockaddr *fromaddr, int *fromlen);

/* Release an mbuf chain (if any) and NULL the caller's pointer.
 * m_freem() must run under the BSD networking semaphore.
 */
static void
bufFree(struct mbuf **m)
{
	if (*m) {
		rtems_bsdnet_semaphore_obtain();
		m_freem(*m);
		rtems_bsdnet_semaphore_release();
		*m = 0;
	}
}
#endif
---|
1622 | |
---|
#ifdef MBUF_TX
/* mbuf free callback passed to SENDTO(); in DEBUG builds it
 * cross-checks the transaction's refcount and the buffer size
 * against the XDR stream position. No-op otherwise.
 */
static void
paranoia_free(caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);

	ASSERT( --xact->refcnt >= 0 && size == len );
#endif
}

/* mbuf reference callback passed to SENDTO(); in DEBUG builds it
 * bumps the transaction's refcount after validating the size.
 * No-op otherwise.
 */
static void
paranoia_ref (caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);
	ASSERT( size == len );
	xact->refcnt++;
#endif
}
#endif
---|
1646 | |
---|
/* receive from a socket and find
 * the transaction corresponding to the
 * transaction ID received in the server
 * reply.
 *
 * The semantics of the 'pibuf' pointer are
 * as follows:
 *
 * MBUF_RX:
 *
 * Packets whose XID / sender address do not match the expected
 * outstanding transaction are diagnosed and dropped, and reception
 * continues; NULL is returned when the socket is drained (EAGAIN)
 * or an error/allocation failure occurs.
 */

#define RPCIOD_RXBUFSZ	UDPMSGSIZE

static RpcUdpXact
sockRcv(void)
{
int					len,i;
uint32_t			xid;
union {
	struct sockaddr_in sin;
	struct sockaddr    sa;
}					fromAddr;
int					fromLen  = sizeof(fromAddr.sin);
RxBuf				ibuf     = 0;
RpcUdpXact			xact     = 0;

	do {

	/* rcv_mbuf() and recvfrom() differ in that the
	 * former allocates buffers and passes them back
	 * to us whereas the latter requires us to provide
	 * buffer space.
	 * Hence, in the first case whe have to make sure
	 * no old buffer is leaked - in the second case,
	 * we might well re-use an old buffer but must
	 * make sure we have one allocated
	 */
#ifdef MBUF_RX
	if (ibuf)
		bufFree(&ibuf);

	len  = recv_mbuf_from(
					ourSock,
					&ibuf,
					RPCIOD_RXBUFSZ,
					&fromAddr.sa,
					&fromLen);
#else
	if ( !ibuf )
		/* NOTE(review): cast is to RpcBuf while 'ibuf' is declared RxBuf --
		 * presumably the two are compatible typedefs in !MBUF_RX builds;
		 * verify against the type declarations.
		 */
		ibuf = (RpcBuf)MY_MALLOC(RPCIOD_RXBUFSZ);
	if ( !ibuf )
		goto cleanup; /* no memory - drop this message */

	len  = recvfrom(ourSock,
					ibuf->buf,
					RPCIOD_RXBUFSZ,
					0,
					&fromAddr.sa,
					&fromLen);
#endif

	if (len <= 0) {
		/* EAGAIN just means the (non-blocking) socket is drained */
		if (EAGAIN != errno)
			fprintf(stderr,"RECV failed: %s\n",strerror(errno));
		goto cleanup;
	}

#if (DEBUG) & DEBUG_PACKLOSS
	if ( (unsigned)rand() < DEBUG_PACKLOSS_FRACT ) {
		/* lose packets once in a while */
		static int xxx = 0;
		if ( ++xxx % 16 == 0 )
			fprintf(stderr,"DEBUG: dropped %i packets, so far...\n",xxx);
		if ( ibuf )
			bufFree( &ibuf );
		continue;
	}
#endif

	/* the low bits of the XID select the hash table slot */
	i = (xid=XID(ibuf)) & XACT_HASH_MSK;

	/* the reply is only accepted if the slot holds a transaction with
	 * matching XID and the packet came from the expected server port
	 * (and address, if REJECT_SERVERIP_MISMATCH)
	 */
	if ( !(xact=xactHashTbl[i])                                             ||
		   xact->obuf.xid                     != xid                        ||
#ifdef REJECT_SERVERIP_MISMATCH
		   xact->server->addr.sin.sin_addr.s_addr != fromAddr.sin.sin_addr.s_addr ||
#endif
		   xact->server->addr.sin.sin_port        != fromAddr.sin.sin_port ) {

		if (xact) {
			/* an XID offset by 1 or 2 * XACT_HASHS indicates a reply to a
			 * retransmission whose ID has since been bumped by the daemon
			 */
			if (
#ifdef REJECT_SERVERIP_MISMATCH
				xact->server->addr.sin.sin_addr.s_addr == fromAddr.sin.sin_addr.s_addr &&
#endif
			    xact->server->addr.sin.sin_port        == fromAddr.sin.sin_port        &&
			    ( xact->obuf.xid                       == xid + XACT_HASHS   ||
			      xact->obuf.xid                       == xid + 2*XACT_HASHS    )
			   ) {
/* NOTE(review): guard reads '#ifndef DEBUG' although the comment says not to
 * complain about late retries -- i.e. the FYI message prints only in NON-debug
 * builds; '#ifdef DEBUG' may have been intended. Verify before changing.
 */
#ifndef DEBUG /* don't complain if it's just a late arrival of a retry */
			fprintf(stderr,"RPCIO - FYI sockRcv(): dropping late/redundant retry answer\n");
#endif
			} else {
			fprintf(stderr,"RPCIO WARNING sockRcv(): transaction mismatch\n");
			fprintf(stderr,"xact: xid  0x%08" PRIx32 "  -- got 0x%08" PRIx32 "\n",
							xact->obuf.xid, xid);
			fprintf(stderr,"xact: addr 0x%08" PRIx32 "  -- got 0x%08" PRIx32 "\n",
							xact->server->addr.sin.sin_addr.s_addr,
							fromAddr.sin.sin_addr.s_addr);
			fprintf(stderr,"xact: port 0x%08x  -- got 0x%08x\n",
							xact->server->addr.sin.sin_port,
							fromAddr.sin.sin_port);
			}
		} else {
			fprintf(stderr,
					"RPCIO WARNING sockRcv(): got xid 0x%08" PRIx32 " but its slot is empty\n",
					xid);
		}
		/* forget about this one and try again */
		xact = 0;
	}

	} while ( !xact );

	/* hand the receive buffer over to the matched transaction */
	xact->ibuf     = ibuf;
#ifndef MBUF_RX
	xact->ibufsize = RPCIOD_RXBUFSZ;
#endif

	return xact;

cleanup:

	bufFree(&ibuf);

	return 0;
}
---|
1783 | |
---|
1784 | |
---|
1785 | #include <rtems/rtems_bsdnet_internal.h> |
---|
/* Double-check the event configuration; we should probably manage
 * system events globally!
 * NOTE(review): the '&' chain below only raises the #error when a bit
 * is common to ALL four masks at once; detecting overlap with ANY of
 * them (RTEMS_RPC_EVENT & (SOSLEEP_EVENT | SBWAIT_EVENT | NETISR_EVENTS))
 * was probably intended -- verify before relying on this check.
 * We do this at the end of the file for the same reason we had
 * included mbuf.h only a couple of lines above - see comment up
 * there...
 */
---|
1792 | #if RTEMS_RPC_EVENT & SOSLEEP_EVENT & SBWAIT_EVENT & NETISR_EVENTS |
---|
1793 | #error ILLEGAL EVENT CONFIGURATION |
---|
1794 | #endif |
---|