1 | /* $Id$ */ |
---|
2 | |
---|
3 | /* RPC multiplexor for a multitasking environment */ |
---|
4 | |
---|
5 | /* Author: Till Straumann <strauman@slac.stanford.edu>, 2002 */ |
---|
6 | |
---|
7 | /* This code funnels arbitrary task's UDP/RPC requests |
---|
8 | * through one socket to arbitrary servers. |
---|
9 | * The replies are gathered and dispatched to the |
---|
10 | * requestors. |
---|
11 | * One task handles all the sending and receiving |
---|
12 | * work including retries. |
---|
13 | * It is up to the requestor, however, to do |
---|
14 | * the XDR encoding of the arguments / decoding |
---|
15 | * of the results (except for the RPC header which |
---|
16 | * is handled by the daemon). |
---|
17 | */ |
---|
18 | |
---|
19 | /* |
---|
20 | * Authorship |
---|
21 | * ---------- |
---|
22 | * This software (NFS-2 client implementation for RTEMS) was created by |
---|
23 | * Till Straumann <strauman@slac.stanford.edu>, 2002-2007, |
---|
24 | * Stanford Linear Accelerator Center, Stanford University. |
---|
25 | * |
---|
26 | * Acknowledgement of sponsorship |
---|
27 | * ------------------------------ |
---|
28 | * The NFS-2 client implementation for RTEMS was produced by |
---|
29 | * the Stanford Linear Accelerator Center, Stanford University, |
---|
30 | * under Contract DE-AC03-76SFO0515 with the Department of Energy. |
---|
31 | * |
---|
32 | * Government disclaimer of liability |
---|
33 | * ---------------------------------- |
---|
34 | * Neither the United States nor the United States Department of Energy, |
---|
35 | * nor any of their employees, makes any warranty, express or implied, or |
---|
36 | * assumes any legal liability or responsibility for the accuracy, |
---|
37 | * completeness, or usefulness of any data, apparatus, product, or process |
---|
38 | * disclosed, or represents that its use would not infringe privately owned |
---|
39 | * rights. |
---|
40 | * |
---|
41 | * Stanford disclaimer of liability |
---|
42 | * -------------------------------- |
---|
43 | * Stanford University makes no representations or warranties, express or |
---|
44 | * implied, nor assumes any liability for the use of this software. |
---|
45 | * |
---|
46 | * Stanford disclaimer of copyright |
---|
47 | * -------------------------------- |
---|
48 | * Stanford University, owner of the copyright, hereby disclaims its |
---|
49 | * copyright and all other rights in this software. Hence, anyone may |
---|
50 | * freely use it for any purpose without restriction. |
---|
51 | * |
---|
52 | * Maintenance of notices |
---|
53 | * ---------------------- |
---|
54 | * In the interest of clarity regarding the origin and status of this |
---|
55 | * SLAC software, this and all the preceding Stanford University notices |
---|
56 | * are to remain affixed to any copy or derivative of this software made |
---|
57 | * or distributed by the recipient and are to be affixed to any copy of |
---|
58 | * software made or distributed by the recipient that contains a copy or |
---|
59 | * derivative of this software. |
---|
60 | * |
---|
61 | * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03 |
---|
62 | */ |
---|
63 | |
---|
64 | #include <rtems.h> |
---|
65 | #include <rtems/error.h> |
---|
66 | #include <rtems/rtems_bsdnet.h> |
---|
67 | #include <stdlib.h> |
---|
68 | #include <time.h> |
---|
69 | #include <rpc/rpc.h> |
---|
70 | #include <rpc/pmap_prot.h> |
---|
71 | #include <errno.h> |
---|
72 | #include <sys/ioctl.h> |
---|
73 | #include <assert.h> |
---|
74 | #include <stdio.h> |
---|
75 | #include <errno.h> |
---|
76 | #include <string.h> |
---|
77 | #include <netinet/in.h> |
---|
78 | #include <arpa/inet.h> |
---|
79 | |
---|
80 | #include "rpcio.h" |
---|
81 | |
---|
82 | /****************************************************************/ |
---|
83 | /* CONFIGURABLE PARAMETERS */ |
---|
84 | /****************************************************************/ |
---|
85 | |
---|
86 | #define MBUF_RX /* If defined: use mbuf XDR stream for |
---|
87 | * decoding directly out of mbufs |
---|
88 | * Otherwise, the regular 'recvfrom()' |
---|
89 | * interface will be used involving an |
---|
90 | * extra buffer allocation + copy step. |
---|
91 | */ |
---|
92 | |
---|
93 | #define MBUF_TX /* If defined: avoid copying data when |
---|
94 | * sending. Instead, use a wrapper to |
---|
95 | * 'sosend()' which will point an MBUF |
---|
96 | * directly to our buffer space. |
---|
97 | * Note that the BSD stack does not copy |
---|
98 | * data when fragmenting packets - it |
---|
99 | * merely uses an mbuf chain pointing |
---|
100 | * into different areas of the data. |
---|
101 | * |
---|
102 | * If undefined, the regular 'sendto()' |
---|
103 | * interface is used. |
---|
104 | */ |
---|
105 | |
---|
106 | #undef REJECT_SERVERIP_MISMATCH |
---|
107 | /* If defined, RPC replies must come from the server |
---|
108 | * that was queried. Eric Norum has reported problems |
---|
109 | * with clustered NFS servers. So we disable this |
---|
110 | * reducing paranoia... |
---|
111 | */ |
---|
112 | |
---|
113 | /* daemon task parameters */ |
---|
114 | #define RPCIOD_STACK 10000 |
---|
115 | #define RPCIOD_PRIO 100 /* *fallback* priority */ |
---|
116 | |
---|
117 | /* depth of the message queue for sending |
---|
118 | * RPC requests to the daemon |
---|
119 | */ |
---|
120 | #define RPCIOD_QDEPTH 20 |
---|
121 | |
---|
122 | /* Maximum retry limit for retransmission */ |
---|
123 | #define RPCIOD_RETX_CAP_S 3 /* seconds */ |
---|
124 | |
---|
125 | /* Default timeout for RPC calls */ |
---|
126 | #define RPCIOD_DEFAULT_TIMEOUT (&_rpc_default_timeout) |
---|
127 | static struct timeval _rpc_default_timeout = { 10 /* secs */, 0 /* usecs */ }; |
---|
128 | |
---|
129 | /* how many times should we try to resend a failed |
---|
130 | * transaction with refreshed AUTHs |
---|
131 | */ |
---|
132 | #define RPCIOD_REFRESH 2 |
---|
133 | |
---|
134 | /* Events we are using; the RPC_EVENT |
---|
135 | * MUST NOT be used by any application |
---|
136 | * thread doing RPC IO (e.g. NFS) |
---|
137 | */ |
---|
138 | #define RTEMS_RPC_EVENT RTEMS_EVENT_30 /* THE event used by RPCIO. Every task doing |
---|
139 | * RPC IO will receive this - hence it is |
---|
140 | * RESERVED |
---|
141 | */ |
---|
142 | #define RPCIOD_RX_EVENT RTEMS_EVENT_1 /* Events the RPCIOD is using/waiting for */ |
---|
143 | #define RPCIOD_TX_EVENT RTEMS_EVENT_2 |
---|
144 | #define RPCIOD_KILL_EVENT RTEMS_EVENT_3 /* send to the daemon to kill it */ |
---|
145 | |
---|
146 | #define LD_XACT_HASH 8 /* ld of the size of the transaction hash table */ |
---|
147 | |
---|
148 | |
---|
149 | /* Debugging Flags */ |
---|
150 | |
---|
151 | /* NOTE: defining DEBUG 0 leaves some 'assert()' paranoia checks |
---|
152 | * but produces no output |
---|
153 | */ |
---|
154 | |
---|
155 | #define DEBUG_TRACE_XACT (1<<0) |
---|
156 | #define DEBUG_EVENTS (1<<1) |
---|
157 | #define DEBUG_MALLOC (1<<2) |
---|
158 | #define DEBUG_TIMEOUT (1<<3) |
---|
159 | #define DEBUG_PACKLOSS (1<<4) /* This introduces random, artificial packet losses to test retransmission */ |
---|
160 | |
---|
161 | #define DEBUG_PACKLOSS_FRACT (0xffffffff/10) |
---|
162 | |
---|
163 | /* USE PARENTHESIS WHEN 'or'ing MULTIPLE FLAGS: (DEBUG_XX | DEBUG_YY) */ |
---|
164 | #define DEBUG (0) |
---|
165 | |
---|
166 | /****************************************************************/ |
---|
167 | /* END OF CONFIGURABLE SECTION */ |
---|
168 | /****************************************************************/ |
---|
169 | |
---|
170 | /* prevent rollover of our timers by readjusting the epoch on the fly */ |
---|
171 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
172 | #define RPCIOD_EPOCH_SECS 10 |
---|
173 | #else |
---|
174 | #define RPCIOD_EPOCH_SECS 10000 |
---|
175 | #endif |
---|
176 | |
---|
177 | #ifdef DEBUG |
---|
178 | #define ASSERT(arg) assert(arg) |
---|
179 | #else |
---|
180 | #define ASSERT(arg) if (arg) |
---|
181 | #endif |
---|
182 | |
---|
183 | /****************************************************************/ |
---|
184 | /* MACROS */ |
---|
185 | /****************************************************************/ |
---|
186 | |
---|
187 | |
---|
188 | #define XACT_HASHS (1<<(LD_XACT_HASH)) /* the hash table size derived from the ld */ |
---|
189 | #define XACT_HASH_MSK ((XACT_HASHS)-1) /* mask to extract the hash index from a RPC-XID */ |
---|
190 | |
---|
191 | |
---|
192 | #define MU_LOCK(mutex) do { \ |
---|
193 | assert( \ |
---|
194 | RTEMS_SUCCESSFUL == \ |
---|
195 | rtems_semaphore_obtain( \ |
---|
196 | (mutex), \ |
---|
197 | RTEMS_WAIT, \ |
---|
198 | RTEMS_NO_TIMEOUT \ |
---|
199 | ) ); \ |
---|
200 | } while(0) |
---|
201 | |
---|
202 | #define MU_UNLOCK(mutex) do { \ |
---|
203 | assert( \ |
---|
204 | RTEMS_SUCCESSFUL == \ |
---|
205 | rtems_semaphore_release( \ |
---|
206 | (mutex) \ |
---|
207 | ) ); \ |
---|
208 | } while(0) |
---|
209 | |
---|
210 | #define MU_CREAT(pmutex) do { \ |
---|
211 | assert( \ |
---|
212 | RTEMS_SUCCESSFUL == \ |
---|
213 | rtems_semaphore_create( \ |
---|
214 | rtems_build_name( \ |
---|
215 | 'R','P','C','l' \ |
---|
216 | ), \ |
---|
217 | 1, \ |
---|
218 | MUTEX_ATTRIBUTES, \ |
---|
219 | 0, \ |
---|
220 | (pmutex)) ); \ |
---|
221 | } while (0) |
---|
222 | |
---|
223 | |
---|
224 | #define MU_DESTROY(mutex) do { \ |
---|
225 | assert( \ |
---|
226 | RTEMS_SUCCESSFUL == \ |
---|
227 | rtems_semaphore_delete( \ |
---|
228 | mutex \ |
---|
229 | ) ); \ |
---|
230 | } while (0) |
---|
231 | |
---|
232 | #define MUTEX_ATTRIBUTES (RTEMS_LOCAL | \ |
---|
233 | RTEMS_PRIORITY | \ |
---|
234 | RTEMS_INHERIT_PRIORITY | \ |
---|
235 | RTEMS_BINARY_SEMAPHORE) |
---|
236 | |
---|
237 | #define FIRST_ATTEMPT 0x88888888 /* some time that is never reached */ |
---|
238 | |
---|
239 | /****************************************************************/ |
---|
240 | /* TYPE DEFINITIONS */ |
---|
241 | /****************************************************************/ |
---|
242 | |
---|
243 | typedef rtems_interval TimeoutT; |
---|
244 | |
---|
245 | /* 100000th implementation of a doubly linked list; |
---|
246 | * since only one thread is looking at these, |
---|
247 | * we need no locking |
---|
248 | */ |
---|
249 | typedef struct ListNodeRec_ { |
---|
250 | struct ListNodeRec_ *next, *prev; |
---|
251 | } ListNodeRec, *ListNode; |
---|
252 | |
---|
253 | |
---|
254 | /* Structure representing an RPC server */ |
---|
255 | typedef struct RpcUdpServerRec_ { |
---|
256 | RpcUdpServer next; /* linked list of all servers; protected by hlock */ |
---|
257 | union { |
---|
258 | struct sockaddr_in sin; |
---|
259 | struct sockaddr sa; |
---|
260 | } addr; |
---|
261 | AUTH *auth; |
---|
262 | rtems_id authlock; /* must MUTEX the auth object - it's not clear |
---|
263 | * what is better: |
---|
264 | * 1 having one (MUTEXed) auth per server |
---|
265 | * who is shared among all transactions |
---|
266 | * using that server |
---|
267 | * 2 maintaining an AUTH per transaction |
---|
268 | * (there are then other options: manage |
---|
269 | * XACT pools on a per-server basis instead |
---|
270 | * of associating a server with a XACT when |
---|
271 | * sending) |
---|
272 | * experience will show if the current (1) |
---|
273 | * approach has to be changed. |
---|
274 | */ |
---|
275 | TimeoutT retry_period; /* dynamically adjusted retry period |
---|
276 | * (based on packet roundtrip time) |
---|
277 | */ |
---|
278 | /* STATISTICS */ |
---|
279 | unsigned long retrans; /* how many retries were issued by this server */ |
---|
280 | unsigned long requests; /* how many requests have been sent */ |
---|
281 | unsigned long timeouts; /* how many requests have timed out */ |
---|
282 | unsigned long errors; /* how many errors have occurred (other than timeouts) */ |
---|
283 | char name[20]; /* server's address in IP 'dot' notation */ |
---|
284 | } RpcUdpServerRec; |
---|
285 | |
---|
286 | typedef union RpcBufU_ { |
---|
287 | u_long xid; |
---|
288 | char buf[1]; |
---|
289 | } RpcBufU, *RpcBuf; |
---|
290 | |
---|
291 | /* RX Buffer implementation; this is either |
---|
292 | * an MBUF chain (MBUF_RX configuration) |
---|
293 | * or a buffer allocated from the heap |
---|
294 | * where recvfrom copies the (encoded) reply |
---|
 * to. The XDR routines then copy/decode
---|
296 | * it into the user's data structures. |
---|
297 | */ |
---|
298 | #ifdef MBUF_RX |
---|
299 | typedef struct mbuf * RxBuf; /* an MBUF chain */ |
---|
300 | static void bufFree(struct mbuf **m); |
---|
301 | #define XID(ibuf) (*(mtod((ibuf), u_long *))) |
---|
302 | extern void xdrmbuf_create(XDR *, struct mbuf *, enum xdr_op); |
---|
303 | #else |
---|
304 | typedef RpcBuf RxBuf; |
---|
305 | #define bufFree(b) do { MY_FREE(*(b)); *(b)=0; } while(0) |
---|
306 | #define XID(ibuf) ((ibuf)->xid) |
---|
307 | #endif |
---|
308 | |
---|
309 | /* A RPC 'transaction' consisting |
---|
310 | * of server and requestor information, |
---|
311 | * buffer space and an XDR object |
---|
312 | * (for encoding arguments). |
---|
313 | */ |
---|
314 | typedef struct RpcUdpXactRec_ { |
---|
315 | ListNodeRec node; /* so we can put XACTs on a list */ |
---|
316 | RpcUdpServer server; /* server this XACT goes to */ |
---|
317 | long lifetime; /* during the lifetime, retry attempts are made */ |
---|
318 | long tolive; /* lifetime timer */ |
---|
319 | struct rpc_err status; /* RPC reply error status */ |
---|
320 | long age; /* age info; needed to manage retransmission */ |
---|
321 | long trip; /* record round trip time in ticks */ |
---|
322 | rtems_id requestor; /* the task waiting for this XACT to complete */ |
---|
323 | RpcUdpXactPool pool; /* if this XACT belong to a pool, this is it */ |
---|
324 | XDR xdrs; /* argument encoder stream */ |
---|
325 | int xdrpos; /* stream position after the (permanent) header */ |
---|
326 | xdrproc_t xres; /* reply decoder proc - TODO needn't be here */ |
---|
327 | caddr_t pres; /* reply decoded obj - TODO needn't be here */ |
---|
328 | #ifndef MBUF_RX |
---|
329 | int ibufsize; /* size of the ibuf (bytes) */ |
---|
330 | #endif |
---|
331 | #ifdef MBUF_TX |
---|
332 | int refcnt; /* mbuf external storage reference count */ |
---|
333 | #endif |
---|
334 | int obufsize; /* size of the obuf (bytes) */ |
---|
335 | RxBuf ibuf; /* pointer to input buffer assigned by daemon */ |
---|
336 | RpcBufU obuf; /* output buffer (encoded args) APPENDED HERE */ |
---|
337 | } RpcUdpXactRec; |
---|
338 | |
---|
/* A pool of transactions bound to one RPC (program, version).
 * NOTE(review): the pool management routines are not visible in
 * this chunk -- field comments below are inferred from names and
 * should be confirmed against the pool implementation.
 */
typedef struct RpcUdpXactPoolRec_ {
	rtems_id	box;		/* presumably a message queue parking idle XACTs -- TODO confirm */
	int		prog;		/* RPC program number for pooled XACTs */
	int		version;	/* RPC program version for pooled XACTs */
	int		xactSize;	/* output buffer size of pooled XACTs */
} RpcUdpXactPoolRec;
---|
345 | |
---|
346 | /* a global hash table where all 'living' transaction |
---|
347 | * objects are registered. |
---|
348 | * A number of bits in a transaction's XID maps 1:1 to |
---|
349 | * an index in this table. Hence, the XACT matching |
---|
350 | * an RPC/UDP reply packet can quickly be found |
---|
351 | * The size of this table imposes a hard limit on the |
---|
352 | * number of all created transactions in the system. |
---|
353 | */ |
---|
354 | static RpcUdpXact xactHashTbl[XACT_HASHS]={0}; |
---|
355 | static u_long xidUpper [XACT_HASHS]={0}; |
---|
356 | static unsigned xidHashSeed = 0 ; |
---|
357 | |
---|
358 | /* forward declarations */ |
---|
359 | static RpcUdpXact |
---|
360 | sockRcv(void); |
---|
361 | |
---|
362 | static void |
---|
363 | rpcio_daemon(rtems_task_argument); |
---|
364 | |
---|
365 | #ifdef MBUF_TX |
---|
366 | ssize_t |
---|
367 | sendto_nocpy ( |
---|
368 | int s, |
---|
369 | const void *buf, size_t buflen, |
---|
370 | int flags, |
---|
371 | const struct sockaddr *toaddr, int tolen, |
---|
372 | void *closure, |
---|
373 | void (*freeproc)(caddr_t, u_int), |
---|
374 | void (*refproc)(caddr_t, u_int) |
---|
375 | ); |
---|
376 | static void paranoia_free(caddr_t closure, u_int size); |
---|
377 | static void paranoia_ref (caddr_t closure, u_int size); |
---|
378 | #define SENDTO sendto_nocpy |
---|
379 | #else |
---|
380 | #define SENDTO sendto |
---|
381 | #endif |
---|
382 | |
---|
383 | static RpcUdpServer rpcUdpServers = 0; /* linked list of all servers; protected by llock */ |
---|
384 | |
---|
385 | static int ourSock = -1; /* the socket we are using for communication */ |
---|
386 | static rtems_id rpciod = 0; /* task id of the RPC daemon */ |
---|
387 | static rtems_id msgQ = 0; /* message queue where the daemon picks up |
---|
388 | * requests |
---|
389 | */ |
---|
390 | static rtems_id llock = 0; /* MUTEX protecting the server list */ |
---|
391 | static rtems_id hlock = 0; /* MUTEX protecting the hash table and the list of servers */ |
---|
392 | static rtems_id fini = 0; /* a synchronization semaphore we use during |
---|
393 | * module cleanup / driver unloading |
---|
394 | */ |
---|
395 | static rtems_interval ticksPerSec; /* cached system clock rate (WHO IS ASSUMED NOT |
---|
396 | * TO CHANGE) |
---|
397 | */ |
---|
398 | |
---|
399 | rtems_task_priority rpciodPriority = 0; |
---|
400 | |
---|
401 | #if (DEBUG) & DEBUG_MALLOC |
---|
402 | /* malloc wrappers for debugging */ |
---|
403 | static int nibufs = 0; |
---|
404 | |
---|
405 | static inline void *MY_MALLOC(int s) |
---|
406 | { |
---|
407 | if (s) { |
---|
408 | void *rval; |
---|
409 | MU_LOCK(hlock); |
---|
410 | assert(nibufs++ < 2000); |
---|
411 | MU_UNLOCK(hlock); |
---|
412 | assert(rval = malloc(s)); |
---|
413 | return rval; |
---|
414 | } |
---|
415 | return 0; |
---|
416 | } |
---|
417 | |
---|
418 | static inline void *MY_CALLOC(int n, int s) |
---|
419 | { |
---|
420 | if (s) { |
---|
421 | void *rval; |
---|
422 | MU_LOCK(hlock); |
---|
423 | assert(nibufs++ < 2000); |
---|
424 | MU_UNLOCK(hlock); |
---|
425 | assert(rval = calloc(n,s)); |
---|
426 | return rval; |
---|
427 | } |
---|
428 | return 0; |
---|
429 | } |
---|
430 | |
---|
431 | |
---|
432 | static inline void MY_FREE(void *p) |
---|
433 | { |
---|
434 | if (p) { |
---|
435 | MU_LOCK(hlock); |
---|
436 | nibufs--; |
---|
437 | MU_UNLOCK(hlock); |
---|
438 | free(p); |
---|
439 | } |
---|
440 | } |
---|
441 | #else |
---|
442 | #define MY_MALLOC malloc |
---|
443 | #define MY_CALLOC calloc |
---|
444 | #define MY_FREE free |
---|
445 | #endif |
---|
446 | |
---|
447 | static inline bool_t |
---|
448 | locked_marshal(RpcUdpServer s, XDR *xdrs) |
---|
449 | { |
---|
450 | bool_t rval; |
---|
451 | MU_LOCK(s->authlock); |
---|
452 | rval = AUTH_MARSHALL(s->auth, xdrs); |
---|
453 | MU_UNLOCK(s->authlock); |
---|
454 | return rval; |
---|
455 | } |
---|
456 | |
---|
457 | /* Locked operations on a server's auth object */ |
---|
458 | static inline bool_t |
---|
459 | locked_validate(RpcUdpServer s, struct opaque_auth *v) |
---|
460 | { |
---|
461 | bool_t rval; |
---|
462 | MU_LOCK(s->authlock); |
---|
463 | rval = AUTH_VALIDATE(s->auth, v); |
---|
464 | MU_UNLOCK(s->authlock); |
---|
465 | return rval; |
---|
466 | } |
---|
467 | |
---|
468 | static inline bool_t |
---|
469 | locked_refresh(RpcUdpServer s) |
---|
470 | { |
---|
471 | bool_t rval; |
---|
472 | MU_LOCK(s->authlock); |
---|
473 | rval = AUTH_REFRESH(s->auth); |
---|
474 | MU_UNLOCK(s->authlock); |
---|
475 | return rval; |
---|
476 | } |
---|
477 | |
---|
478 | /* Create a server object |
---|
479 | * |
---|
480 | */ |
---|
481 | enum clnt_stat |
---|
482 | rpcUdpServerCreate( |
---|
483 | struct sockaddr_in *paddr, |
---|
484 | int prog, |
---|
485 | int vers, |
---|
486 | u_long uid, |
---|
487 | u_long gid, |
---|
488 | RpcUdpServer *psrv |
---|
489 | ) |
---|
490 | { |
---|
491 | RpcUdpServer rval; |
---|
492 | u_short port; |
---|
493 | char hname[MAX_MACHINE_NAME + 1]; |
---|
494 | int theuid, thegid; |
---|
495 | int thegids[NGRPS]; |
---|
496 | gid_t gids[NGROUPS]; |
---|
497 | int len,i; |
---|
498 | AUTH *auth; |
---|
499 | enum clnt_stat pmap_err; |
---|
500 | struct pmap pmaparg; |
---|
501 | |
---|
502 | if ( gethostname(hname, MAX_MACHINE_NAME) ) { |
---|
503 | fprintf(stderr, |
---|
504 | "RPCIO - error: I have no hostname ?? (%s)\n", |
---|
505 | strerror(errno)); |
---|
506 | return RPC_UNKNOWNHOST; |
---|
507 | } |
---|
508 | |
---|
509 | if ( (len = getgroups(NGROUPS, gids) < 0 ) ) { |
---|
510 | fprintf(stderr, |
---|
511 | "RPCIO - error: I unable to get group ids (%s)\n", |
---|
512 | strerror(errno)); |
---|
513 | return RPC_FAILED; |
---|
514 | } |
---|
515 | |
---|
516 | if ( len > NGRPS ) |
---|
517 | len = NGRPS; |
---|
518 | |
---|
519 | for (i=0; i<len; i++) |
---|
520 | thegids[i] = (int)gids[i]; |
---|
521 | |
---|
522 | theuid = (int) ((RPCIOD_DEFAULT_ID == uid) ? geteuid() : uid); |
---|
523 | thegid = (int) ((RPCIOD_DEFAULT_ID == gid) ? getegid() : gid); |
---|
524 | |
---|
525 | if ( !(auth = authunix_create(hname, theuid, thegid, len, thegids)) ) { |
---|
526 | fprintf(stderr, |
---|
527 | "RPCIO - error: unable to create RPC AUTH\n"); |
---|
528 | return RPC_FAILED; |
---|
529 | } |
---|
530 | |
---|
531 | /* if they specified no port try to ask the portmapper */ |
---|
532 | if (!paddr->sin_port) { |
---|
533 | |
---|
534 | paddr->sin_port = htons(PMAPPORT); |
---|
535 | |
---|
536 | pmaparg.pm_prog = prog; |
---|
537 | pmaparg.pm_vers = vers; |
---|
538 | pmaparg.pm_prot = IPPROTO_UDP; |
---|
539 | pmaparg.pm_port = 0; /* not needed or used */ |
---|
540 | |
---|
541 | |
---|
542 | /* dont use non-reentrant pmap_getport ! */ |
---|
543 | |
---|
544 | pmap_err = rpcUdpCallRp( |
---|
545 | paddr, |
---|
546 | PMAPPROG, |
---|
547 | PMAPVERS, |
---|
548 | PMAPPROC_GETPORT, |
---|
549 | xdr_pmap, |
---|
550 | &pmaparg, |
---|
551 | xdr_u_short, |
---|
552 | &port, |
---|
553 | uid, |
---|
554 | gid, |
---|
555 | 0); |
---|
556 | |
---|
557 | if ( RPC_SUCCESS != pmap_err ) { |
---|
558 | paddr->sin_port = 0; |
---|
559 | return pmap_err; |
---|
560 | } |
---|
561 | |
---|
562 | paddr->sin_port = htons(port); |
---|
563 | } |
---|
564 | |
---|
565 | if (0==paddr->sin_port) { |
---|
566 | return RPC_PROGNOTREGISTERED; |
---|
567 | } |
---|
568 | |
---|
569 | rval = (RpcUdpServer)MY_MALLOC(sizeof(*rval)); |
---|
570 | memset(rval, 0, sizeof(*rval)); |
---|
571 | |
---|
572 | if (!inet_ntop(AF_INET, &paddr->sin_addr, rval->name, sizeof(rval->name))) |
---|
573 | sprintf(rval->name,"?.?.?.?"); |
---|
574 | rval->addr.sin = *paddr; |
---|
575 | |
---|
576 | /* start with a long retransmission interval - it |
---|
577 | * will be adapted dynamically |
---|
578 | */ |
---|
579 | rval->retry_period = RPCIOD_RETX_CAP_S * ticksPerSec; |
---|
580 | |
---|
581 | rval->auth = auth; |
---|
582 | |
---|
583 | MU_CREAT( &rval->authlock ); |
---|
584 | |
---|
585 | /* link into list */ |
---|
586 | MU_LOCK( llock ); |
---|
587 | rval->next = rpcUdpServers; |
---|
588 | rpcUdpServers = rval; |
---|
589 | MU_UNLOCK( llock ); |
---|
590 | |
---|
591 | *psrv = rval; |
---|
592 | return RPC_SUCCESS; |
---|
593 | } |
---|
594 | |
---|
595 | void |
---|
596 | rpcUdpServerDestroy(RpcUdpServer s) |
---|
597 | { |
---|
598 | RpcUdpServer prev; |
---|
599 | if (!s) |
---|
600 | return; |
---|
601 | /* we should probably verify (but how?) that nobody |
---|
602 | * (at least: no outstanding XACTs) is using this |
---|
603 | * server; |
---|
604 | */ |
---|
605 | |
---|
606 | /* remove from server list */ |
---|
607 | MU_LOCK(llock); |
---|
608 | prev = rpcUdpServers; |
---|
609 | if ( s == prev ) { |
---|
610 | rpcUdpServers = s->next; |
---|
611 | } else { |
---|
612 | for ( ; prev ; prev = prev->next) { |
---|
613 | if (prev->next == s) { |
---|
614 | prev->next = s->next; |
---|
615 | break; |
---|
616 | } |
---|
617 | } |
---|
618 | } |
---|
619 | MU_UNLOCK(llock); |
---|
620 | |
---|
621 | /* MUST have found it */ |
---|
622 | assert(prev); |
---|
623 | |
---|
624 | auth_destroy(s->auth); |
---|
625 | |
---|
626 | MU_DESTROY(s->authlock); |
---|
627 | MY_FREE(s); |
---|
628 | } |
---|
629 | |
---|
630 | int |
---|
631 | rpcUdpStats(FILE *f) |
---|
632 | { |
---|
633 | RpcUdpServer s; |
---|
634 | |
---|
635 | if (!f) f = stdout; |
---|
636 | |
---|
637 | fprintf(f,"RPCIOD statistics:\n"); |
---|
638 | |
---|
639 | MU_LOCK(llock); |
---|
640 | for (s = rpcUdpServers; s; s=s->next) { |
---|
641 | fprintf(f,"\nServer -- %s:\n", s->name); |
---|
642 | fprintf(f," requests sent: %10ld, retransmitted: %10ld\n", |
---|
643 | s->requests, s->retrans); |
---|
644 | fprintf(f," timed out: %10ld, send errors: %10ld\n", |
---|
645 | s->timeouts, s->errors); |
---|
646 | fprintf(f," current retransmission interval: %dms\n", |
---|
647 | (unsigned)(s->retry_period * 1000 / ticksPerSec) ); |
---|
648 | } |
---|
649 | MU_UNLOCK(llock); |
---|
650 | |
---|
651 | return 0; |
---|
652 | } |
---|
653 | |
---|
/* Allocate and initialize a transaction object for RPC
 * (program, version) with an output buffer of 'size' bytes
 * (0 selects UDPMSGSIZE). The permanent RPC call header is
 * pre-encoded into the buffer and the transaction is entered
 * into the global XID hash table; the low LD_XACT_HASH bits
 * of the XID are the transaction's table slot.
 *
 * RETURNS: new transaction, or 0 if the header could not be
 *          encoded, the hash table is full, or the daemon's
 *          message queue is gone (module being unloaded).
 */
RpcUdpXact
rpcUdpXactCreate(
	u_long	program,
	u_long	version,
	u_long	size
	)
{
RpcUdpXact		rval=0;
struct rpc_msg	header;
register int	i,j;

	if (!size)
		size = UDPMSGSIZE;
	/* word align */
	size = (size + 3) & ~3;

	/* obuf is the trailing member; allocate the record with the
	 * encode buffer appended in place of the 1-byte placeholder
	 */
	rval = (RpcUdpXact)MY_CALLOC(1,sizeof(*rval) - sizeof(rval->obuf) + size);

	if (rval) {

		header.rm_xid             = 0;
		header.rm_direction       = CALL;
		header.rm_call.cb_rpcvers = RPC_MSG_VERSION;
		header.rm_call.cb_prog    = program;
		header.rm_call.cb_vers    = version;
		xdrmem_create(&(rval->xdrs), rval->obuf.buf, size, XDR_ENCODE);

		if (!xdr_callhdr(&(rval->xdrs), &header)) {
			MY_FREE(rval);
			return 0;
		}
		/* pick a free table slot and initialize the XID */
		/* NOTE(review): this first assignment is immediately
		 * overwritten below and appears to be dead code
		 */
		rval->obuf.xid = time(0) ^ (unsigned long)rval;
		MU_LOCK(hlock);
		rval->obuf.xid = (xidHashSeed++ ^ ((unsigned long)rval>>10)) & XACT_HASH_MSK;
		/* linear-probe the table starting just past the seed slot;
		 * 'j' remembers the start so a full sweep terminates
		 */
		i=j=(rval->obuf.xid & XACT_HASH_MSK);
		if (msgQ) {
			/* if there's no message queue, refuse to
			 * give them transactions; we might be in the process to
			 * go away...
			 */
			do {
				i=(i+1) & XACT_HASH_MSK; /* cheap modulo */
				if (!xactHashTbl[i]) {
#if (DEBUG) & DEBUG_TRACE_XACT
					fprintf(stderr,"RPCIO: entering index %i, val %x\n",i,rval);
#endif
					xactHashTbl[i]=rval;
					j=-1; /* mark success; i==j below means 'table full' */
					break;
				}
			} while (i!=j);
		}
		MU_UNLOCK(hlock);
		if (i==j) {
			/* no free slot (or no msgQ): undo and fail */
			XDR_DESTROY(&rval->xdrs);
			MY_FREE(rval);
			return 0;
		}
		/* final XID: upper bits from the slot's last-used XID
		 * (see rpcUdpXactDestroy), low bits are the slot index
		 */
		rval->obuf.xid  = xidUpper[i] | i;
		rval->xdrpos    = XDR_GETPOS(&(rval->xdrs));
		rval->obufsize  = size;
	}
	return rval;
}
---|
719 | |
---|
/* Destroy a transaction: remove it from the global XID hash
 * table (remembering the upper XID bits so the next XACT in
 * this slot does not reuse the same XID), release any pending
 * input buffer, and free the object.
 * The caller must own 'xact' (no reply may still be in flight).
 */
void
rpcUdpXactDestroy(RpcUdpXact xact)
{
int i = xact->obuf.xid & XACT_HASH_MSK;

#if (DEBUG) & DEBUG_TRACE_XACT
		fprintf(stderr,"RPCIO: removing index %i, val %x\n",i,xact);
#endif

		/* NOTE(review): this check reads the table before 'hlock'
		 * is taken below -- presumably benign because only the
		 * owner ever removes a XACT; confirm.
		 */
		ASSERT( xactHashTbl[i]==xact );

		MU_LOCK(hlock);
		xactHashTbl[i]=0;
		/* remember XID we used last time so we can avoid
		 * reusing the same one (incremented by rpcUdpSend routine)
		 */
		xidUpper[i]   = xact->obuf.xid & ~XACT_HASH_MSK;
		MU_UNLOCK(hlock);

		bufFree(&xact->ibuf);

		XDR_DESTROY(&xact->xdrs);
		MY_FREE(xact);
}
---|
744 | |
---|
745 | |
---|
746 | |
---|
747 | /* Send a transaction, i.e. enqueue it to the |
---|
748 | * RPC daemon who will actually send it. |
---|
749 | */ |
---|
750 | enum clnt_stat |
---|
751 | rpcUdpSend( |
---|
752 | RpcUdpXact xact, |
---|
753 | RpcUdpServer srvr, |
---|
754 | struct timeval *timeout, |
---|
755 | u_long proc, |
---|
756 | xdrproc_t xres, caddr_t pres, |
---|
757 | xdrproc_t xargs, caddr_t pargs, |
---|
758 | ... |
---|
759 | ) |
---|
760 | { |
---|
761 | register XDR *xdrs; |
---|
762 | unsigned long ms; |
---|
763 | va_list ap; |
---|
764 | |
---|
765 | va_start(ap,pargs); |
---|
766 | |
---|
767 | if (!timeout) |
---|
768 | timeout = RPCIOD_DEFAULT_TIMEOUT; |
---|
769 | |
---|
770 | ms = 1000 * timeout->tv_sec + timeout->tv_usec/1000; |
---|
771 | |
---|
772 | /* round lifetime to closest # of ticks */ |
---|
773 | xact->lifetime = (ms * ticksPerSec + 500) / 1000; |
---|
774 | if ( 0 == xact->lifetime ) |
---|
775 | xact->lifetime = 1; |
---|
776 | |
---|
777 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
778 | { |
---|
779 | static int once=0; |
---|
780 | if (!once++) { |
---|
781 | fprintf(stderr, |
---|
782 | "Initial lifetime: %i (ticks)\n", |
---|
783 | xact->lifetime); |
---|
784 | } |
---|
785 | } |
---|
786 | #endif |
---|
787 | |
---|
788 | xact->tolive = xact->lifetime; |
---|
789 | |
---|
790 | xact->xres = xres; |
---|
791 | xact->pres = pres; |
---|
792 | xact->server = srvr; |
---|
793 | |
---|
794 | xdrs = &xact->xdrs; |
---|
795 | xdrs->x_op = XDR_ENCODE; |
---|
796 | /* increment transaction ID */ |
---|
797 | xact->obuf.xid += XACT_HASHS; |
---|
798 | XDR_SETPOS(xdrs, xact->xdrpos); |
---|
799 | if ( !XDR_PUTLONG(xdrs,(long*)&proc) || !locked_marshal(srvr, xdrs) || |
---|
800 | !xargs(xdrs, pargs) ) { |
---|
801 | va_end(ap); |
---|
802 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
803 | } |
---|
804 | while ((xargs=va_arg(ap,xdrproc_t))) { |
---|
805 | if (!xargs(xdrs, va_arg(ap,caddr_t))) |
---|
806 | va_end(ap); |
---|
807 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
808 | } |
---|
809 | |
---|
810 | va_end(ap); |
---|
811 | |
---|
812 | rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor); |
---|
813 | if ( rtems_message_queue_send( msgQ, &xact, sizeof(xact)) ) { |
---|
814 | return RPC_CANTSEND; |
---|
815 | } |
---|
816 | /* wakeup the rpciod */ |
---|
817 | ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) ); |
---|
818 | |
---|
819 | return RPC_SUCCESS; |
---|
820 | } |
---|
821 | |
---|
822 | /* Block for the RPC reply to an outstanding |
---|
823 | * transaction. |
---|
824 | * The caller is woken by the RPC daemon either |
---|
825 | * upon reception of the reply or on timeout. |
---|
826 | */ |
---|
/* Block for the RPC reply to an outstanding transaction: wait for
 * RTEMS_RPC_EVENT from the daemon, then decode the reply header
 * and results (via xact->xres/pres set up by rpcUdpSend) out of
 * xact->ibuf, validate the verifier, and release the input buffer.
 * On an authentication error the credentials are refreshed and the
 * XACT re-queued to the daemon.
 *
 * RETURNS: the final RPC status of the transaction.
 */
enum clnt_stat
rpcUdpRcv(RpcUdpXact xact)
{
int					refresh;
XDR					reply_xdrs;
struct rpc_msg		reply_msg;
rtems_event_set		gotEvents;

	refresh = 0;

	do {

	/* block for the reply */
	ASSERT( RTEMS_SUCCESSFUL ==
		rtems_event_receive(
			RTEMS_RPC_EVENT,
			RTEMS_WAIT | RTEMS_EVENT_ANY,
			RTEMS_NO_TIMEOUT,
			&gotEvents) );

	/* a nonzero status was set by the daemon (e.g. timeout);
	 * no reply buffer to decode in that case
	 */
	if (xact->status.re_status) {
#ifdef MBUF_RX
		/* add paranoia */
		ASSERT( !xact->ibuf );
#endif
		return xact->status.re_status;
	}

#ifdef MBUF_RX
	xdrmbuf_create(&reply_xdrs, xact->ibuf, XDR_DECODE);
#else
	xdrmem_create(&reply_xdrs, xact->ibuf->buf, xact->ibufsize, XDR_DECODE);
#endif

	reply_msg.acpted_rply.ar_verf = _null_auth;
	reply_msg.acpted_rply.ar_results.where = xact->pres;
	reply_msg.acpted_rply.ar_results.proc = xact->xres;

	if (xdr_replymsg(&reply_xdrs, &reply_msg)) {
		/* OK */
		_seterr_reply(&reply_msg, &xact->status);
		if (RPC_SUCCESS == xact->status.re_status) {
			if ( !locked_validate(xact->server,
								&reply_msg.acpted_rply.ar_verf) ) {
				xact->status.re_status = RPC_AUTHERROR;
				xact->status.re_why = AUTH_INVALIDRESP;
			}
			/* free the verifier storage xdr_replymsg allocated */
			if (reply_msg.acpted_rply.ar_verf.oa_base) {
				reply_xdrs.x_op = XDR_FREE;
				xdr_opaque_auth(&reply_xdrs, &reply_msg.acpted_rply.ar_verf);
			}
			refresh = 0;
		} else {
			/* should we try to refresh our credentials ? */
			if ( !refresh ) {
				/* had never tried before */
				refresh = RPCIOD_REFRESH;
			}
		}
	} else {
		/* decode failed; run the decoder in FREE mode to release
		 * anything it partially allocated
		 */
		reply_xdrs.x_op = XDR_FREE;
		xdr_replymsg(&reply_xdrs, &reply_msg);
		xact->status.re_status = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&reply_xdrs);

	bufFree(&xact->ibuf);

#ifndef MBUF_RX
	xact->ibufsize = 0;
#endif

	/* on an auth error: refresh credentials and resubmit the XACT */
	if (refresh && locked_refresh(xact->server)) {
		rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor);
		if ( rtems_message_queue_send(msgQ, &xact, sizeof(xact)) ) {
			return RPC_CANTSEND;
		}
		/* wakeup the rpciod */
		fprintf(stderr,"RPCIO INFO: refreshing my AUTH\n");
		ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) );
	}

	/* NOTE(review): the '0 &&' makes this loop execute exactly once,
	 * so after a refresh the function returns the auth-error status
	 * without waiting for the resubmitted XACT -- looks like the
	 * retry was deliberately disabled; confirm intent before
	 * changing.
	 */
	} while ( 0 && refresh-- > 0 );

	return xact->status.re_status;
}
---|
913 | |
---|
914 | |
---|
915 | /* On RTEMS, I'm told to avoid select(); this seems to |
---|
916 | * be more efficient |
---|
917 | */ |
---|
918 | static void |
---|
919 | rxWakeupCB(struct socket *sock, caddr_t arg) |
---|
920 | { |
---|
921 | rtems_event_send((rtems_id)arg, RPCIOD_RX_EVENT); |
---|
922 | } |
---|
923 | |
---|
924 | int |
---|
925 | rpcUdpInit(void) |
---|
926 | { |
---|
927 | int noblock = 1; |
---|
928 | struct sockwakeup wkup; |
---|
929 | |
---|
930 | if (ourSock < 0) { |
---|
931 | fprintf(stderr,"RTEMS-RPCIOD $Release$, " \ |
---|
932 | "Till Straumann, Stanford/SLAC/SSRL 2002, " \ |
---|
933 | "See LICENSE file for licensing info.\n"); |
---|
934 | |
---|
935 | ourSock=socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); |
---|
936 | if (ourSock>=0) { |
---|
937 | bindresvport(ourSock,(struct sockaddr_in*)0); |
---|
938 | assert( 0==ioctl(ourSock, FIONBIO, (char*)&noblock) ); |
---|
939 | /* assume nobody tampers with the clock !! */ |
---|
940 | assert( RTEMS_SUCCESSFUL == rtems_clock_get( |
---|
941 | RTEMS_CLOCK_GET_TICKS_PER_SECOND, |
---|
942 | &ticksPerSec)); |
---|
943 | MU_CREAT( &hlock ); |
---|
944 | MU_CREAT( &llock ); |
---|
945 | |
---|
946 | if ( !rpciodPriority ) { |
---|
947 | /* use configured networking priority */ |
---|
948 | if ( ! (rpciodPriority = rtems_bsdnet_config.network_task_priority) ) |
---|
949 | rpciodPriority = RPCIOD_PRIO; /* fallback value */ |
---|
950 | } |
---|
951 | |
---|
952 | assert( RTEMS_SUCCESSFUL == rtems_task_create( |
---|
953 | rtems_build_name('R','P','C','d'), |
---|
954 | rpciodPriority, |
---|
955 | RPCIOD_STACK, |
---|
956 | RTEMS_DEFAULT_MODES, |
---|
957 | /* fprintf saves/restores FP registers on PPC :-( */ |
---|
958 | RTEMS_DEFAULT_ATTRIBUTES | RTEMS_FLOATING_POINT, |
---|
959 | &rpciod) ); |
---|
960 | wkup.sw_pfn = rxWakeupCB; |
---|
961 | wkup.sw_arg = (caddr_t)rpciod; |
---|
962 | assert( 0==setsockopt(ourSock, SOL_SOCKET, SO_RCVWAKEUP, &wkup, sizeof(wkup)) ); |
---|
963 | assert( RTEMS_SUCCESSFUL == rtems_message_queue_create( |
---|
964 | rtems_build_name('R','P','C','q'), |
---|
965 | RPCIOD_QDEPTH, |
---|
966 | sizeof(RpcUdpXact), |
---|
967 | RTEMS_DEFAULT_ATTRIBUTES, |
---|
968 | &msgQ) ); |
---|
969 | assert( RTEMS_SUCCESSFUL == rtems_task_start( |
---|
970 | rpciod, |
---|
971 | rpcio_daemon, |
---|
972 | 0 ) ); |
---|
973 | |
---|
974 | } else { |
---|
975 | return -1; |
---|
976 | } |
---|
977 | } |
---|
978 | return 0; |
---|
979 | } |
---|
980 | |
---|
/* Shut down the RPC daemon.
 *
 * A temporary semaphore 'fini' is created for the daemon to signal
 * its exit; we then post the KILL event and wait (up to 5 seconds)
 * for the handshake. On an orderly shutdown the daemon clears the
 * global 'msgQ' itself before releasing 'fini' - hence a zero
 * 'msgQ' below means the daemon is gone (it merely suspended
 * itself) and its task may be deleted.
 *
 * Returns 0 on success, nonzero if transactions were still
 * outstanding and the daemon refused to die.
 */
int
rpcUdpCleanup(void)
{
	rtems_semaphore_create(
			rtems_build_name('R','P','C','f'),
			0,
			RTEMS_DEFAULT_ATTRIBUTES,
			0,
			&fini);
	rtems_event_send(rpciod, RPCIOD_KILL_EVENT);
	/* synchronize with daemon */
	rtems_semaphore_obtain(fini, RTEMS_WAIT, 5*ticksPerSec);
	/* if the message queue is still there, something went wrong */
	if (!msgQ) {
		rtems_task_delete(rpciod);
	}
	rtems_semaphore_delete(fini);
	return (msgQ !=0);
}
---|
1000 | |
---|
1001 | /* Another API - simpler but less efficient. |
---|
1002 | * For each RPCall, a server and a Xact |
---|
1003 | * are created and destroyed on the fly. |
---|
1004 | * |
---|
1005 | * This should be used for infrequent calls |
---|
1006 | * (e.g. a NFS mount request). |
---|
1007 | * |
---|
1008 | * This is roughly compatible with the original |
---|
1009 | * clnt_call() etc. API - but it uses our |
---|
1010 | * daemon and is fully reentrant. |
---|
1011 | */ |
---|
1012 | enum clnt_stat |
---|
1013 | rpcUdpClntCreate( |
---|
1014 | struct sockaddr_in *psaddr, |
---|
1015 | int prog, |
---|
1016 | int vers, |
---|
1017 | u_long uid, |
---|
1018 | u_long gid, |
---|
1019 | RpcUdpClnt *pclnt |
---|
1020 | ) |
---|
1021 | { |
---|
1022 | RpcUdpXact x; |
---|
1023 | RpcUdpServer s; |
---|
1024 | enum clnt_stat err; |
---|
1025 | |
---|
1026 | if ( RPC_SUCCESS != (err=rpcUdpServerCreate(psaddr, prog, vers, uid, gid, &s)) ) |
---|
1027 | return err; |
---|
1028 | |
---|
1029 | if ( !(x=rpcUdpXactCreate(prog, vers, UDPMSGSIZE)) ) { |
---|
1030 | rpcUdpServerDestroy(s); |
---|
1031 | return RPC_FAILED; |
---|
1032 | } |
---|
1033 | /* TODO: could maintain a server cache */ |
---|
1034 | |
---|
1035 | x->server = s; |
---|
1036 | |
---|
1037 | *pclnt = x; |
---|
1038 | |
---|
1039 | return RPC_SUCCESS; |
---|
1040 | } |
---|
1041 | |
---|
/* Destroy a client created by rpcUdpClntCreate(): releases the
 * server handle first, then the transaction itself.
 */
void
rpcUdpClntDestroy(RpcUdpClnt xact)
{
	rpcUdpServerDestroy(xact->server);
	rpcUdpXactDestroy(xact);
}
---|
1048 | |
---|
1049 | enum clnt_stat |
---|
1050 | rpcUdpClntCall( |
---|
1051 | RpcUdpClnt xact, |
---|
1052 | u_long proc, |
---|
1053 | XdrProcT xargs, |
---|
1054 | CaddrT pargs, |
---|
1055 | XdrProcT xres, |
---|
1056 | CaddrT pres, |
---|
1057 | struct timeval *timeout |
---|
1058 | ) |
---|
1059 | { |
---|
1060 | enum clnt_stat stat; |
---|
1061 | |
---|
1062 | if ( (stat = rpcUdpSend(xact, xact->server, timeout, proc, |
---|
1063 | xres, pres, |
---|
1064 | xargs, pargs, |
---|
1065 | 0)) ) { |
---|
1066 | fprintf(stderr,"RPCIO Send failed: %i\n",stat); |
---|
1067 | return stat; |
---|
1068 | } |
---|
1069 | return rpcUdpRcv(xact); |
---|
1070 | } |
---|
1071 | |
---|
1072 | /* a yet simpler interface */ |
---|
1073 | enum clnt_stat |
---|
1074 | rpcUdpCallRp( |
---|
1075 | struct sockaddr_in *psrvr, |
---|
1076 | u_long prog, |
---|
1077 | u_long vers, |
---|
1078 | u_long proc, |
---|
1079 | XdrProcT xargs, |
---|
1080 | CaddrT pargs, |
---|
1081 | XdrProcT xres, |
---|
1082 | CaddrT pres, |
---|
1083 | u_long uid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1084 | u_long gid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1085 | struct timeval *timeout /* NULL picks default */ |
---|
1086 | ) |
---|
1087 | { |
---|
1088 | RpcUdpClnt clp; |
---|
1089 | enum clnt_stat stat; |
---|
1090 | |
---|
1091 | stat = rpcUdpClntCreate( |
---|
1092 | psrvr, |
---|
1093 | prog, |
---|
1094 | vers, |
---|
1095 | uid, |
---|
1096 | gid, |
---|
1097 | &clp); |
---|
1098 | |
---|
1099 | if ( RPC_SUCCESS != stat ) |
---|
1100 | return stat; |
---|
1101 | |
---|
1102 | stat = rpcUdpClntCall( |
---|
1103 | clp, |
---|
1104 | proc, |
---|
1105 | xargs, pargs, |
---|
1106 | xres, pres, |
---|
1107 | timeout); |
---|
1108 | |
---|
1109 | rpcUdpClntDestroy(clp); |
---|
1110 | |
---|
1111 | return stat; |
---|
1112 | } |
---|
1113 | |
---|
1114 | /* linked list primitives */ |
---|
1115 | static void |
---|
1116 | nodeXtract(ListNode n) |
---|
1117 | { |
---|
1118 | if (n->prev) |
---|
1119 | n->prev->next = n->next; |
---|
1120 | if (n->next) |
---|
1121 | n->next->prev = n->prev; |
---|
1122 | n->next = n->prev = 0; |
---|
1123 | } |
---|
1124 | |
---|
1125 | static void |
---|
1126 | nodeAppend(ListNode l, ListNode n) |
---|
1127 | { |
---|
1128 | if ( (n->next = l->next) ) |
---|
1129 | n->next->prev = n; |
---|
1130 | l->next = n; |
---|
1131 | n->prev = l; |
---|
1132 | |
---|
1133 | } |
---|
1134 | |
---|
1135 | /* this code does the work */ |
---|
/* RPC daemon task body - the single task owning the shared UDP
 * socket. Per iteration it:
 *  - sleeps until an RX/TX/KILL event or the next retransmission
 *    deadline,
 *  - (RX) matches incoming replies to transactions and wakes the
 *    requestor tasks, adapting each server's retry period from the
 *    measured round-trip time,
 *  - (TX) dequeues newly submitted transactions from 'msgQ',
 *  - works the deadline-sorted retransmission list: times out or
 *    (re)sends due transactions and re-inserts them sorted,
 *  - (KILL) shuts down once no transaction occupies any hash slot.
 *
 * 'arg' is unused (required by the rtems task entry signature).
 * All tick arithmetic is relative to 'then' and periodically
 * re-based ("epoch") to survive tick-counter rollover.
 */
static void
rpcio_daemon(rtems_task_argument arg)
{
	rtems_status_code stat;
	RpcUdpXact        xact;
	RpcUdpServer      srv;
	rtems_interval    next_retrans, then, unow;
	long              now;  /* need to do signed comparison with age! */
	rtems_event_set   events;
	ListNode          newList;
	size_t            size;
	rtems_id          q          = 0;
	ListNodeRec       listHead   = {0};
	unsigned long     epoch      = RPCIOD_EPOCH_SECS * ticksPerSec;
	unsigned long     max_period = RPCIOD_RETX_CAP_S * ticksPerSec;

	assert( RTEMS_SUCCESSFUL == rtems_clock_get(
					RTEMS_CLOCK_GET_TICKS_SINCE_BOOT,
					&then) );

	for (next_retrans = epoch;;) {

		/* sleep until work arrives or the next retransmission is due */
		if ( RTEMS_SUCCESSFUL !=
			 (stat = rtems_event_receive(
						RPCIOD_RX_EVENT | RPCIOD_TX_EVENT | RPCIOD_KILL_EVENT,
						RTEMS_WAIT | RTEMS_EVENT_ANY,
						next_retrans,
						&events)) ) {
			ASSERT( RTEMS_TIMEOUT == stat );
			events = 0;
		}

		if (events & RPCIOD_KILL_EVENT) {
			int i;

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got KILL event\n");
#endif

			/* refuse to die while any hash slot still holds a transaction */
			MU_LOCK(hlock);
			for (i=XACT_HASHS-1; i>=0; i--) {
				if (xactHashTbl[i]) {
					break;
				}
			}
			if (i<0) {
				/* prevent them from creating and enqueueing more messages */
				q=msgQ;
				/* messages queued after we executed this assignment will fail */
				msgQ=0;
			}
			MU_UNLOCK(hlock);
			if (i>=0) {
				fprintf(stderr,"RPCIO There are still transactions circulating; I refuse to go away\n");
				fprintf(stderr,"(1st in slot %i)\n",i);
				rtems_semaphore_release(fini);
			} else {
				break;
			}
		}

		ASSERT( RTEMS_SUCCESSFUL == rtems_clock_get(
						RTEMS_CLOCK_GET_TICKS_SINCE_BOOT,
						&unow ) );

		/* measure everything relative to then to protect against
		 * rollover
		 */
		now = unow - then;

		/* NOTE: we don't lock the hash table while we are operating
		 * on transactions; the paradigm is that we 'own' a particular
		 * transaction (and hence it's hash table slot) from the
		 * time the xact was put into the message queue until we
		 * wake up the requestor.
		 */

		if (RPCIOD_RX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got RX event\n");
#endif

			/* drain the socket; sockRcv() yields one matched xact per call */
			while ((xact=sockRcv())) {

				/* extract from the retransmission list */
				nodeXtract(&xact->node);

				/* change the ID - there might already be
				 * a retransmission on the way. When it's
				 * reply arrives we must not find it's ID
				 * in the hashtable
				 */
				xact->obuf.xid += XACT_HASHS;

				xact->status.re_status = RPC_SUCCESS;

				/* calculate roundtrip ticks */
				xact->trip = now - xact->trip;

				srv = xact->server;

				/* adjust the server's retry period */
				{
					register TimeoutT rtry = srv->retry_period;
					register TimeoutT trip = xact->trip;

					ASSERT( trip >= 0 );

					if ( 0==trip )
						trip = 1;

					/* retry_new = 0.75*retry_old + 0.25 * 8 * roundrip */
					rtry = (3*rtry + (trip << 3)) >> 2;

					if ( rtry > max_period )
						rtry = max_period;

					srv->retry_period = rtry;
				}

				/* wakeup requestor */
				rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
			}
		}

		if (RPCIOD_TX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got TX event\n");
#endif

			/* pick up newly submitted transactions */
			while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
											msgQ,
											&xact,
											&size,
											RTEMS_NO_WAIT,
											RTEMS_NO_TIMEOUT)) {
				/* put to the head of timeout q */
				nodeAppend(&listHead, &xact->node);

				xact->age  = now;
				xact->trip = FIRST_ATTEMPT;
			}
		}


		/* work the timeout q: everything at the head whose deadline
		 * ('age') has passed is either timed out or retransmitted
		 */
		newList = 0;
		for ( xact=(RpcUdpXact)listHead.next;
			  xact && xact->age <= now;
			  xact=(RpcUdpXact)listHead.next ) {

			/* extract from the list */
			nodeXtract(&xact->node);

			srv = xact->server;

			if (xact->tolive < 0) {
				/* this one timed out */
				xact->status.re_errno  = ETIMEDOUT;
				xact->status.re_status = RPC_TIMEDOUT;

				srv->timeouts++;

				/* Change the ID - there might still be
				 * a reply on the way. When it arrives we
				 * must not find it's ID in the hash table
				 *
				 * Thanks to Steven Johnson for hunting this
				 * one down.
				 */
				xact->obuf.xid += XACT_HASHS;

#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,"RPCIO XACT timed out; waking up requestor\n");
#endif
				if ( rtems_event_send(xact->requestor, RTEMS_RPC_EVENT) ) {
					rtems_panic("RPCIO PANIC file %s line: %i, requestor id was 0x%08x",
								__FILE__,
								__LINE__,
								xact->requestor);
				}

			} else {
				int len;

				len = (int)XDR_GETPOS(&xact->xdrs);

#ifdef MBUF_TX
				xact->refcnt = 1; /* sendto itself */
#endif
				/* (re)send the pre-encoded request datagram */
				if ( len != SENDTO( ourSock,
									xact->obuf.buf,
									len,
									0,
									&srv->addr.sa,
									sizeof(srv->addr.sin)
#ifdef MBUF_TX
									, xact,
									paranoia_free,
									paranoia_ref
#endif
								) ) {

					xact->status.re_errno  = errno;
					xact->status.re_status = RPC_CANTSEND;
					srv->errors++;

					/* wakeup requestor */
					fprintf(stderr,"RPCIO: SEND failure\n");
					ASSERT( RTEMS_SUCCESSFUL ==
							rtems_event_send(xact->requestor, RTEMS_RPC_EVENT) );

				} else {
					/* send successful; calculate retransmission time
					 * and enqueue to temporary list
					 */
					if (FIRST_ATTEMPT != xact->trip) {
#if (DEBUG) & DEBUG_TIMEOUT
						fprintf(stderr,
								"timed out; tolive is %i (ticks), retry period is %i (ticks)\n",
								xact->tolive,
								srv->retry_period);
#endif
						/* this is a real retry; we backup
						 * the server's retry interval
						 */
						if ( srv->retry_period < max_period ) {

							/* If multiple transactions for this server
							 * fail (e.g. because it died) this will
							 * back-off very agressively (doubling
							 * the retransmission period for every
							 * timed out transaction up to the CAP limit)
							 * which is desirable - single packet failure
							 * is treated more gracefully by this algorithm.
							 */

							srv->retry_period<<=1;
#if (DEBUG) & DEBUG_TIMEOUT
							fprintf(stderr,
									"adjusted to; retry period %i\n",
									srv->retry_period);
#endif
						} else {
							/* never wait longer than RPCIOD_RETX_CAP_S seconds */
							fprintf(stderr,
									"RPCIO: server '%s' not responding - still trying\n",
									srv->name);
						}
						if ( 0 == ++srv->retrans % 1000) {
							fprintf(stderr,
									"RPCIO - statistics: already %li retries to server %s\n",
									srv->retrans,
									srv->name);
						}
					} else {
						srv->requests++;
					}
					xact->trip = now;
					/* next deadline is retry period capped by the
					 * transaction's remaining lifetime
					 */
					{
						long capped_period = srv->retry_period;
						if ( xact->lifetime < capped_period )
							capped_period = xact->lifetime;
						xact->age     = now + capped_period;
						xact->tolive -= capped_period;
					}
					/* enqueue to the list of newly sent transactions */
					xact->node.next = newList;
					newList = &xact->node;
#if (DEBUG) & DEBUG_TIMEOUT
					fprintf(stderr,
							"XACT (0x%08x) age is 0x%x, now: 0x%x\n",
							xact,
							xact->age,
							now);
#endif
				}
			}
		}

		/* insert the newly sent transactions into the
		 * sorted retransmission list
		 */
		for (; (xact = (RpcUdpXact)newList); ) {
			register ListNode p,n;
			newList = newList->next;
			for ( p=&listHead; (n=p->next) && xact->age > ((RpcUdpXact)n)->age; p=n )
				/* nothing else to do */;
			nodeAppend(p, &xact->node);
		}

		if (now > epoch) {
			/* every now and then, readjust the epoch */
			register ListNode n;
			then += now;
			for (n=listHead.next; n; n=n->next) {
				/* readjust outstanding time intervals subject to the
				 * condition that the 'absolute' time must remain
				 * the same. 'age' and 'trip' are measured with
				 * respect to 'then' - hence:
				 *
				 * abs_age == old_age + old_then == new_age + new_then
				 *
				 * ==> new_age = old_age + old_then - new_then == old_age - 'now'
				 */
				((RpcUdpXact)n)->age  -= now;
				((RpcUdpXact)n)->trip -= now;
#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,
						"readjusted XACT (0x%08x); age is 0x%x, trip: 0x%x now: 0x%x\n",
						(RpcUdpXact)n,
						((RpcUdpXact)n)->trip,
						((RpcUdpXact)n)->age,
						now);
#endif
			}
			now = 0;
		}

		next_retrans = listHead.next ?
						((RpcUdpXact)listHead.next)->age - now :
						epoch; /* make sure we don't miss updating the epoch */
#if (DEBUG) & DEBUG_TIMEOUT
		fprintf(stderr,"RPCIO: next timeout is %x\n",next_retrans);
#endif
	}
	/* close our socket; shut down the receiver */
	close(ourSock);

#if 0 /* if we get here, no transactions exist, hence there can be none
	   * in the queue whatsoever
	   */
	/* flush the message queue */
	while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
									q,
									&xact,
									&size,
									RTEMS_NO_WAIT,
									RTEMS_NO_TIMEOUT)) {
		/* TODO enque xact */
	}

	/* flush all outstanding transactions */

	for (xact=((RpcUdpXact)listHead.next); xact; xact=((RpcUdpXact)xact->node.next)) {
		xact->status.re_status = RPC_TIMEDOUT;
		rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
	}
#endif

	rtems_message_queue_delete(q);

	/* NOTE(review): only 'hlock' is destroyed here although
	 * rpcUdpInit() also created 'llock' - confirm that leaving
	 * 'llock' alive across cleanup/re-init is intentional.
	 */
	MU_DESTROY(hlock);

	fprintf(stderr,"RPC daemon exited...\n");

	/* handshake with rpcUdpCleanup(), then park this task forever */
	rtems_semaphore_release(fini);
	rtems_task_suspend(RTEMS_SELF);
}
---|
1497 | |
---|
1498 | |
---|
1499 | /* support for transaction 'pools'. A number of XACT objects |
---|
1500 | * is always kept around. The initial number is 0 but it |
---|
1501 | * is allowed to grow up to a maximum. |
---|
1502 | * If the need grows beyond the maximum, behavior depends: |
---|
1503 | * Users can either block until a transaction becomes available, |
---|
1504 | * they can create a new XACT on the fly or get an error |
---|
1505 | * if no free XACT is available from the pool. |
---|
1506 | */ |
---|
1507 | |
---|
/* Create a pool of pre-allocated transactions for (prog, version)
 * RPCs of up to 'xactsize' bytes; idle transactions are parked in
 * a message queue holding at most 'poolsize' entries.
 *
 * NOTE(review): both the MY_MALLOC() result and the queue creation
 * are checked only inside ASSERT(); if ASSERT ever compiles to a
 * no-op, a failed allocation would be dereferenced right below -
 * confirm ASSERT is unconditionally enabled in this build.
 */
RpcUdpXactPool
rpcUdpXactPoolCreate(
	int prog,     int version,
	int xactsize, int poolsize)
{
	RpcUdpXactPool rval = MY_MALLOC(sizeof(*rval));

	ASSERT( rval &&
			RTEMS_SUCCESSFUL == rtems_message_queue_create(
									rtems_build_name('R','P','C','p'),
									poolsize,
									sizeof(RpcUdpXact),
									RTEMS_DEFAULT_ATTRIBUTES,
									&rval->box) );
	rval->prog     = prog;
	rval->version  = version;
	rval->xactSize = xactsize;
	return rval;
}
---|
1527 | |
---|
1528 | void |
---|
1529 | rpcUdpXactPoolDestroy(RpcUdpXactPool pool) |
---|
1530 | { |
---|
1531 | RpcUdpXact xact; |
---|
1532 | |
---|
1533 | while ((xact = rpcUdpXactPoolGet(pool, XactGetFail))) { |
---|
1534 | rpcUdpXactDestroy(xact); |
---|
1535 | } |
---|
1536 | rtems_message_queue_delete(pool->box); |
---|
1537 | MY_FREE(pool); |
---|
1538 | } |
---|
1539 | |
---|
1540 | RpcUdpXact |
---|
1541 | rpcUdpXactPoolGet(RpcUdpXactPool pool, XactPoolGetMode mode) |
---|
1542 | { |
---|
1543 | RpcUdpXact xact = 0; |
---|
1544 | size_t size; |
---|
1545 | |
---|
1546 | if (RTEMS_SUCCESSFUL != rtems_message_queue_receive( |
---|
1547 | pool->box, |
---|
1548 | &xact, |
---|
1549 | &size, |
---|
1550 | XactGetWait == mode ? |
---|
1551 | RTEMS_WAIT : RTEMS_NO_WAIT, |
---|
1552 | RTEMS_NO_TIMEOUT)) { |
---|
1553 | |
---|
1554 | /* nothing found in box; should we create a new one ? */ |
---|
1555 | |
---|
1556 | xact = (XactGetCreate == mode) ? |
---|
1557 | rpcUdpXactCreate( |
---|
1558 | pool->prog, |
---|
1559 | pool->version, |
---|
1560 | pool->xactSize) : 0 ; |
---|
1561 | if (xact) |
---|
1562 | xact->pool = pool; |
---|
1563 | |
---|
1564 | } |
---|
1565 | return xact; |
---|
1566 | } |
---|
1567 | |
---|
1568 | void |
---|
1569 | rpcUdpXactPoolPut(RpcUdpXact xact) |
---|
1570 | { |
---|
1571 | RpcUdpXactPool pool; |
---|
1572 | ASSERT( pool=xact->pool ); |
---|
1573 | if (RTEMS_SUCCESSFUL != rtems_message_queue_send( |
---|
1574 | pool->box, |
---|
1575 | &xact, |
---|
1576 | sizeof(xact))) |
---|
1577 | rpcUdpXactDestroy(xact); |
---|
1578 | } |
---|
1579 | |
---|
1580 | #ifdef MBUF_RX |
---|
1581 | |
---|
1582 | /* WORKAROUND: include sys/mbuf.h (or other bsdnet headers) only |
---|
1583 | * _after_ using malloc()/free() & friends because |
---|
1584 | * the RTEMS/BSDNET headers redefine those :-( |
---|
1585 | */ |
---|
1586 | |
---|
1587 | #define KERNEL |
---|
1588 | #define _KERNEL |
---|
1589 | #include <sys/mbuf.h> |
---|
1590 | |
---|
1591 | ssize_t |
---|
1592 | recv_mbuf_from(int s, struct mbuf **ppm, long len, struct sockaddr *fromaddr, int *fromlen); |
---|
1593 | |
---|
/* Free an mbuf chain receive buffer and clear the caller's
 * pointer; no-op if the pointer is already NULL.
 * m_freem() must run with the BSD networking semaphore held.
 */
static void
bufFree(struct mbuf **m)
{
	struct mbuf *chain = *m;

	if (!chain)
		return;

	rtems_bsdnet_semaphore_obtain();
	m_freem(chain);
	rtems_bsdnet_semaphore_release();

	*m = 0;
}
---|
1604 | #endif |
---|
1605 | |
---|
1606 | #ifdef MBUF_TX |
---|
/* MBUF_TX 'free' callback handed to SENDTO(); the xact owns its
 * output buffer so there is nothing to release - in DEBUG builds
 * we only verify the reference count and the recorded length.
 */
static void
paranoia_free(caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);

	ASSERT( --xact->refcnt >= 0 && size == len );
#endif
}
---|
1617 | |
---|
/* MBUF_TX 'ref' callback handed to SENDTO(); counterpart of
 * paranoia_free() - in DEBUG builds it bumps the reference count
 * and checks the recorded length, otherwise it is a no-op.
 */
static void
paranoia_ref (caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);
	ASSERT( size == len );
	xact->refcnt++;
#endif
}
---|
1628 | #endif |
---|
1629 | |
---|
1630 | /* receive from a socket and find |
---|
1631 | * the transaction corresponding to the |
---|
1632 | * transaction ID received in the server |
---|
1633 | * reply. |
---|
1634 | * |
---|
1635 | * The semantics of the 'pibuf' pointer are |
---|
1636 | * as follows: |
---|
1637 | * |
---|
1638 | * MBUF_RX: |
---|
1639 | * |
---|
1640 | */ |
---|
1641 | |
---|
1642 | #define RPCIOD_RXBUFSZ UDPMSGSIZE |
---|
1643 | |
---|
/* Receive one datagram from the shared socket and find the
 * transaction corresponding to the transaction ID (XID) in the
 * server reply.
 *
 * Loops until either a datagram matches an outstanding
 * transaction (returned with its 'ibuf' holding the reply) or
 * the non-blocking socket is drained / an error occurs (returns
 * 0). Mismatched datagrams are logged and dropped; a reply is
 * accepted only if the XID and the originating server port (and,
 * with REJECT_SERVERIP_MISMATCH, also the IP) agree with the
 * transaction found in the hash slot.
 */
static RpcUdpXact
sockRcv(void)
{
	int        len,i;
	u_long     xid;
	union {
		struct sockaddr_in sin;
		struct sockaddr    sa;
	}          fromAddr;
	int        fromLen = sizeof(fromAddr.sin);
	RxBuf      ibuf    = 0;
	RpcUdpXact xact    = 0;

	do {

		/* rcv_mbuf() and recvfrom() differ in that the
		 * former allocates buffers and passes them back
		 * to us whereas the latter requires us to provide
		 * buffer space.
		 * Hence, in the first case whe have to make sure
		 * no old buffer is leaked - in the second case,
		 * we might well re-use an old buffer but must
		 * make sure we have one allocated
		 */
#ifdef MBUF_RX
		if (ibuf)
			bufFree(&ibuf);

		len = recv_mbuf_from(
					ourSock,
					&ibuf,
					RPCIOD_RXBUFSZ,
					&fromAddr.sa,
					&fromLen);
#else
		if ( !ibuf )
			ibuf = (RpcBuf)MY_MALLOC(RPCIOD_RXBUFSZ);
		if ( !ibuf )
			goto cleanup; /* no memory - drop this message */

		len = recvfrom(ourSock,
					ibuf->buf,
					RPCIOD_RXBUFSZ,
					0,
					&fromAddr.sa,
					&fromLen);
#endif

		if (len <= 0) {
			/* EAGAIN just means the non-blocking socket is drained */
			if (EAGAIN != errno)
				fprintf(stderr,"RECV failed: %s\n",strerror(errno));
			goto cleanup;
		}

#if (DEBUG) & DEBUG_PACKLOSS
		if ( (unsigned)rand() < DEBUG_PACKLOSS_FRACT ) {
			/* lose packets once in a while */
			static int xxx = 0;
			if ( ++xxx % 16 == 0 )
				fprintf(stderr,"DEBUG: dropped %i packets, so far...\n",xxx);
			if ( ibuf )
				bufFree( &ibuf );
			continue;
		}
#endif

		/* hash slot is derived from the low bits of the XID */
		i = (xid=XID(ibuf)) & XACT_HASH_MSK;

		if ( !(xact=xactHashTbl[i]) ||
			   xact->obuf.xid != xid ||
#ifdef REJECT_SERVERIP_MISMATCH
			   xact->server->addr.sin.sin_addr.s_addr != fromAddr.sin.sin_addr.s_addr ||
#endif
			   xact->server->addr.sin.sin_port != fromAddr.sin.sin_port ) {

			if (xact) {
				/* same server but an XID we already retired (the daemon
				 * bumps the XID by XACT_HASHS per (re)send) means this is
				 * just a late/duplicate answer to an earlier retry
				 */
				if (
#ifdef REJECT_SERVERIP_MISMATCH
					xact->server->addr.sin.sin_addr.s_addr == fromAddr.sin.sin_addr.s_addr &&
#endif
					xact->server->addr.sin.sin_port == fromAddr.sin.sin_port &&
					( xact->obuf.xid == xid + XACT_HASHS ||
					  xact->obuf.xid == xid + 2*XACT_HASHS )
				   ) {
#ifndef DEBUG /* don't complain if it's just a late arrival of a retry */
					fprintf(stderr,"RPCIO - FYI sockRcv(): dropping late/redundant retry answer\n");
#endif
				} else {
					fprintf(stderr,"RPCIO WARNING sockRcv(): transaction mismatch\n");
					fprintf(stderr,"xact: xid 0x%08lx -- got 0x%08lx\n",
									xact->obuf.xid, xid);
					fprintf(stderr,"xact: addr 0x%08lx -- got 0x%08lx\n",
									xact->server->addr.sin.sin_addr.s_addr,
									fromAddr.sin.sin_addr.s_addr);
					fprintf(stderr,"xact: port 0x%08x -- got 0x%08x\n",
									xact->server->addr.sin.sin_port,
									fromAddr.sin.sin_port);
				}
			} else {
				fprintf(stderr,
						"RPCIO WARNING sockRcv(): got xid 0x%08lx but its slot is empty\n",
						xid);
			}
			/* forget about this one and try again */
			xact = 0;
		}

	} while ( !xact );

	/* hand the buffer over to the matched transaction */
	xact->ibuf = ibuf;
#ifndef MBUF_RX
	xact->ibufsize = RPCIOD_RXBUFSZ;
#endif

	return xact;

cleanup:

	bufFree(&ibuf);

	return 0;
}
---|
1766 | |
---|
1767 | |
---|
1768 | #include <rtems/rtems_bsdnet_internal.h> |
---|
1769 | /* double check the event configuration; should probably globally |
---|
1770 | * manage system events!! |
---|
1771 | * We do this at the end of the file for the same reason we had |
---|
1772 | * included mbuf.h only a couple of lines above - see comment up |
---|
1773 | * there... |
---|
1774 | */ |
---|
1775 | #if RTEMS_RPC_EVENT & SOSLEEP_EVENT & SBWAIT_EVENT & NETISR_EVENTS |
---|
1776 | #error ILLEGAL EVENT CONFIGURATION |
---|
1777 | #endif |
---|