1 | /* $Id$ */ |
---|
2 | |
---|
3 | /* RPC multiplexor for a multitasking environment */ |
---|
4 | |
---|
5 | /* Author: Till Straumann <strauman@slac.stanford.edu>, 2002 */ |
---|
6 | |
---|
7 | /* This code funnels arbitrary task's UDP/RPC requests |
---|
8 | * through one socket to arbitrary servers. |
---|
9 | * The replies are gathered and dispatched to the |
---|
10 | * requestors. |
---|
11 | * One task handles all the sending and receiving |
---|
12 | * work including retries. |
---|
13 | * It is up to the requestor, however, to do |
---|
14 | * the XDR encoding of the arguments / decoding |
---|
15 | * of the results (except for the RPC header which |
---|
16 | * is handled by the daemon). |
---|
17 | */ |
---|
18 | |
---|
19 | /* |
---|
20 | * Authorship |
---|
21 | * ---------- |
---|
22 | * This software (NFS-2 client implementation for RTEMS) was created by |
---|
23 | * Till Straumann <strauman@slac.stanford.edu>, 2002-2007, |
---|
24 | * Stanford Linear Accelerator Center, Stanford University. |
---|
25 | * |
---|
26 | * Acknowledgement of sponsorship |
---|
27 | * ------------------------------ |
---|
28 | * The NFS-2 client implementation for RTEMS was produced by |
---|
29 | * the Stanford Linear Accelerator Center, Stanford University, |
---|
30 | * under Contract DE-AC03-76SFO0515 with the Department of Energy. |
---|
31 | * |
---|
32 | * Government disclaimer of liability |
---|
33 | * ---------------------------------- |
---|
34 | * Neither the United States nor the United States Department of Energy, |
---|
35 | * nor any of their employees, makes any warranty, express or implied, or |
---|
36 | * assumes any legal liability or responsibility for the accuracy, |
---|
37 | * completeness, or usefulness of any data, apparatus, product, or process |
---|
38 | * disclosed, or represents that its use would not infringe privately owned |
---|
39 | * rights. |
---|
40 | * |
---|
41 | * Stanford disclaimer of liability |
---|
42 | * -------------------------------- |
---|
43 | * Stanford University makes no representations or warranties, express or |
---|
44 | * implied, nor assumes any liability for the use of this software. |
---|
45 | * |
---|
46 | * Stanford disclaimer of copyright |
---|
47 | * -------------------------------- |
---|
48 | * Stanford University, owner of the copyright, hereby disclaims its |
---|
49 | * copyright and all other rights in this software. Hence, anyone may |
---|
50 | * freely use it for any purpose without restriction. |
---|
51 | * |
---|
52 | * Maintenance of notices |
---|
53 | * ---------------------- |
---|
54 | * In the interest of clarity regarding the origin and status of this |
---|
55 | * SLAC software, this and all the preceding Stanford University notices |
---|
56 | * are to remain affixed to any copy or derivative of this software made |
---|
57 | * or distributed by the recipient and are to be affixed to any copy of |
---|
58 | * software made or distributed by the recipient that contains a copy or |
---|
59 | * derivative of this software. |
---|
60 | * |
---|
61 | * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03 |
---|
62 | */ |
---|
63 | |
---|
64 | #if HAVE_CONFIG_H |
---|
65 | #include "config.h" |
---|
66 | #endif |
---|
67 | |
---|
68 | #include <rtems.h> |
---|
69 | #include <rtems/error.h> |
---|
70 | #include <rtems/rtems_bsdnet.h> |
---|
71 | #include <stdlib.h> |
---|
72 | #include <time.h> |
---|
73 | #include <rpc/rpc.h> |
---|
74 | #include <rpc/pmap_prot.h> |
---|
75 | #include <errno.h> |
---|
76 | #include <sys/ioctl.h> |
---|
77 | #include <assert.h> |
---|
78 | #include <stdio.h> |
---|
79 | #include <errno.h> |
---|
80 | #include <string.h> |
---|
81 | #include <netinet/in.h> |
---|
82 | #include <arpa/inet.h> |
---|
83 | |
---|
84 | #include "rpcio.h" |
---|
85 | |
---|
86 | /****************************************************************/ |
---|
87 | /* CONFIGURABLE PARAMETERS */ |
---|
88 | /****************************************************************/ |
---|
89 | |
---|
90 | #define MBUF_RX /* If defined: use mbuf XDR stream for |
---|
91 | * decoding directly out of mbufs |
---|
92 | * Otherwise, the regular 'recvfrom()' |
---|
93 | * interface will be used involving an |
---|
94 | * extra buffer allocation + copy step. |
---|
95 | */ |
---|
96 | |
---|
97 | #define MBUF_TX /* If defined: avoid copying data when |
---|
98 | * sending. Instead, use a wrapper to |
---|
99 | * 'sosend()' which will point an MBUF |
---|
100 | * directly to our buffer space. |
---|
101 | * Note that the BSD stack does not copy |
---|
102 | * data when fragmenting packets - it |
---|
103 | * merely uses an mbuf chain pointing |
---|
104 | * into different areas of the data. |
---|
105 | * |
---|
106 | * If undefined, the regular 'sendto()' |
---|
107 | * interface is used. |
---|
108 | */ |
---|
109 | |
---|
110 | #undef REJECT_SERVERIP_MISMATCH |
---|
111 | /* If defined, RPC replies must come from the server |
---|
112 | * that was queried. Eric Norum has reported problems |
---|
113 | * with clustered NFS servers. So we disable this |
---|
114 | * reducing paranoia... |
---|
115 | */ |
---|
116 | |
---|
117 | /* daemon task parameters */ |
---|
118 | #define RPCIOD_STACK 10000 |
---|
119 | #define RPCIOD_PRIO 100 /* *fallback* priority */ |
---|
120 | |
---|
121 | /* depth of the message queue for sending |
---|
122 | * RPC requests to the daemon |
---|
123 | */ |
---|
124 | #define RPCIOD_QDEPTH 20 |
---|
125 | |
---|
126 | /* Maximum retry limit for retransmission */ |
---|
127 | #define RPCIOD_RETX_CAP_S 3 /* seconds */ |
---|
128 | |
---|
129 | /* Default timeout for RPC calls */ |
---|
130 | #define RPCIOD_DEFAULT_TIMEOUT (&_rpc_default_timeout) |
---|
131 | static struct timeval _rpc_default_timeout = { 10 /* secs */, 0 /* usecs */ }; |
---|
132 | |
---|
133 | /* how many times should we try to resend a failed |
---|
134 | * transaction with refreshed AUTHs |
---|
135 | */ |
---|
136 | #define RPCIOD_REFRESH 2 |
---|
137 | |
---|
138 | /* Events we are using; the RPC_EVENT |
---|
139 | * MUST NOT be used by any application |
---|
140 | * thread doing RPC IO (e.g. NFS) |
---|
141 | */ |
---|
142 | #define RTEMS_RPC_EVENT RTEMS_EVENT_30 /* THE event used by RPCIO. Every task doing |
---|
143 | * RPC IO will receive this - hence it is |
---|
144 | * RESERVED |
---|
145 | */ |
---|
146 | #define RPCIOD_RX_EVENT RTEMS_EVENT_1 /* Events the RPCIOD is using/waiting for */ |
---|
147 | #define RPCIOD_TX_EVENT RTEMS_EVENT_2 |
---|
148 | #define RPCIOD_KILL_EVENT RTEMS_EVENT_3 /* send to the daemon to kill it */ |
---|
149 | |
---|
150 | #define LD_XACT_HASH 8 /* ld of the size of the transaction hash table */ |
---|
151 | |
---|
152 | |
---|
153 | /* Debugging Flags */ |
---|
154 | |
---|
155 | /* NOTE: defining DEBUG 0 leaves some 'assert()' paranoia checks |
---|
156 | * but produces no output |
---|
157 | */ |
---|
158 | |
---|
159 | #define DEBUG_TRACE_XACT (1<<0) |
---|
160 | #define DEBUG_EVENTS (1<<1) |
---|
161 | #define DEBUG_MALLOC (1<<2) |
---|
162 | #define DEBUG_TIMEOUT (1<<3) |
---|
163 | #define DEBUG_PACKLOSS (1<<4) /* This introduces random, artificial packet losses to test retransmission */ |
---|
164 | |
---|
165 | #define DEBUG_PACKLOSS_FRACT (0xffffffff/10) |
---|
166 | |
---|
167 | /* USE PARENTHESIS WHEN 'or'ing MULTIPLE FLAGS: (DEBUG_XX | DEBUG_YY) */ |
---|
168 | #define DEBUG (0) |
---|
169 | |
---|
170 | /****************************************************************/ |
---|
171 | /* END OF CONFIGURABLE SECTION */ |
---|
172 | /****************************************************************/ |
---|
173 | |
---|
174 | /* prevent rollover of our timers by readjusting the epoch on the fly */ |
---|
175 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
176 | #define RPCIOD_EPOCH_SECS 10 |
---|
177 | #else |
---|
178 | #define RPCIOD_EPOCH_SECS 10000 |
---|
179 | #endif |
---|
180 | |
---|
181 | #ifdef DEBUG |
---|
182 | #define ASSERT(arg) assert(arg) |
---|
183 | #else |
---|
184 | #define ASSERT(arg) if (arg) |
---|
185 | #endif |
---|
186 | |
---|
187 | /****************************************************************/ |
---|
188 | /* MACROS */ |
---|
189 | /****************************************************************/ |
---|
190 | |
---|
191 | |
---|
192 | #define XACT_HASHS (1<<(LD_XACT_HASH)) /* the hash table size derived from the ld */ |
---|
193 | #define XACT_HASH_MSK ((XACT_HASHS)-1) /* mask to extract the hash index from a RPC-XID */ |
---|
194 | |
---|
195 | |
---|
196 | #define MU_LOCK(mutex) do { \ |
---|
197 | assert( \ |
---|
198 | RTEMS_SUCCESSFUL == \ |
---|
199 | rtems_semaphore_obtain( \ |
---|
200 | (mutex), \ |
---|
201 | RTEMS_WAIT, \ |
---|
202 | RTEMS_NO_TIMEOUT \ |
---|
203 | ) ); \ |
---|
204 | } while(0) |
---|
205 | |
---|
206 | #define MU_UNLOCK(mutex) do { \ |
---|
207 | assert( \ |
---|
208 | RTEMS_SUCCESSFUL == \ |
---|
209 | rtems_semaphore_release( \ |
---|
210 | (mutex) \ |
---|
211 | ) ); \ |
---|
212 | } while(0) |
---|
213 | |
---|
214 | #define MU_CREAT(pmutex) do { \ |
---|
215 | assert( \ |
---|
216 | RTEMS_SUCCESSFUL == \ |
---|
217 | rtems_semaphore_create( \ |
---|
218 | rtems_build_name( \ |
---|
219 | 'R','P','C','l' \ |
---|
220 | ), \ |
---|
221 | 1, \ |
---|
222 | MUTEX_ATTRIBUTES, \ |
---|
223 | 0, \ |
---|
224 | (pmutex)) ); \ |
---|
225 | } while (0) |
---|
226 | |
---|
227 | |
---|
228 | #define MU_DESTROY(mutex) do { \ |
---|
229 | assert( \ |
---|
230 | RTEMS_SUCCESSFUL == \ |
---|
231 | rtems_semaphore_delete( \ |
---|
232 | mutex \ |
---|
233 | ) ); \ |
---|
234 | } while (0) |
---|
235 | |
---|
236 | #define MUTEX_ATTRIBUTES (RTEMS_LOCAL | \ |
---|
237 | RTEMS_PRIORITY | \ |
---|
238 | RTEMS_INHERIT_PRIORITY | \ |
---|
239 | RTEMS_BINARY_SEMAPHORE) |
---|
240 | |
---|
241 | #define FIRST_ATTEMPT 0x88888888 /* some time that is never reached */ |
---|
242 | |
---|
243 | /****************************************************************/ |
---|
244 | /* TYPE DEFINITIONS */ |
---|
245 | /****************************************************************/ |
---|
246 | |
---|
247 | typedef rtems_interval TimeoutT; |
---|
248 | |
---|
/* 100000th implementation of a doubly linked list;
 * since only one thread is looking at these,
 * we need no locking
 */
typedef struct ListNodeRec_ {
	/* forward / backward links; a node is detached when both are unused */
	struct ListNodeRec_ *next, *prev;
} ListNodeRec, *ListNode;
---|
256 | |
---|
257 | |
---|
/* Structure representing an RPC server */
typedef struct RpcUdpServerRec_ {
		RpcUdpServer		next;			/* linked list of all servers; protected by llock
										 * (the list head 'rpcUdpServers' is locked with llock
										 * in rpcUdpServerCreate/Destroy/Stats)
										 */
		union {
		struct sockaddr_in	sin;
		struct sockaddr     sa;
		}					addr;			/* server's UDP address (both views of the same storage) */
		AUTH				*auth;			/* RPC credentials, shared by all XACTs using this server */
		rtems_id			authlock;		/* must MUTEX the auth object - it's not clear
											 *  what is better:
											 *   1 having one (MUTEXed) auth per server
											 *     who is shared among all transactions
											 *     using that server
											 *   2 maintaining an AUTH per transaction
											 *     (there are then other options: manage
											 *     XACT pools on a per-server basis instead
											 *     of associating a server with a XACT when
											 *     sending)
											 *  experience will show if the current (1)
											 *  approach has to be changed.
											 */
		TimeoutT			retry_period;	/* dynamically adjusted retry period
											 * (based on packet roundtrip time)
											 */
		/* STATISTICS */
		unsigned long		retrans;		/* how many retries were issued by this server */
		unsigned long		requests;		/* how many requests have been sent */
		unsigned long       timeouts;		/* how many requests have timed out */
		unsigned long       errors;         /* how many errors have occurred (other than timeouts) */
		char				name[20];		/* server's address in IP 'dot' notation */
} RpcUdpServerRec;
---|
289 | |
---|
/* View of an encoded RPC message buffer: the XID is always
 * the first word of the encoded data, so it can be read
 * without decoding the whole message.
 */
typedef union RpcBufU_ {
		u_long				xid;	/* transaction ID (first word of the message) */
		char				buf[1];	/* raw encoded message bytes (allocated larger) */
} RpcBufU, *RpcBuf;
---|
294 | |
---|
295 | /* RX Buffer implementation; this is either |
---|
296 | * an MBUF chain (MBUF_RX configuration) |
---|
297 | * or a buffer allocated from the heap |
---|
298 | * where recvfrom copies the (encoded) reply |
---|
299 | * to. The XDR routines the copy/decode |
---|
300 | * it into the user's data structures. |
---|
301 | */ |
---|
302 | #ifdef MBUF_RX |
---|
303 | typedef struct mbuf * RxBuf; /* an MBUF chain */ |
---|
304 | static void bufFree(struct mbuf **m); |
---|
305 | #define XID(ibuf) (*(mtod((ibuf), u_long *))) |
---|
306 | extern void xdrmbuf_create(XDR *, struct mbuf *, enum xdr_op); |
---|
307 | #else |
---|
308 | typedef RpcBuf RxBuf; |
---|
309 | #define bufFree(b) do { MY_FREE(*(b)); *(b)=0; } while(0) |
---|
310 | #define XID(ibuf) ((ibuf)->xid) |
---|
311 | #endif |
---|
312 | |
---|
/* A RPC 'transaction' consisting
 * of server and requestor information,
 * buffer space and an XDR object
 * (for encoding arguments).
 */
typedef struct RpcUdpXactRec_ {
		ListNodeRec			node;		/* so we can put XACTs on a list */
		RpcUdpServer		server;		/* server this XACT goes to */
		long				lifetime;	/* during the lifetime, retry attempts are made */
		long				tolive;		/* lifetime timer */
		struct rpc_err		status;		/* RPC reply error status */
		long				age;		/* age info; needed to manage retransmission */
		long				trip;		/* record round trip time in ticks */
		rtems_id			requestor;	/* the task waiting for this XACT to complete */
		RpcUdpXactPool		pool;		/* if this XACT belong to a pool, this is it */
		XDR					xdrs;		/* argument encoder stream */
		int					xdrpos;		/* stream position after the (permanent) header */
		xdrproc_t			xres;		/* reply decoder proc - TODO needn't be here */
		caddr_t				pres;		/* reply decoded obj  - TODO needn't be here */
#ifndef MBUF_RX
		int					ibufsize;	/* size of the ibuf (bytes) */
#endif
#ifdef  MBUF_TX
		int					refcnt;		/* mbuf external storage reference count */
#endif
		int					obufsize;	/* size of the obuf (bytes) */
		RxBuf				ibuf;		/* pointer to input buffer assigned by daemon */
		RpcBufU				obuf;		/* output buffer (encoded args) APPENDED HERE;
										 * must be the last member - extra space for the
										 * encoded message is allocated past this point
										 */
} RpcUdpXactRec;
---|
342 | |
---|
/* Pool of transaction objects for one program/version.
 * NOTE(review): 'box' is an RTEMS message queue id; presumably it
 * holds idle transaction objects of size 'xactSize' - confirm
 * against the pool create/get/put routines.
 */
typedef struct RpcUdpXactPoolRec_ {
	rtems_id	box;		/* message queue backing the pool */
	int			prog;		/* RPC program number for XACTs of this pool */
	int			version;	/* RPC program version */
	int			xactSize;	/* buffer size of the transactions in this pool */
} RpcUdpXactPoolRec;
---|
349 | |
---|
350 | /* a global hash table where all 'living' transaction |
---|
351 | * objects are registered. |
---|
352 | * A number of bits in a transaction's XID maps 1:1 to |
---|
353 | * an index in this table. Hence, the XACT matching |
---|
354 | * an RPC/UDP reply packet can quickly be found |
---|
355 | * The size of this table imposes a hard limit on the |
---|
356 | * number of all created transactions in the system. |
---|
357 | */ |
---|
358 | static RpcUdpXact xactHashTbl[XACT_HASHS]={0}; |
---|
359 | static u_long xidUpper [XACT_HASHS]={0}; |
---|
360 | static unsigned xidHashSeed = 0 ; |
---|
361 | |
---|
362 | /* forward declarations */ |
---|
363 | static RpcUdpXact |
---|
364 | sockRcv(void); |
---|
365 | |
---|
366 | static void |
---|
367 | rpcio_daemon(rtems_task_argument); |
---|
368 | |
---|
369 | #ifdef MBUF_TX |
---|
370 | ssize_t |
---|
371 | sendto_nocpy ( |
---|
372 | int s, |
---|
373 | const void *buf, size_t buflen, |
---|
374 | int flags, |
---|
375 | const struct sockaddr *toaddr, int tolen, |
---|
376 | void *closure, |
---|
377 | void (*freeproc)(caddr_t, u_int), |
---|
378 | void (*refproc)(caddr_t, u_int) |
---|
379 | ); |
---|
380 | static void paranoia_free(caddr_t closure, u_int size); |
---|
381 | static void paranoia_ref (caddr_t closure, u_int size); |
---|
382 | #define SENDTO sendto_nocpy |
---|
383 | #else |
---|
384 | #define SENDTO sendto |
---|
385 | #endif |
---|
386 | |
---|
387 | static RpcUdpServer rpcUdpServers = 0; /* linked list of all servers; protected by llock */ |
---|
388 | |
---|
389 | static int ourSock = -1; /* the socket we are using for communication */ |
---|
390 | static rtems_id rpciod = 0; /* task id of the RPC daemon */ |
---|
391 | static rtems_id msgQ = 0; /* message queue where the daemon picks up |
---|
392 | * requests |
---|
393 | */ |
---|
394 | static rtems_id llock = 0; /* MUTEX protecting the server list */ |
---|
395 | static rtems_id hlock = 0; /* MUTEX protecting the hash table and the list of servers */ |
---|
396 | static rtems_id fini = 0; /* a synchronization semaphore we use during |
---|
397 | * module cleanup / driver unloading |
---|
398 | */ |
---|
399 | static rtems_interval ticksPerSec; /* cached system clock rate (WHO IS ASSUMED NOT |
---|
400 | * TO CHANGE) |
---|
401 | */ |
---|
402 | |
---|
403 | rtems_task_priority rpciodPriority = 0; |
---|
404 | |
---|
405 | #if (DEBUG) & DEBUG_MALLOC |
---|
406 | /* malloc wrappers for debugging */ |
---|
407 | static int nibufs = 0; |
---|
408 | |
---|
409 | static inline void *MY_MALLOC(int s) |
---|
410 | { |
---|
411 | if (s) { |
---|
412 | void *rval; |
---|
413 | MU_LOCK(hlock); |
---|
414 | assert(nibufs++ < 2000); |
---|
415 | MU_UNLOCK(hlock); |
---|
416 | assert((rval = malloc(s)) != 0); |
---|
417 | return rval; |
---|
418 | } |
---|
419 | return 0; |
---|
420 | } |
---|
421 | |
---|
422 | static inline void *MY_CALLOC(int n, int s) |
---|
423 | { |
---|
424 | if (s) { |
---|
425 | void *rval; |
---|
426 | MU_LOCK(hlock); |
---|
427 | assert(nibufs++ < 2000); |
---|
428 | MU_UNLOCK(hlock); |
---|
429 | assert((rval = calloc(n,s)) != 0); |
---|
430 | return rval; |
---|
431 | } |
---|
432 | return 0; |
---|
433 | } |
---|
434 | |
---|
435 | |
---|
436 | static inline void MY_FREE(void *p) |
---|
437 | { |
---|
438 | if (p) { |
---|
439 | MU_LOCK(hlock); |
---|
440 | nibufs--; |
---|
441 | MU_UNLOCK(hlock); |
---|
442 | free(p); |
---|
443 | } |
---|
444 | } |
---|
445 | #else |
---|
446 | #define MY_MALLOC malloc |
---|
447 | #define MY_CALLOC calloc |
---|
448 | #define MY_FREE free |
---|
449 | #endif |
---|
450 | |
---|
451 | static inline bool_t |
---|
452 | locked_marshal(RpcUdpServer s, XDR *xdrs) |
---|
453 | { |
---|
454 | bool_t rval; |
---|
455 | MU_LOCK(s->authlock); |
---|
456 | rval = AUTH_MARSHALL(s->auth, xdrs); |
---|
457 | MU_UNLOCK(s->authlock); |
---|
458 | return rval; |
---|
459 | } |
---|
460 | |
---|
461 | /* Locked operations on a server's auth object */ |
---|
462 | static inline bool_t |
---|
463 | locked_validate(RpcUdpServer s, struct opaque_auth *v) |
---|
464 | { |
---|
465 | bool_t rval; |
---|
466 | MU_LOCK(s->authlock); |
---|
467 | rval = AUTH_VALIDATE(s->auth, v); |
---|
468 | MU_UNLOCK(s->authlock); |
---|
469 | return rval; |
---|
470 | } |
---|
471 | |
---|
472 | static inline bool_t |
---|
473 | locked_refresh(RpcUdpServer s) |
---|
474 | { |
---|
475 | bool_t rval; |
---|
476 | MU_LOCK(s->authlock); |
---|
477 | rval = AUTH_REFRESH(s->auth); |
---|
478 | MU_UNLOCK(s->authlock); |
---|
479 | return rval; |
---|
480 | } |
---|
481 | |
---|
482 | /* Create a server object |
---|
483 | * |
---|
484 | */ |
---|
485 | enum clnt_stat |
---|
486 | rpcUdpServerCreate( |
---|
487 | struct sockaddr_in *paddr, |
---|
488 | int prog, |
---|
489 | int vers, |
---|
490 | u_long uid, |
---|
491 | u_long gid, |
---|
492 | RpcUdpServer *psrv |
---|
493 | ) |
---|
494 | { |
---|
495 | RpcUdpServer rval; |
---|
496 | u_short port; |
---|
497 | char hname[MAX_MACHINE_NAME + 1]; |
---|
498 | int theuid, thegid; |
---|
499 | int thegids[NGRPS]; |
---|
500 | gid_t gids[NGROUPS]; |
---|
501 | int len,i; |
---|
502 | AUTH *auth; |
---|
503 | enum clnt_stat pmap_err; |
---|
504 | struct pmap pmaparg; |
---|
505 | |
---|
506 | if ( gethostname(hname, MAX_MACHINE_NAME) ) { |
---|
507 | fprintf(stderr, |
---|
508 | "RPCIO - error: I have no hostname ?? (%s)\n", |
---|
509 | strerror(errno)); |
---|
510 | return RPC_UNKNOWNHOST; |
---|
511 | } |
---|
512 | |
---|
513 | if ( (len = getgroups(NGROUPS, gids) < 0 ) ) { |
---|
514 | fprintf(stderr, |
---|
515 | "RPCIO - error: I unable to get group ids (%s)\n", |
---|
516 | strerror(errno)); |
---|
517 | return RPC_FAILED; |
---|
518 | } |
---|
519 | |
---|
520 | if ( len > NGRPS ) |
---|
521 | len = NGRPS; |
---|
522 | |
---|
523 | for (i=0; i<len; i++) |
---|
524 | thegids[i] = (int)gids[i]; |
---|
525 | |
---|
526 | theuid = (int) ((RPCIOD_DEFAULT_ID == uid) ? geteuid() : uid); |
---|
527 | thegid = (int) ((RPCIOD_DEFAULT_ID == gid) ? getegid() : gid); |
---|
528 | |
---|
529 | if ( !(auth = authunix_create(hname, theuid, thegid, len, thegids)) ) { |
---|
530 | fprintf(stderr, |
---|
531 | "RPCIO - error: unable to create RPC AUTH\n"); |
---|
532 | return RPC_FAILED; |
---|
533 | } |
---|
534 | |
---|
535 | /* if they specified no port try to ask the portmapper */ |
---|
536 | if (!paddr->sin_port) { |
---|
537 | |
---|
538 | paddr->sin_port = htons(PMAPPORT); |
---|
539 | |
---|
540 | pmaparg.pm_prog = prog; |
---|
541 | pmaparg.pm_vers = vers; |
---|
542 | pmaparg.pm_prot = IPPROTO_UDP; |
---|
543 | pmaparg.pm_port = 0; /* not needed or used */ |
---|
544 | |
---|
545 | |
---|
546 | /* dont use non-reentrant pmap_getport ! */ |
---|
547 | |
---|
548 | pmap_err = rpcUdpCallRp( |
---|
549 | paddr, |
---|
550 | PMAPPROG, |
---|
551 | PMAPVERS, |
---|
552 | PMAPPROC_GETPORT, |
---|
553 | xdr_pmap, |
---|
554 | &pmaparg, |
---|
555 | xdr_u_short, |
---|
556 | &port, |
---|
557 | uid, |
---|
558 | gid, |
---|
559 | 0); |
---|
560 | |
---|
561 | if ( RPC_SUCCESS != pmap_err ) { |
---|
562 | paddr->sin_port = 0; |
---|
563 | return pmap_err; |
---|
564 | } |
---|
565 | |
---|
566 | paddr->sin_port = htons(port); |
---|
567 | } |
---|
568 | |
---|
569 | if (0==paddr->sin_port) { |
---|
570 | return RPC_PROGNOTREGISTERED; |
---|
571 | } |
---|
572 | |
---|
573 | rval = (RpcUdpServer)MY_MALLOC(sizeof(*rval)); |
---|
574 | memset(rval, 0, sizeof(*rval)); |
---|
575 | |
---|
576 | if (!inet_ntop(AF_INET, &paddr->sin_addr, rval->name, sizeof(rval->name))) |
---|
577 | sprintf(rval->name,"?.?.?.?"); |
---|
578 | rval->addr.sin = *paddr; |
---|
579 | |
---|
580 | /* start with a long retransmission interval - it |
---|
581 | * will be adapted dynamically |
---|
582 | */ |
---|
583 | rval->retry_period = RPCIOD_RETX_CAP_S * ticksPerSec; |
---|
584 | |
---|
585 | rval->auth = auth; |
---|
586 | |
---|
587 | MU_CREAT( &rval->authlock ); |
---|
588 | |
---|
589 | /* link into list */ |
---|
590 | MU_LOCK( llock ); |
---|
591 | rval->next = rpcUdpServers; |
---|
592 | rpcUdpServers = rval; |
---|
593 | MU_UNLOCK( llock ); |
---|
594 | |
---|
595 | *psrv = rval; |
---|
596 | return RPC_SUCCESS; |
---|
597 | } |
---|
598 | |
---|
599 | void |
---|
600 | rpcUdpServerDestroy(RpcUdpServer s) |
---|
601 | { |
---|
602 | RpcUdpServer prev; |
---|
603 | if (!s) |
---|
604 | return; |
---|
605 | /* we should probably verify (but how?) that nobody |
---|
606 | * (at least: no outstanding XACTs) is using this |
---|
607 | * server; |
---|
608 | */ |
---|
609 | |
---|
610 | /* remove from server list */ |
---|
611 | MU_LOCK(llock); |
---|
612 | prev = rpcUdpServers; |
---|
613 | if ( s == prev ) { |
---|
614 | rpcUdpServers = s->next; |
---|
615 | } else { |
---|
616 | for ( ; prev ; prev = prev->next) { |
---|
617 | if (prev->next == s) { |
---|
618 | prev->next = s->next; |
---|
619 | break; |
---|
620 | } |
---|
621 | } |
---|
622 | } |
---|
623 | MU_UNLOCK(llock); |
---|
624 | |
---|
625 | /* MUST have found it */ |
---|
626 | assert(prev); |
---|
627 | |
---|
628 | auth_destroy(s->auth); |
---|
629 | |
---|
630 | MU_DESTROY(s->authlock); |
---|
631 | MY_FREE(s); |
---|
632 | } |
---|
633 | |
---|
634 | int |
---|
635 | rpcUdpStats(FILE *f) |
---|
636 | { |
---|
637 | RpcUdpServer s; |
---|
638 | |
---|
639 | if (!f) f = stdout; |
---|
640 | |
---|
641 | fprintf(f,"RPCIOD statistics:\n"); |
---|
642 | |
---|
643 | MU_LOCK(llock); |
---|
644 | for (s = rpcUdpServers; s; s=s->next) { |
---|
645 | fprintf(f,"\nServer -- %s:\n", s->name); |
---|
646 | fprintf(f," requests sent: %10ld, retransmitted: %10ld\n", |
---|
647 | s->requests, s->retrans); |
---|
648 | fprintf(f," timed out: %10ld, send errors: %10ld\n", |
---|
649 | s->timeouts, s->errors); |
---|
650 | fprintf(f," current retransmission interval: %dms\n", |
---|
651 | (unsigned)(s->retry_period * 1000 / ticksPerSec) ); |
---|
652 | } |
---|
653 | MU_UNLOCK(llock); |
---|
654 | |
---|
655 | return 0; |
---|
656 | } |
---|
657 | |
---|
/* Create a transaction object for 'program'/'version' with an
 * output buffer of at least 'size' bytes (UDPMSGSIZE if 0),
 * pre-encode the permanent part of the RPC call header, and
 * register the transaction in the global XID hash table.
 *
 * RETURNS: new transaction, or NULL if the header cannot be
 *          encoded, the hash table is full, or the module is
 *          shutting down (no message queue).
 */
RpcUdpXact
rpcUdpXactCreate(
	u_long	program,
	u_long	version,
	u_long	size
	)
{
RpcUdpXact		rval=0;
struct rpc_msg	header;
register int	i,j;

	if (!size)
		size = UDPMSGSIZE;
	/* word align */
	size = (size + 3) & ~3;

	/* the encoded-args buffer ('obuf') is appended to the XACT record;
	 * allocate record + buffer in one chunk
	 */
	rval = (RpcUdpXact)MY_CALLOC(1,sizeof(*rval) - sizeof(rval->obuf) + size);

	if (rval) {

		header.rm_xid             = 0;
		header.rm_direction       = CALL;
		header.rm_call.cb_rpcvers = RPC_MSG_VERSION;
		header.rm_call.cb_prog    = program;
		header.rm_call.cb_vers    = version;
		xdrmem_create(&(rval->xdrs), rval->obuf.buf, size, XDR_ENCODE);

		if (!xdr_callhdr(&(rval->xdrs), &header)) {
			MY_FREE(rval);
			return 0;
		}
		/* pick a free table slot and initialize the XID */
		/* NOTE(review): this first assignment is immediately overwritten
		 * below - looks like a leftover seeding attempt; confirm intent.
		 */
		rval->obuf.xid = time(0) ^ (unsigned long)rval;
		MU_LOCK(hlock);
		/* lower LD_XACT_HASH bits of the XID select the hash slot */
		rval->obuf.xid = (xidHashSeed++ ^ ((unsigned long)rval>>10)) & XACT_HASH_MSK;
		i=j=(rval->obuf.xid & XACT_HASH_MSK);
		if (msgQ) {
			/* if there's no message queue, refuse to
			 * give them transactions; we might be in the process to
			 * go away...
			 */
			/* linear-probe the table, starting after the hashed slot,
			 * until a free slot is found or we wrap around to 'j'
			 */
			do {
				i=(i+1) & XACT_HASH_MSK; /* cheap modulo */
				if (!xactHashTbl[i]) {
#if (DEBUG) & DEBUG_TRACE_XACT
					fprintf(stderr,"RPCIO: entering index %i, val %x\n",i,rval);
#endif
					xactHashTbl[i]=rval;
					j=-1;	/* mark success (i != j below) */
					break;
				}
			} while (i!=j);
		}
		MU_UNLOCK(hlock);
		if (i==j) {
			/* table full (or no msgQ): undo and fail */
			XDR_DESTROY(&rval->xdrs);
			MY_FREE(rval);
			return 0;
		}
		/* combine the slot index with the remembered upper XID bits
		 * for this slot (avoids reusing a recently used XID)
		 */
		rval->obuf.xid  = xidUpper[i] | i;
		rval->xdrpos    = XDR_GETPOS(&(rval->xdrs));
		rval->obufsize  = size;
	}
	return rval;
}
---|
723 | |
---|
/* Remove a transaction from the global XID hash table and release
 * its buffers and memory. The upper bits of the XID are remembered
 * per slot so a re-created transaction won't reuse the same XID.
 *
 * NOTE(review): must not be called while the daemon still owns the
 * XACT (request in flight) - confirm against callers.
 */
void
rpcUdpXactDestroy(RpcUdpXact xact)
{
int i = xact->obuf.xid & XACT_HASH_MSK;

#if (DEBUG) & DEBUG_TRACE_XACT
		fprintf(stderr,"RPCIO: removing index %i, val %x\n",i,xact);
#endif

		/* paranoia: the slot must still hold this very transaction */
		ASSERT( xactHashTbl[i]==xact );

		MU_LOCK(hlock);
		xactHashTbl[i]=0;
		/* remember XID we used last time so we can avoid
		 * reusing the same one (incremented by rpcUdpSend routine)
		 */
		xidUpper[i]   = xact->obuf.xid & ~XACT_HASH_MSK;
		MU_UNLOCK(hlock);

		bufFree(&xact->ibuf);

		XDR_DESTROY(&xact->xdrs);
		MY_FREE(xact);
}
---|
748 | |
---|
749 | |
---|
750 | |
---|
751 | /* Send a transaction, i.e. enqueue it to the |
---|
752 | * RPC daemon who will actually send it. |
---|
753 | */ |
---|
754 | enum clnt_stat |
---|
755 | rpcUdpSend( |
---|
756 | RpcUdpXact xact, |
---|
757 | RpcUdpServer srvr, |
---|
758 | struct timeval *timeout, |
---|
759 | u_long proc, |
---|
760 | xdrproc_t xres, caddr_t pres, |
---|
761 | xdrproc_t xargs, caddr_t pargs, |
---|
762 | ... |
---|
763 | ) |
---|
764 | { |
---|
765 | register XDR *xdrs; |
---|
766 | unsigned long ms; |
---|
767 | va_list ap; |
---|
768 | |
---|
769 | va_start(ap,pargs); |
---|
770 | |
---|
771 | if (!timeout) |
---|
772 | timeout = RPCIOD_DEFAULT_TIMEOUT; |
---|
773 | |
---|
774 | ms = 1000 * timeout->tv_sec + timeout->tv_usec/1000; |
---|
775 | |
---|
776 | /* round lifetime to closest # of ticks */ |
---|
777 | xact->lifetime = (ms * ticksPerSec + 500) / 1000; |
---|
778 | if ( 0 == xact->lifetime ) |
---|
779 | xact->lifetime = 1; |
---|
780 | |
---|
781 | #if (DEBUG) & DEBUG_TIMEOUT |
---|
782 | { |
---|
783 | static int once=0; |
---|
784 | if (!once++) { |
---|
785 | fprintf(stderr, |
---|
786 | "Initial lifetime: %i (ticks)\n", |
---|
787 | xact->lifetime); |
---|
788 | } |
---|
789 | } |
---|
790 | #endif |
---|
791 | |
---|
792 | xact->tolive = xact->lifetime; |
---|
793 | |
---|
794 | xact->xres = xres; |
---|
795 | xact->pres = pres; |
---|
796 | xact->server = srvr; |
---|
797 | |
---|
798 | xdrs = &xact->xdrs; |
---|
799 | xdrs->x_op = XDR_ENCODE; |
---|
800 | /* increment transaction ID */ |
---|
801 | xact->obuf.xid += XACT_HASHS; |
---|
802 | XDR_SETPOS(xdrs, xact->xdrpos); |
---|
803 | if ( !XDR_PUTLONG(xdrs,(long*)&proc) || !locked_marshal(srvr, xdrs) || |
---|
804 | !xargs(xdrs, pargs) ) { |
---|
805 | va_end(ap); |
---|
806 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
807 | } |
---|
808 | while ((xargs=va_arg(ap,xdrproc_t))) { |
---|
809 | if (!xargs(xdrs, va_arg(ap,caddr_t))) |
---|
810 | va_end(ap); |
---|
811 | return(xact->status.re_status=RPC_CANTENCODEARGS); |
---|
812 | } |
---|
813 | |
---|
814 | va_end(ap); |
---|
815 | |
---|
816 | rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor); |
---|
817 | if ( rtems_message_queue_send( msgQ, &xact, sizeof(xact)) ) { |
---|
818 | return RPC_CANTSEND; |
---|
819 | } |
---|
820 | /* wakeup the rpciod */ |
---|
821 | ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) ); |
---|
822 | |
---|
823 | return RPC_SUCCESS; |
---|
824 | } |
---|
825 | |
---|
/* Block for the RPC reply to an outstanding
 * transaction.
 * The caller is woken by the RPC daemon either
 * upon reception of the reply or on timeout.
 *
 * On reception, the reply is XDR-decoded into the 'xres'/'pres'
 * stored by rpcUdpSend and the server's AUTH verifier is checked.
 * On an AUTH error, the credentials are refreshed and the request
 * re-queued to the daemon (up to RPCIOD_REFRESH times).
 *
 * RETURNS: final RPC status of the transaction.
 */
enum clnt_stat
rpcUdpRcv(RpcUdpXact xact)
{
int					refresh;
XDR					reply_xdrs;
struct rpc_msg		reply_msg;
rtems_status_code	status;
rtems_event_set		gotEvents;

	refresh = 0;

	do {

	/* block for the reply; RTEMS_RPC_EVENT is sent by the daemon */
	status = rtems_event_receive(
			RTEMS_RPC_EVENT,
			RTEMS_WAIT | RTEMS_EVENT_ANY,
			RTEMS_NO_TIMEOUT,
			&gotEvents);
	ASSERT( status == RTEMS_SUCCESSFUL );

	/* a nonzero status here was set by the daemon (e.g. timeout) */
	if (xact->status.re_status) {
#ifdef MBUF_RX
		/* add paranoia */
		ASSERT( !xact->ibuf );
#endif
		return xact->status.re_status;
	}

#ifdef MBUF_RX
	xdrmbuf_create(&reply_xdrs, xact->ibuf, XDR_DECODE);
#else
	xdrmem_create(&reply_xdrs, xact->ibuf->buf, xact->ibufsize, XDR_DECODE);
#endif

	reply_msg.acpted_rply.ar_verf          = _null_auth;
	reply_msg.acpted_rply.ar_results.where = xact->pres;
	reply_msg.acpted_rply.ar_results.proc  = xact->xres;

	if (xdr_replymsg(&reply_xdrs, &reply_msg)) {
		/* OK */
		_seterr_reply(&reply_msg, &xact->status);
		if (RPC_SUCCESS == xact->status.re_status) {
			/* verify the server's AUTH verifier */
			if ( !locked_validate(xact->server,
								&reply_msg.acpted_rply.ar_verf) ) {
				xact->status.re_status = RPC_AUTHERROR;
				xact->status.re_why    = AUTH_INVALIDRESP;
			}
			/* release memory the verifier decode may have allocated */
			if (reply_msg.acpted_rply.ar_verf.oa_base) {
				reply_xdrs.x_op = XDR_FREE;
				xdr_opaque_auth(&reply_xdrs, &reply_msg.acpted_rply.ar_verf);
			}
			refresh = 0;
		} else {
			/* should we try to refresh our credentials ? */
			if ( !refresh ) {
				/* had never tried before */
				refresh = RPCIOD_REFRESH;
			}
		}
	} else {
		/* decode failed; free partially decoded results */
		reply_xdrs.x_op        = XDR_FREE;
		xdr_replymsg(&reply_xdrs, &reply_msg);
		xact->status.re_status = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&reply_xdrs);

	bufFree(&xact->ibuf);

#ifndef MBUF_RX
	xact->ibufsize = 0;
#endif

	if (refresh && locked_refresh(xact->server)) {
		rtems_task_ident(RTEMS_SELF, RTEMS_WHO_AM_I, &xact->requestor);
		if ( rtems_message_queue_send(msgQ, &xact, sizeof(xact)) ) {
			return RPC_CANTSEND;
		}
		/* wakeup the rpciod */
		fprintf(stderr,"RPCIO INFO: refreshing my AUTH\n");
		ASSERT( RTEMS_SUCCESSFUL==rtems_event_send(rpciod, RPCIOD_TX_EVENT) );
	}

	/* NOTE(review): the '0 &&' disables this retry loop entirely -
	 * after a refresh the request is re-queued above but we fall out
	 * and return the old (error) status without waiting for the new
	 * reply. Looks like a deliberate or temporary disable - confirm
	 * before changing.
	 */
	} while ( 0 && refresh-- > 0 );

	return xact->status.re_status;
}
---|
918 | |
---|
919 | |
---|
920 | /* On RTEMS, I'm told to avoid select(); this seems to |
---|
921 | * be more efficient |
---|
922 | */ |
---|
923 | static void |
---|
924 | rxWakeupCB(struct socket *sock, void *arg) |
---|
925 | { |
---|
926 | rtems_id *rpciod = (rtems_id*) arg; |
---|
927 | rtems_event_send(*rpciod, RPCIOD_RX_EVENT); |
---|
928 | } |
---|
929 | |
---|
930 | int |
---|
931 | rpcUdpInit(void) |
---|
932 | { |
---|
933 | int s; |
---|
934 | rtems_status_code status; |
---|
935 | int noblock = 1; |
---|
936 | struct sockwakeup wkup; |
---|
937 | |
---|
938 | if (ourSock < 0) { |
---|
939 | fprintf(stderr,"RTEMS-RPCIOD $Release$, " \ |
---|
940 | "Till Straumann, Stanford/SLAC/SSRL 2002, " \ |
---|
941 | "See LICENSE file for licensing info.\n"); |
---|
942 | |
---|
943 | ourSock=socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); |
---|
944 | if (ourSock>=0) { |
---|
945 | bindresvport(ourSock,(struct sockaddr_in*)0); |
---|
946 | s = ioctl(ourSock, FIONBIO, (char*)&noblock); |
---|
947 | assert( s == 0 ); |
---|
948 | /* assume nobody tampers with the clock !! */ |
---|
949 | ticksPerSec = rtems_clock_get_ticks_per_second(); |
---|
950 | MU_CREAT( &hlock ); |
---|
951 | MU_CREAT( &llock ); |
---|
952 | |
---|
953 | if ( !rpciodPriority ) { |
---|
954 | /* use configured networking priority */ |
---|
955 | if ( ! (rpciodPriority = rtems_bsdnet_config.network_task_priority) ) |
---|
956 | rpciodPriority = RPCIOD_PRIO; /* fallback value */ |
---|
957 | } |
---|
958 | |
---|
959 | status = rtems_task_create( |
---|
960 | rtems_build_name('R','P','C','d'), |
---|
961 | rpciodPriority, |
---|
962 | RPCIOD_STACK, |
---|
963 | RTEMS_DEFAULT_MODES, |
---|
964 | /* fprintf saves/restores FP registers on PPC :-( */ |
---|
965 | RTEMS_DEFAULT_ATTRIBUTES | RTEMS_FLOATING_POINT, |
---|
966 | &rpciod); |
---|
967 | assert( status == RTEMS_SUCCESSFUL ); |
---|
968 | |
---|
969 | wkup.sw_pfn = rxWakeupCB; |
---|
970 | wkup.sw_arg = &rpciod; |
---|
971 | assert( 0==setsockopt(ourSock, SOL_SOCKET, SO_RCVWAKEUP, &wkup, sizeof(wkup)) ); |
---|
972 | status = rtems_message_queue_create( |
---|
973 | rtems_build_name('R','P','C','q'), |
---|
974 | RPCIOD_QDEPTH, |
---|
975 | sizeof(RpcUdpXact), |
---|
976 | RTEMS_DEFAULT_ATTRIBUTES, |
---|
977 | &msgQ); |
---|
978 | assert( status == RTEMS_SUCCESSFUL ); |
---|
979 | status = rtems_task_start( rpciod, rpcio_daemon, 0 ); |
---|
980 | assert( status == RTEMS_SUCCESSFUL ); |
---|
981 | |
---|
982 | } else { |
---|
983 | return -1; |
---|
984 | } |
---|
985 | } |
---|
986 | return 0; |
---|
987 | } |
---|
988 | |
---|
/* Shut down the RPC daemon.
 *
 * Creates a 'fini' semaphore, sends the KILL event and waits (up to
 * 5 seconds) for the daemon to acknowledge by releasing 'fini'.
 * On a clean shutdown the daemon zeroes msgQ and suspends itself
 * (see rpcio_daemon), so its task must be deleted here; a nonzero
 * msgQ means the daemon refused to die (transactions outstanding).
 *
 * RETURNS: 0 on success, nonzero if the daemon is still alive.
 */
int
rpcUdpCleanup(void)
{
	rtems_semaphore_create(
			rtems_build_name('R','P','C','f'),
			0,
			RTEMS_DEFAULT_ATTRIBUTES,
			0,
			&fini);
	rtems_event_send(rpciod, RPCIOD_KILL_EVENT);
	/* synchronize with daemon */
	rtems_semaphore_obtain(fini, RTEMS_WAIT, 5*ticksPerSec);
	/* msgQ == 0 indicates the daemon shut down cleanly and suspended
	 * itself; reclaim its (now suspended) task.  If msgQ is still
	 * there, the daemon is still running and must be left alone.
	 */
	if (!msgQ) {
		rtems_task_delete(rpciod);
	}
	rtems_semaphore_delete(fini);
	return (msgQ !=0);
}
---|
1008 | |
---|
1009 | /* Another API - simpler but less efficient. |
---|
1010 | * For each RPCall, a server and a Xact |
---|
1011 | * are created and destroyed on the fly. |
---|
1012 | * |
---|
1013 | * This should be used for infrequent calls |
---|
1014 | * (e.g. a NFS mount request). |
---|
1015 | * |
---|
1016 | * This is roughly compatible with the original |
---|
1017 | * clnt_call() etc. API - but it uses our |
---|
1018 | * daemon and is fully reentrant. |
---|
1019 | */ |
---|
1020 | enum clnt_stat |
---|
1021 | rpcUdpClntCreate( |
---|
1022 | struct sockaddr_in *psaddr, |
---|
1023 | int prog, |
---|
1024 | int vers, |
---|
1025 | u_long uid, |
---|
1026 | u_long gid, |
---|
1027 | RpcUdpClnt *pclnt |
---|
1028 | ) |
---|
1029 | { |
---|
1030 | RpcUdpXact x; |
---|
1031 | RpcUdpServer s; |
---|
1032 | enum clnt_stat err; |
---|
1033 | |
---|
1034 | if ( RPC_SUCCESS != (err=rpcUdpServerCreate(psaddr, prog, vers, uid, gid, &s)) ) |
---|
1035 | return err; |
---|
1036 | |
---|
1037 | if ( !(x=rpcUdpXactCreate(prog, vers, UDPMSGSIZE)) ) { |
---|
1038 | rpcUdpServerDestroy(s); |
---|
1039 | return RPC_FAILED; |
---|
1040 | } |
---|
1041 | /* TODO: could maintain a server cache */ |
---|
1042 | |
---|
1043 | x->server = s; |
---|
1044 | |
---|
1045 | *pclnt = x; |
---|
1046 | |
---|
1047 | return RPC_SUCCESS; |
---|
1048 | } |
---|
1049 | |
---|
1050 | void |
---|
1051 | rpcUdpClntDestroy(RpcUdpClnt xact) |
---|
1052 | { |
---|
1053 | rpcUdpServerDestroy(xact->server); |
---|
1054 | rpcUdpXactDestroy(xact); |
---|
1055 | } |
---|
1056 | |
---|
1057 | enum clnt_stat |
---|
1058 | rpcUdpClntCall( |
---|
1059 | RpcUdpClnt xact, |
---|
1060 | u_long proc, |
---|
1061 | XdrProcT xargs, |
---|
1062 | CaddrT pargs, |
---|
1063 | XdrProcT xres, |
---|
1064 | CaddrT pres, |
---|
1065 | struct timeval *timeout |
---|
1066 | ) |
---|
1067 | { |
---|
1068 | enum clnt_stat stat; |
---|
1069 | |
---|
1070 | if ( (stat = rpcUdpSend(xact, xact->server, timeout, proc, |
---|
1071 | xres, pres, |
---|
1072 | xargs, pargs, |
---|
1073 | 0)) ) { |
---|
1074 | fprintf(stderr,"RPCIO Send failed: %i\n",stat); |
---|
1075 | return stat; |
---|
1076 | } |
---|
1077 | return rpcUdpRcv(xact); |
---|
1078 | } |
---|
1079 | |
---|
1080 | /* a yet simpler interface */ |
---|
1081 | enum clnt_stat |
---|
1082 | rpcUdpCallRp( |
---|
1083 | struct sockaddr_in *psrvr, |
---|
1084 | u_long prog, |
---|
1085 | u_long vers, |
---|
1086 | u_long proc, |
---|
1087 | XdrProcT xargs, |
---|
1088 | CaddrT pargs, |
---|
1089 | XdrProcT xres, |
---|
1090 | CaddrT pres, |
---|
1091 | u_long uid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1092 | u_long gid, /* RPCIO_DEFAULT_ID picks default */ |
---|
1093 | struct timeval *timeout /* NULL picks default */ |
---|
1094 | ) |
---|
1095 | { |
---|
1096 | RpcUdpClnt clp; |
---|
1097 | enum clnt_stat stat; |
---|
1098 | |
---|
1099 | stat = rpcUdpClntCreate( |
---|
1100 | psrvr, |
---|
1101 | prog, |
---|
1102 | vers, |
---|
1103 | uid, |
---|
1104 | gid, |
---|
1105 | &clp); |
---|
1106 | |
---|
1107 | if ( RPC_SUCCESS != stat ) |
---|
1108 | return stat; |
---|
1109 | |
---|
1110 | stat = rpcUdpClntCall( |
---|
1111 | clp, |
---|
1112 | proc, |
---|
1113 | xargs, pargs, |
---|
1114 | xres, pres, |
---|
1115 | timeout); |
---|
1116 | |
---|
1117 | rpcUdpClntDestroy(clp); |
---|
1118 | |
---|
1119 | return stat; |
---|
1120 | } |
---|
1121 | |
---|
1122 | /* linked list primitives */ |
---|
1123 | static void |
---|
1124 | nodeXtract(ListNode n) |
---|
1125 | { |
---|
1126 | if (n->prev) |
---|
1127 | n->prev->next = n->next; |
---|
1128 | if (n->next) |
---|
1129 | n->next->prev = n->prev; |
---|
1130 | n->next = n->prev = 0; |
---|
1131 | } |
---|
1132 | |
---|
1133 | static void |
---|
1134 | nodeAppend(ListNode l, ListNode n) |
---|
1135 | { |
---|
1136 | if ( (n->next = l->next) ) |
---|
1137 | n->next->prev = n; |
---|
1138 | l->next = n; |
---|
1139 | n->prev = l; |
---|
1140 | |
---|
1141 | } |
---|
1142 | |
---|
1143 | /* this code does the work */ |
---|
/* The RPC daemon task: funnels all requestors' sends/receives through
 * the single shared socket and drives retransmission.
 *
 * Main loop: wait for RX/TX/KILL events with a timeout equal to the
 * next pending retransmission deadline; then
 *   - KILL: shut down if (and only if) no transactions are in flight;
 *   - RX:   drain the socket, match replies to transactions and wake
 *           the requestors;
 *   - TX:   pick up newly queued transactions from msgQ;
 *   - work the (age-sorted) retransmission queue: time out or (re)send.
 *
 * Ownership protocol: the daemon 'owns' a transaction (and its hash
 * slot) from the moment it is queued on msgQ until the requestor is
 * woken with RTEMS_RPC_EVENT - hence no hash-table locking while the
 * daemon manipulates a transaction.
 */
static void
rpcio_daemon(rtems_task_argument arg)
{
	rtems_status_code stat;
	RpcUdpXact        xact;
	RpcUdpServer      srv;
	rtems_interval    next_retrans, then, unow;
	long              now;	/* need to do signed comparison with age! */
	rtems_event_set   events;
	ListNode          newList;
	size_t            size;
	rtems_id          q          = 0;
	ListNodeRec       listHead   = {0, 0};
	unsigned long     epoch      = RPCIOD_EPOCH_SECS * ticksPerSec;
	unsigned long     max_period = RPCIOD_RETX_CAP_S * ticksPerSec;
	rtems_status_code status;


	then = rtems_clock_get_ticks_since_boot();

	for (next_retrans = epoch;;) {

		/* sleep until woken or until the earliest retransmission is due */
		if ( RTEMS_SUCCESSFUL !=
			(stat = rtems_event_receive(
					RPCIOD_RX_EVENT | RPCIOD_TX_EVENT | RPCIOD_KILL_EVENT,
					RTEMS_WAIT | RTEMS_EVENT_ANY,
					next_retrans,
					&events)) ) {
			ASSERT( RTEMS_TIMEOUT == stat );
			events = 0;
		}

		if (events & RPCIOD_KILL_EVENT) {
			int i;

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got KILL event\n");
#endif

			/* scan the hash table for any live transaction */
			MU_LOCK(hlock);
			for (i=XACT_HASHS-1; i>=0; i--) {
				if (xactHashTbl[i]) {
					break;
				}
			}
			if (i<0) {
				/* prevent them from creating and enqueueing more messages */
				q=msgQ;
				/* messages queued after we executed this assignment will fail */
				msgQ=0;
			}
			MU_UNLOCK(hlock);
			if (i>=0) {
				/* refuse to shut down while transactions are in flight;
				 * signal the cleanup task so it doesn't block forever
				 */
				fprintf(stderr,"RPCIO There are still transactions circulating; I refuse to go away\n");
				fprintf(stderr,"(1st in slot %i)\n",i);
				rtems_semaphore_release(fini);
			} else {
				break;
			}
		}

		unow = rtems_clock_get_ticks_since_boot();

		/* measure everything relative to then to protect against
		 * rollover
		 */
		now = unow - then;

		/* NOTE: we don't lock the hash table while we are operating
		 * on transactions; the paradigm is that we 'own' a particular
		 * transaction (and hence it's hash table slot) from the
		 * time the xact was put into the message queue until we
		 * wake up the requestor.
		 */

		if (RPCIOD_RX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got RX event\n");
#endif

			/* drain the socket; sockRcv() returns matched transactions */
			while ((xact=sockRcv())) {

				/* extract from the retransmission list */
				nodeXtract(&xact->node);

				/* change the ID - there might already be
				 * a retransmission on the way. When its
				 * reply arrives we must not find its ID
				 * in the hashtable
				 */
				xact->obuf.xid += XACT_HASHS;

				xact->status.re_status = RPC_SUCCESS;

				/* calculate roundtrip ticks */
				xact->trip = now - xact->trip;

				srv = xact->server;

				/* adjust the server's retry period */
				{
					register TimeoutT rtry = srv->retry_period;
					register TimeoutT trip = xact->trip;

					ASSERT( trip >= 0 );

					if ( 0==trip )
						trip = 1;

					/* retry_new = 0.75*retry_old + 0.25 * 8 * roundrip */
					rtry = (3*rtry + (trip << 3)) >> 2;

					if ( rtry > max_period )
						rtry = max_period;

					srv->retry_period = rtry;
				}

				/* wakeup requestor */
				rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
			}
		}

		if (RPCIOD_TX_EVENT & events) {

#if (DEBUG) & DEBUG_EVENTS
			fprintf(stderr,"RPCIO: got TX event\n");
#endif

			/* pick up all newly submitted transactions */
			while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
					msgQ,
					&xact,
					&size,
					RTEMS_NO_WAIT,
					RTEMS_NO_TIMEOUT)) {
				/* put to the head of timeout q */
				nodeAppend(&listHead, &xact->node);

				/* age==now makes it due immediately in the loop below */
				xact->age  = now;
				xact->trip = FIRST_ATTEMPT;
			}
		}


		/* work the timeout q: everything with age <= now is due */
		newList = 0;
		for ( xact=(RpcUdpXact)listHead.next;
		      xact && xact->age <= now;
		      xact=(RpcUdpXact)listHead.next ) {

			/* extract from the list */
			nodeXtract(&xact->node);

			srv = xact->server;

			if (xact->tolive < 0) {
				/* this one timed out */
				xact->status.re_errno  = ETIMEDOUT;
				xact->status.re_status = RPC_TIMEDOUT;

				srv->timeouts++;

				/* Change the ID - there might still be
				 * a reply on the way. When it arrives we
				 * must not find its ID in the hash table
				 *
				 * Thanks to Steven Johnson for hunting this
				 * one down.
				 */
				xact->obuf.xid += XACT_HASHS;

#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,"RPCIO XACT timed out; waking up requestor\n");
#endif
				if ( rtems_event_send(xact->requestor, RTEMS_RPC_EVENT) ) {
					rtems_panic("RPCIO PANIC file %s line: %i, requestor id was 0x%08x",
							__FILE__,
							__LINE__,
							xact->requestor);
				}

			} else {
				int len;

				len = (int)XDR_GETPOS(&xact->xdrs);

#ifdef MBUF_TX
				xact->refcnt = 1;	/* sendto itself */
#endif
				/* (re)send the encoded request */
				if ( len != SENDTO( ourSock,
							xact->obuf.buf,
							len,
							0,
							&srv->addr.sa,
							sizeof(srv->addr.sin)
#ifdef MBUF_TX
							, xact,
							paranoia_free,
							paranoia_ref
#endif
						) ) {

					xact->status.re_errno  = errno;
					xact->status.re_status = RPC_CANTSEND;
					srv->errors++;

					/* wakeup requestor */
					fprintf(stderr,"RPCIO: SEND failure\n");
					status = rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
					assert( status == RTEMS_SUCCESSFUL );

				} else {
					/* send successful; calculate retransmission time
					 * and enqueue to temporary list
					 */
					if (FIRST_ATTEMPT != xact->trip) {
#if (DEBUG) & DEBUG_TIMEOUT
						fprintf(stderr,
							"timed out; tolive is %i (ticks), retry period is %i (ticks)\n",
							xact->tolive,
							srv->retry_period);
#endif
						/* this is a real retry; we backup
						 * the server's retry interval
						 */
						if ( srv->retry_period < max_period ) {

							/* If multiple transactions for this server
							 * fail (e.g. because it died) this will
							 * back-off very agressively (doubling
							 * the retransmission period for every
							 * timed out transaction up to the CAP limit)
							 * which is desirable - single packet failure
							 * is treated more gracefully by this algorithm.
							 */

							srv->retry_period<<=1;
#if (DEBUG) & DEBUG_TIMEOUT
							fprintf(stderr,
								"adjusted to; retry period %i\n",
								srv->retry_period);
#endif
						} else {
							/* never wait longer than RPCIOD_RETX_CAP_S seconds */
							fprintf(stderr,
								"RPCIO: server '%s' not responding - still trying\n",
								srv->name);
						}
						if ( 0 == ++srv->retrans % 1000) {
							fprintf(stderr,
								"RPCIO - statistics: already %li retries to server %s\n",
								srv->retrans,
								srv->name);
						}
					} else {
						srv->requests++;
					}
					/* record the send time for roundtrip measurement */
					xact->trip = now;
					{
						/* next deadline: retry period, capped by the
						 * transaction's remaining lifetime
						 */
						long capped_period = srv->retry_period;
						if ( xact->lifetime < capped_period )
							capped_period = xact->lifetime;
						xact->age     = now + capped_period;
						xact->tolive -= capped_period;
					}
					/* enqueue to the list of newly sent transactions */
					xact->node.next = newList;
					newList         = &xact->node;
#if (DEBUG) & DEBUG_TIMEOUT
					fprintf(stderr,
						"XACT (0x%08x) age is 0x%x, now: 0x%x\n",
						xact,
						xact->age,
						now);
#endif
				}
			}
		}

		/* insert the newly sent transactions into the
		 * sorted retransmission list
		 */
		for (; (xact = (RpcUdpXact)newList); ) {
			register ListNode p,n;
			newList = newList->next;
			for ( p=&listHead; (n=p->next) && xact->age > ((RpcUdpXact)n)->age; p=n )
				/* nothing else to do */;
			nodeAppend(p, &xact->node);
		}

		if (now > epoch) {
			/* every now and then, readjust the epoch */
			register ListNode n;
			then += now;
			for (n=listHead.next; n; n=n->next) {
				/* readjust outstanding time intervals subject to the
				 * condition that the 'absolute' time must remain
				 * the same. 'age' and 'trip' are measured with
				 * respect to 'then' - hence:
				 *
				 * abs_age == old_age + old_then == new_age + new_then
				 *
				 * ==> new_age = old_age + old_then - new_then == old_age - 'now'
				 */
				((RpcUdpXact)n)->age  -= now;
				((RpcUdpXact)n)->trip -= now;
#if (DEBUG) & DEBUG_TIMEOUT
				fprintf(stderr,
					"readjusted XACT (0x%08x); age is 0x%x, trip: 0x%x now: 0x%x\n",
					(RpcUdpXact)n,
					((RpcUdpXact)n)->trip,
					((RpcUdpXact)n)->age,
					now);
#endif
			}
			now = 0;
		}

		/* next wait: until the earliest pending deadline, or one
		 * epoch so the epoch readjustment above is not missed
		 */
		next_retrans = listHead.next ?
				((RpcUdpXact)listHead.next)->age - now :
				epoch;	/* make sure we don't miss updating the epoch */
#if (DEBUG) & DEBUG_TIMEOUT
		fprintf(stderr,"RPCIO: next timeout is %x\n",next_retrans);
#endif
	}
	/* close our socket; shut down the receiver */
	close(ourSock);

#if 0 /* if we get here, no transactions exist, hence there can be none
       * in the queue whatsoever
       */
	/* flush the message queue */
	while (RTEMS_SUCCESSFUL == rtems_message_queue_receive(
			q,
			&xact,
			&size,
			RTEMS_NO_WAIT,
			RTEMS_NO_TIMEOUT)) {
		/* TODO enque xact */
	}

	/* flush all outstanding transactions */

	for (xact=((RpcUdpXact)listHead.next); xact; xact=((RpcUdpXact)xact->node.next)) {
		xact->status.re_status = RPC_TIMEDOUT;
		rtems_event_send(xact->requestor, RTEMS_RPC_EVENT);
	}
#endif

	/* 'q' holds the former msgQ (saved before msgQ was zeroed above) */
	rtems_message_queue_delete(q);

	MU_DESTROY(hlock);

	fprintf(stderr,"RPC daemon exited...\n");

	/* let rpcUdpCleanup() proceed; it deletes this (suspended) task */
	rtems_semaphore_release(fini);
	rtems_task_suspend(RTEMS_SELF);
}
---|
1503 | |
---|
1504 | |
---|
1505 | /* support for transaction 'pools'. A number of XACT objects |
---|
1506 | * is always kept around. The initial number is 0 but it |
---|
1507 | * is allowed to grow up to a maximum. |
---|
1508 | * If the need grows beyond the maximum, behavior depends: |
---|
1509 | * Users can either block until a transaction becomes available, |
---|
1510 | * they can create a new XACT on the fly or get an error |
---|
1511 | * if no free XACT is available from the pool. |
---|
1512 | */ |
---|
1513 | |
---|
1514 | RpcUdpXactPool |
---|
1515 | rpcUdpXactPoolCreate( |
---|
1516 | int prog, int version, |
---|
1517 | int xactsize, int poolsize) |
---|
1518 | { |
---|
1519 | RpcUdpXactPool rval = MY_MALLOC(sizeof(*rval)); |
---|
1520 | rtems_status_code status; |
---|
1521 | |
---|
1522 | ASSERT( rval ); |
---|
1523 | status = rtems_message_queue_create( |
---|
1524 | rtems_build_name('R','P','C','p'), |
---|
1525 | poolsize, |
---|
1526 | sizeof(RpcUdpXact), |
---|
1527 | RTEMS_DEFAULT_ATTRIBUTES, |
---|
1528 | &rval->box); |
---|
1529 | assert( status == RTEMS_SUCCESSFUL ); |
---|
1530 | |
---|
1531 | rval->prog = prog; |
---|
1532 | rval->version = version; |
---|
1533 | rval->xactSize = xactsize; |
---|
1534 | return rval; |
---|
1535 | } |
---|
1536 | |
---|
1537 | void |
---|
1538 | rpcUdpXactPoolDestroy(RpcUdpXactPool pool) |
---|
1539 | { |
---|
1540 | RpcUdpXact xact; |
---|
1541 | |
---|
1542 | while ((xact = rpcUdpXactPoolGet(pool, XactGetFail))) { |
---|
1543 | rpcUdpXactDestroy(xact); |
---|
1544 | } |
---|
1545 | rtems_message_queue_delete(pool->box); |
---|
1546 | MY_FREE(pool); |
---|
1547 | } |
---|
1548 | |
---|
1549 | RpcUdpXact |
---|
1550 | rpcUdpXactPoolGet(RpcUdpXactPool pool, XactPoolGetMode mode) |
---|
1551 | { |
---|
1552 | RpcUdpXact xact = 0; |
---|
1553 | size_t size; |
---|
1554 | |
---|
1555 | if (RTEMS_SUCCESSFUL != rtems_message_queue_receive( |
---|
1556 | pool->box, |
---|
1557 | &xact, |
---|
1558 | &size, |
---|
1559 | XactGetWait == mode ? |
---|
1560 | RTEMS_WAIT : RTEMS_NO_WAIT, |
---|
1561 | RTEMS_NO_TIMEOUT)) { |
---|
1562 | |
---|
1563 | /* nothing found in box; should we create a new one ? */ |
---|
1564 | |
---|
1565 | xact = (XactGetCreate == mode) ? |
---|
1566 | rpcUdpXactCreate( |
---|
1567 | pool->prog, |
---|
1568 | pool->version, |
---|
1569 | pool->xactSize) : 0 ; |
---|
1570 | if (xact) |
---|
1571 | xact->pool = pool; |
---|
1572 | |
---|
1573 | } |
---|
1574 | return xact; |
---|
1575 | } |
---|
1576 | |
---|
1577 | void |
---|
1578 | rpcUdpXactPoolPut(RpcUdpXact xact) |
---|
1579 | { |
---|
1580 | RpcUdpXactPool pool; |
---|
1581 | |
---|
1582 | pool = xact->pool; |
---|
1583 | ASSERT( pool ); |
---|
1584 | |
---|
1585 | if (RTEMS_SUCCESSFUL != rtems_message_queue_send( |
---|
1586 | pool->box, |
---|
1587 | &xact, |
---|
1588 | sizeof(xact))) |
---|
1589 | rpcUdpXactDestroy(xact); |
---|
1590 | } |
---|
1591 | |
---|
1592 | #ifdef MBUF_RX |
---|
1593 | |
---|
1594 | /* WORKAROUND: include sys/mbuf.h (or other bsdnet headers) only |
---|
1595 | * _after_ using malloc()/free() & friends because |
---|
1596 | * the RTEMS/BSDNET headers redefine those :-( |
---|
1597 | */ |
---|
1598 | |
---|
1599 | #define _KERNEL |
---|
1600 | #include <sys/mbuf.h> |
---|
1601 | |
---|
1602 | ssize_t |
---|
1603 | recv_mbuf_from(int s, struct mbuf **ppm, long len, struct sockaddr *fromaddr, int *fromlen); |
---|
1604 | |
---|
/* Release an mbuf chain (if any) and NULL-out the caller's pointer.
 * m_freem() is wrapped in the bsdnet semaphore here.
 */
static void
bufFree(struct mbuf **m)
{
	if ( !*m )
		return;

	rtems_bsdnet_semaphore_obtain();
	m_freem(*m);
	rtems_bsdnet_semaphore_release();
	*m = 0;
}
---|
1615 | #endif |
---|
1616 | |
---|
1617 | #ifdef MBUF_TX |
---|
/* 'free' callback handed to the mbuf-based SENDTO() (see rpcio_daemon).
 * In DEBUG builds it decrements the transaction's refcnt and checks
 * that the reported size matches the encoded length; otherwise it is
 * a no-op (parameters unused).
 */
static void
paranoia_free(caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);

	ASSERT( --xact->refcnt >= 0 && size == len );
#endif
}
---|
1628 | |
---|
/* 'ref' callback handed to the mbuf-based SENDTO() (counterpart of
 * paranoia_free).  In DEBUG builds it bumps the transaction's refcnt
 * and checks the size against the encoded length; otherwise a no-op.
 */
static void
paranoia_ref (caddr_t closure, u_int size)
{
#if (DEBUG)
	RpcUdpXact xact = (RpcUdpXact)closure;
	int        len  = (int)XDR_GETPOS(&xact->xdrs);
	ASSERT( size == len );
	xact->refcnt++;
#endif
}
---|
1639 | #endif |
---|
1640 | |
---|
/* Receive from a socket and find
 * the transaction corresponding to the
 * transaction ID received in the server
 * reply.
 *
 * Receive-buffer handling differs between the two builds:
 *
 * MBUF_RX:   recv_mbuf_from() allocates an mbuf chain which, on a
 *            successful match, is attached to the transaction
 *            (xact->ibuf); stale or unmatched chains are released
 *            with bufFree().
 *
 * otherwise: a buffer of RPCIOD_RXBUFSZ bytes is malloc()ed once and
 *            re-used across receive attempts; on success it is
 *            attached to the transaction along with its size
 *            (xact->ibuf / xact->ibufsize).
 */
---|
1652 | |
---|
1653 | #define RPCIOD_RXBUFSZ UDPMSGSIZE |
---|
1654 | |
---|
/* Read one RPC reply from the shared socket and look up the matching
 * transaction by the XID's hash slot; loop until a valid match is
 * found or the socket is drained (EAGAIN).
 *
 * On success the receive buffer is attached to the transaction
 * (xact->ibuf, plus xact->ibufsize in the non-mbuf build) and the
 * transaction is returned; on failure 0 is returned and the buffer
 * is released.
 */
static RpcUdpXact
sockRcv(void)
{
	int        len,i;
	u_long     xid;
	union {
		struct sockaddr_in sin;
		struct sockaddr    sa;
	}          fromAddr;
	int        fromLen = sizeof(fromAddr.sin);
	RxBuf      ibuf    = 0;
	RpcUdpXact xact    = 0;

	do {

		/* rcv_mbuf() and recvfrom() differ in that the
		 * former allocates buffers and passes them back
		 * to us whereas the latter requires us to provide
		 * buffer space.
		 * Hence, in the first case we have to make sure
		 * no old buffer is leaked - in the second case,
		 * we might well re-use an old buffer but must
		 * make sure we have one allocated
		 */
#ifdef MBUF_RX
		if (ibuf)
			bufFree(&ibuf);

		len = recv_mbuf_from(
				ourSock,
				&ibuf,
				RPCIOD_RXBUFSZ,
				&fromAddr.sa,
				&fromLen);
#else
		if ( !ibuf )
			ibuf = (RpcBuf)MY_MALLOC(RPCIOD_RXBUFSZ);
		if ( !ibuf )
			goto cleanup; /* no memory - drop this message */

		len  = recvfrom(ourSock,
				ibuf->buf,
				RPCIOD_RXBUFSZ,
				0,
				&fromAddr.sa,
				&fromLen);
#endif

		/* len <= 0: socket drained (EAGAIN, socket is non-blocking)
		 * or a real receive error
		 */
		if (len <= 0) {
			if (EAGAIN != errno)
				fprintf(stderr,"RECV failed: %s\n",strerror(errno));
			goto cleanup;
		}

#if (DEBUG) & DEBUG_PACKLOSS
		/* debug build only: artificially drop a fraction of packets */
		if ( (unsigned)rand() < DEBUG_PACKLOSS_FRACT ) {
			/* lose packets once in a while */
			static int xxx = 0;
			if ( ++xxx % 16 == 0 )
				fprintf(stderr,"DEBUG: dropped %i packets, so far...\n",xxx);
			if ( ibuf )
				bufFree( &ibuf );
			continue;
		}
#endif

		/* the low bits of the XID select the hash slot */
		i = (xid=XID(ibuf)) & XACT_HASH_MSK;

		/* validate the match: XID and server port (and, optionally,
		 * server IP) must agree with the transaction in the slot
		 */
		if ( !(xact=xactHashTbl[i])                               ||
		       xact->obuf.xid                     != xid          ||
#ifdef REJECT_SERVERIP_MISMATCH
		       xact->server->addr.sin.sin_addr.s_addr != fromAddr.sin.sin_addr.s_addr ||
#endif
		       xact->server->addr.sin.sin_port    != fromAddr.sin.sin_port ) {

			if (xact) {
				/* XIDs offset by 1 or 2 hash generations indicate a
				 * late reply to an already-completed/timed-out attempt
				 * (the daemon bumps obuf.xid by XACT_HASHS on completion
				 * and on timeout)
				 */
				if (
#ifdef REJECT_SERVERIP_MISMATCH
				    xact->server->addr.sin.sin_addr.s_addr == fromAddr.sin.sin_addr.s_addr &&
#endif
				    xact->server->addr.sin.sin_port        == fromAddr.sin.sin_port         &&
				    ( xact->obuf.xid                       == xid + XACT_HASHS   ||
				      xact->obuf.xid                       == xid + 2*XACT_HASHS    )
				   ) {
#ifndef DEBUG /* don't complain if it's just a late arrival of a retry */
					fprintf(stderr,"RPCIO - FYI sockRcv(): dropping late/redundant retry answer\n");
#endif
				} else {
					fprintf(stderr,"RPCIO WARNING sockRcv(): transaction mismatch\n");
					fprintf(stderr,"xact: xid  0x%08lx  -- got 0x%08lx\n",
							xact->obuf.xid, xid);
					fprintf(stderr,"xact: addr 0x%08lx  -- got 0x%08lx\n",
							xact->server->addr.sin.sin_addr.s_addr,
							fromAddr.sin.sin_addr.s_addr);
					fprintf(stderr,"xact: port 0x%08x  -- got 0x%08x\n",
							xact->server->addr.sin.sin_port,
							fromAddr.sin.sin_port);
				}
			} else {
				fprintf(stderr,
						"RPCIO WARNING sockRcv(): got xid 0x%08lx but its slot is empty\n",
						xid);
			}
			/* forget about this one and try again */
			xact = 0;
		}

	} while ( !xact );

	/* hand the buffer over to the matched transaction */
	xact->ibuf     = ibuf;
#ifndef MBUF_RX
	xact->ibufsize = RPCIOD_RXBUFSZ;
#endif

	return xact;

cleanup:

	bufFree(&ibuf);

	return 0;
}
---|
1777 | |
---|
1778 | |
---|
1779 | #include <rtems/rtems_bsdnet_internal.h> |
---|
1780 | /* double check the event configuration; should probably globally |
---|
1781 | * manage system events!! |
---|
1782 | * We do this at the end of the file for the same reason we had |
---|
1783 | * included mbuf.h only a couple of lines above - see comment up |
---|
1784 | * there... |
---|
1785 | */ |
---|
1786 | #if RTEMS_RPC_EVENT & SOSLEEP_EVENT & SBWAIT_EVENT & NETISR_EVENTS |
---|
1787 | #error ILLEGAL EVENT CONFIGURATION |
---|
1788 | #endif |
---|