1 | /* |
---|
2 | * This file has undergone several changes to reflect the |
---|
3 | * differences between the RTEMS and FreeBSD kernels. |
---|
4 | */ |
---|
5 | |
---|
6 | /* |
---|
7 | * Copyright (c) 1982, 1986, 1988, 1990, 1993 |
---|
8 | * The Regents of the University of California. All rights reserved. |
---|
9 | * |
---|
10 | * Redistribution and use in source and binary forms, with or without |
---|
11 | * modification, are permitted provided that the following conditions |
---|
12 | * are met: |
---|
13 | * 1. Redistributions of source code must retain the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer. |
---|
15 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
16 | * notice, this list of conditions and the following disclaimer in the |
---|
17 | * documentation and/or other materials provided with the distribution. |
---|
18 | * 3. All advertising materials mentioning features or use of this software |
---|
19 | * must display the following acknowledgement: |
---|
20 | * This product includes software developed by the University of |
---|
21 | * California, Berkeley and its contributors. |
---|
22 | * 4. Neither the name of the University nor the names of its contributors |
---|
23 | * may be used to endorse or promote products derived from this software |
---|
24 | * without specific prior written permission. |
---|
25 | * |
---|
26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
---|
27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
---|
30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
36 | * SUCH DAMAGE. |
---|
37 | * |
---|
38 | * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93 |
---|
39 | */ |
---|
40 | |
---|
41 | #include <sys/param.h> |
---|
42 | #include <sys/systm.h> |
---|
43 | #include <sys/kernel.h> |
---|
44 | #include <sys/proc.h> |
---|
45 | #include <sys/file.h> |
---|
46 | #include <sys/queue.h> |
---|
47 | #include <sys/malloc.h> |
---|
48 | #include <sys/mbuf.h> |
---|
49 | #include <sys/protosw.h> |
---|
50 | #include <sys/stat.h> |
---|
51 | #include <sys/socket.h> |
---|
52 | #include <sys/socketvar.h> |
---|
53 | #include <sys/signalvar.h> |
---|
54 | #include <sys/sysctl.h> |
---|
55 | |
---|
56 | /* |
---|
57 | * Primitive routines for operating on sockets and socket buffers |
---|
58 | */ |
---|
59 | |
---|
/* Global upper bound on any single socket buffer reservation (see sbreserve());
 * exported read-write as sysctl kern.maxsockbuf. */
u_long sb_max = SB_MAX;		/* XXX should be static */
SYSCTL_INT(_kern, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW, &sb_max, 0, "");

/* Multiplier applied in sbreserve(): a buffer may hold sb_efficiency times its
 * data high-water mark in raw mbuf storage (to absorb buffering overhead).
 * Exported as sysctl kern.sockbuf_waste_factor. */
static u_long sb_efficiency = 8;	/* parameter for sbreserve() */
SYSCTL_INT(_kern, OID_AUTO, sockbuf_waste_factor, CTLFLAG_RW, &sb_efficiency,
	0, "");
---|
66 | |
---|
#if defined(__rtems__)
/*
 * RTEMS hook: override the socket buffer waste factor consulted by
 * sbreserve().  Passing zero selects the fallback value of 2.
 */
void
rtems_set_sb_efficiency(u_long efficiency)
{
	if (efficiency == 0)
		efficiency = 2;
	sb_efficiency = efficiency;
}
#endif
---|
75 | |
---|
76 | /* |
---|
77 | * Procedures to manipulate state flags of socket |
---|
78 | * and do appropriate wakeups. Normal sequence from the |
---|
79 | * active (originating) side is that soisconnecting() is |
---|
80 | * called during processing of connect() call, |
---|
81 | * resulting in an eventual call to soisconnected() if/when the |
---|
82 | * connection is established. When the connection is torn down |
---|
83 | * soisdisconnecting() is called during processing of disconnect() call, |
---|
84 | * and soisdisconnected() is called when the connection to the peer |
---|
85 | * is totally severed. The semantics of these routines are such that |
---|
86 | * connectionless protocols can call soisconnected() and soisdisconnected() |
---|
87 | * only, bypassing the in-progress calls when setting up a ``connection'' |
---|
88 | * takes no time. |
---|
89 | * |
---|
90 | * From the passive side, a socket is created with |
---|
91 | * two queues of sockets: so_q0 for connections in progress |
---|
92 | * and so_q for connections already made and awaiting user acceptance. |
---|
93 | * As a protocol is preparing incoming connections, it creates a socket |
---|
94 | * structure queued on so_q0 by calling sonewconn(). When the connection |
---|
95 | * is established, soisconnected() is called, and transfers the |
---|
96 | * socket structure to so_q, making it available to accept(). |
---|
97 | * |
---|
98 | * If a socket is closed with sockets on either |
---|
99 | * so_q0 or so_q, these sockets are dropped. |
---|
100 | * |
---|
101 | * If higher level protocols are implemented in |
---|
102 | * the kernel, the wakeups done here will sometimes |
---|
103 | * cause software-interrupt process scheduling. |
---|
104 | */ |
---|
105 | |
---|
/*
 * Mark a socket as having a connect() in progress: clear any stale
 * connected/disconnecting bits and set SS_ISCONNECTING.  No wakeups
 * are performed here; they happen when the connection resolves.
 */
void
soisconnecting(struct socket *so)
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}
---|
113 | |
---|
/*
 * Mark a socket as fully connected.  If the socket was created by
 * sonewconn() and still sits on its listener's incomplete queue
 * (SS_INCOMP), migrate it to the completed queue and wake up anyone
 * blocked in accept()/select() on the listener; otherwise wake up
 * all waiters on the socket itself.
 */
void
soisconnected(struct socket *so)
{
	register struct socket *head = so->so_head;	/* listener, if any */

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && (so->so_state & SS_INCOMP)) {
		/* Move from the listener's so_incomp to its so_comp queue. */
		TAILQ_REMOVE(&head->so_incomp, so, so_list);
		head->so_incqlen--;
		so->so_state &= ~SS_INCOMP;
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_state |= SS_COMP;
		/* Wake the listener: a connection is now ready to accept(). */
		sorwakeup(head);
		soconnwakeup(head);
	} else {
		soconnwakeup(so);
		sorwakeup(so);
		sowwakeup(so);
	}
}
---|
135 | |
---|
136 | void |
---|
137 | soisdisconnecting(struct socket *so) |
---|
138 | { |
---|
139 | |
---|
140 | so->so_state &= ~SS_ISCONNECTING; |
---|
141 | so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); |
---|
142 | soconnwakeup(so); |
---|
143 | sowwakeup(so); |
---|
144 | sorwakeup(so); |
---|
145 | } |
---|
146 | |
---|
147 | void |
---|
148 | soisdisconnected(struct socket *so) |
---|
149 | { |
---|
150 | |
---|
151 | so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); |
---|
152 | so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); |
---|
153 | soconnwakeup(so); |
---|
154 | sowwakeup(so); |
---|
155 | sorwakeup(so); |
---|
156 | } |
---|
157 | |
---|
158 | /* |
---|
159 | * Return a random connection that hasn't been serviced yet and |
---|
160 | * is eligible for discard. There is a one in qlen chance that |
---|
161 | * we will return a null, saying that there are no droppable |
---|
162 | * requests. In this case, the protocol specific code should drop |
---|
163 | * the new request. This ensures fairness. |
---|
164 | * |
---|
165 | * This may be used in conjunction with protocol specific queue |
---|
166 | * congestion routines. |
---|
167 | */ |
---|
struct socket *
sodropablereq(struct socket *head)
{
	register struct socket *so;
	uint32_t i, j, qlen, m;

	/* Per-call-rate state, shared across all sockets (static). */
	static int rnd;				/* 16-bit LCG state */
	static long old_mono_secs;		/* last sample timestamp */
	static unsigned int cur_cnt, old_cnt;	/* calls this second / per-second rate */

	/*
	 * Once per second, fold the call counter into a calls-per-second
	 * estimate (old_cnt) and restart counting.
	 */
	if ((i = (m = rtems_bsdnet_seconds_since_boot()) - old_mono_secs) != 0) {
		old_mono_secs = m;
		old_cnt = cur_cnt / i;
		cur_cnt = 0;
	}

	so = TAILQ_FIRST(&head->so_incomp);
	if (!so)
		return (so);	/* nothing queued, nothing to drop */

	qlen = head->so_incqlen;
	/*
	 * Under pressure (more drop requests than queued entries), pick a
	 * random victim instead of the head.  rnd is a 16-bit linear
	 * congruential generator; j is uniform in [0, qlen].  When j == qlen
	 * the walk runs off the end and NULL is returned, giving the
	 * documented one-in-(qlen+1) chance of dropping the new request
	 * instead.
	 */
	if (++cur_cnt > qlen || old_cnt > qlen) {
		rnd = (314159 * rnd + 66329) & 0xffff;
		j = ((qlen + 1) * rnd) >> 16;

		while (j-- && so)
			so = TAILQ_NEXT(so, so_list);
	}

	return (so);
}
---|
199 | |
---|
200 | /* |
---|
201 | * When an attempt at a new connection is noted on a socket |
---|
202 | * which accepts connections, sonewconn is called. If the |
---|
203 | * connection is possible (subject to space constraints, etc.) |
---|
204 | * then we allocate a new structure, properly linked into the |
---|
205 | * data structure of the original socket, and return this. |
---|
206 | * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED. |
---|
207 | * |
---|
208 | * Currently, sonewconn() is defined as sonewconn1() in socketvar.h |
---|
209 | * to catch calls that are missing the (new) second parameter. |
---|
210 | */ |
---|
/*
 * Create a new socket for an incoming connection on listening socket
 * `head'.  The new socket inherits type, options (minus SO_ACCEPTCONN),
 * linger, state, protocol and timeout from the listener, receives the
 * same buffer reservations, and is queued on the listener's so_comp
 * queue (when connstatus is nonzero) or so_incomp queue.  Returns the
 * new socket, or a null pointer when the listen queue is over its
 * 3/2 limit, allocation fails, or the protocol attach fails.
 */
struct socket *
sonewconn1(struct socket *head, int connstatus)
{
	register struct socket *so;

	/* Refuse once the queue is 50% over the listen() backlog. */
	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	bzero((caddr_t)so, sizeof(*so));
	/* Inherit the listener's attributes. */
	so->so_head = head;
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	/* Reservation failure is ignored here, as in the original BSD code. */
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	if (connstatus) {
		/* Connection already established: completed queue. */
		TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
		so->so_state |= SS_COMP;
	} else {
		/* Connection in progress: incomplete queue. */
		TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
		so->so_state |= SS_INCOMP;
		head->so_incqlen++;
	}
	head->so_qlen++;
	if ((*so->so_proto->pr_usrreqs->pru_attach)(so, 0)) {
		/* Protocol attach failed: unwind the queue insertion. */
		if (so->so_state & SS_COMP) {
			TAILQ_REMOVE(&head->so_comp, so, so_list);
		} else {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		}
		head->so_qlen--;
		(void) free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		/* Wake the listener and record the connected state bits. */
		sorwakeup(head);
		soconnwakeup(head);
		so->so_state |= connstatus;
	}
	return (so);
}
---|
257 | |
---|
258 | /* |
---|
259 | * Socantsendmore indicates that no more data will be sent on the |
---|
260 | * socket; it would normally be applied to a socket when the user |
---|
261 | * informs the system that no more data is to be sent, by the protocol |
---|
262 | * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data |
---|
263 | * will be received, and will normally be applied to the socket by a |
---|
264 | * protocol when it detects that the peer will send no more data. |
---|
265 | * Data queued for reading in the socket may yet be read. |
---|
266 | */ |
---|
267 | |
---|
/*
 * Note that no more data will be sent on this socket and wake up
 * any writer blocked on the send buffer.
 */
void
socantsendmore(struct socket *so)
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}
---|
275 | |
---|
/*
 * Note that no more data will arrive on this socket and wake up any
 * reader blocked on the receive buffer.  Data already queued may
 * still be read.
 */
void
socantrcvmore(struct socket *so)
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
---|
283 | |
---|
284 | /* |
---|
285 | * Socket buffer (struct sockbuf) utility routines. |
---|
286 | * |
---|
287 | * Each socket contains two socket buffers: one for sending data and |
---|
288 | * one for receiving data. Each buffer contains a queue of mbufs, |
---|
289 | * information about the number of mbufs and amount of data in the |
---|
290 | * queue, and other fields allowing select() statements and notification |
---|
291 | * on data availability to be implemented. |
---|
292 | * |
---|
293 | * Data stored in a socket buffer is maintained as a list of records. |
---|
294 | * Each record is a list of mbufs chained together with the m_next |
---|
295 | * field. Records are chained together with the m_nextpkt field. The upper |
---|
296 | * level routine soreceive() expects the following conventions to be |
---|
297 | * observed when placing information in the receive buffer: |
---|
298 | * |
---|
299 | * 1. If the protocol requires each message be preceded by the sender's |
---|
300 | * name, then a record containing that name must be present before |
---|
301 | * any associated data (mbuf's must be of type MT_SONAME). |
---|
302 | * 2. If the protocol supports the exchange of ``access rights'' (really |
---|
303 | * just additional data associated with the message), and there are |
---|
304 | * ``rights'' to be received, then a record containing this data |
---|
305 | * should be present (mbuf's must be of type MT_RIGHTS). |
---|
306 | * 3. If a name or rights record exists, then it must be followed by |
---|
307 | * a data record, perhaps of zero length. |
---|
308 | * |
---|
309 | * Before using a new socket structure it is first necessary to reserve |
---|
310 | * buffer space to the socket, by calling sbreserve(). This should commit |
---|
311 | * some of the available buffer space in the system buffer pool for the |
---|
312 | * socket (currently, it does nothing but enforce limits). The space |
---|
313 | * should be released by calling sbrelease() when the socket is destroyed. |
---|
314 | */ |
---|
315 | |
---|
/*
 * Reserve send buffer space of sndcc bytes and receive buffer space of
 * rcvcc bytes for a socket, then establish default low-water marks.
 * Returns 0 on success or ENOBUFS when either reservation is refused
 * by sbreserve(); on the partial-failure path the send-side
 * reservation is released before returning.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	/* A single byte is enough to wake a reader. */
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	/* Writers wake once a cluster's worth of space is free. */
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	/* Low-water mark can never exceed the high-water mark. */
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);		/* undo the send-side reservation */
bad:
	return (ENOBUFS);
}
---|
336 | |
---|
337 | /* |
---|
338 | * Allot mbufs to a sockbuf. |
---|
339 | * Attempt to scale mbmax so that mbcnt doesn't become limiting |
---|
340 | * if buffering efficiency is near the normal case. |
---|
341 | */ |
---|
/*
 * Allot mbufs to a sockbuf.
 * Attempt to scale mbmax so that mbcnt doesn't become limiting
 * if buffering efficiency is near the normal case.
 * Returns 1 on success, 0 when the request exceeds the global cap.
 */
int
sbreserve(struct sockbuf *sb, u_long cc)
{

	/* Reject requests whose data bytes alone could exceed sb_max
	 * once per-mbuf overhead (MSIZE per MCLBYTES) is accounted for. */
	if (cc > sb_max * MCLBYTES / (MSIZE + MCLBYTES))
		return (0);
	sb->sb_hiwat = cc;
	/* Permit sb_efficiency times the data limit in raw mbuf storage,
	 * but never more than the global sb_max. */
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
---|
354 | |
---|
355 | /* |
---|
356 | * Free mbufs held by a socket, and reserved mbuf space. |
---|
357 | */ |
---|
/*
 * Free mbufs held by a socket, and reserved mbuf space:
 * flush all queued data, then zero the reservation limits.
 */
void
sbrelease(struct sockbuf *sb)
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}
---|
365 | |
---|
366 | /* |
---|
367 | * Routines to add and remove |
---|
368 | * data from an mbuf queue. |
---|
369 | * |
---|
370 | * The routines sbappend() or sbappendrecord() are normally called to |
---|
371 | * append new mbufs to a socket buffer, after checking that adequate |
---|
372 | * space is available, comparing the function sbspace() with the amount |
---|
373 | * of data to be added. sbappendrecord() differs from sbappend() in |
---|
374 | * that data supplied is treated as the beginning of a new record. |
---|
375 | * To place a sender's address, optional access rights, and data in a |
---|
376 | * socket receive buffer, sbappendaddr() should be used. To place |
---|
377 | * access rights and data in a socket receive buffer, sbappendrights() |
---|
378 | * should be used. In either case, the new data begins a new record. |
---|
379 | * Note that unlike sbappend() and sbappendrecord(), these routines check |
---|
380 | * for the caller that there will be enough space to store the data. |
---|
381 | * Each fails if there is not enough space, or if it cannot find mbufs |
---|
382 | * to store additional information in. |
---|
383 | * |
---|
384 | * Reliable protocols may use the socket send buffer to hold data |
---|
385 | * awaiting acknowledgement. Data is normally copied from a socket |
---|
386 | * send buffer in a protocol with m_copy for output to a peer, |
---|
387 | * and then removing the data from the socket buffer with sbdrop() |
---|
388 | * or sbdroprecord() when the data is acknowledged by the peer. |
---|
389 | */ |
---|
390 | |
---|
391 | /* |
---|
392 | * Append mbuf chain m to the last record in the |
---|
393 | * socket buffer sb. The additional space associated |
---|
394 | * the mbuf chain is recorded in sb. Empty mbufs are |
---|
395 | * discarded and mbufs are compacted where possible. |
---|
396 | */ |
---|
/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	register struct mbuf *n;

	if (m == 0)
		return;
	n = sb->sb_mb;
	if (n) {
		/* Walk to the last record in the buffer. */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		/* Scan that record: if any mbuf carries M_EOR the record is
		 * closed, so the new data must start a fresh record. */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	}
	/* n is now the last mbuf of the last record (or NULL if empty);
	 * splice/compact the new chain in after it. */
	sbcompress(sb, m, n);
}
---|
417 | |
---|
#ifdef SOCKBUF_DEBUG
/*
 * Debug-only consistency check: recompute the byte count and mbuf
 * storage tally of a sockbuf and panic on any mismatch with the
 * cached sb_cc/sb_mbcnt.  Note this walks only the m_next chain and
 * panics if a second record (m_nextpkt) exists, so it is only valid
 * for single-record buffers.
 */
void
sbcheck(struct sockbuf *sb)
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %d != %d || mbcnt %d != %d\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif
---|
440 | |
---|
441 | /* |
---|
442 | * As above, except the mbuf chain |
---|
443 | * begins a new record. |
---|
444 | */ |
---|
/*
 * As sbappend(), except the mbuf chain m0 begins a new record in
 * socket buffer sb.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	/* Find the current last record, if any. */
	m = sb->sb_mb;
	if (m)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	/* Detach the rest of the chain; sbcompress() re-appends it. */
	m = m0->m_next;
	m0->m_next = 0;
	/* If the record head carried M_EOR, hand the flag to the remainder
	 * so the end-of-record mark stays on the record's last data. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
---|
473 | |
---|
474 | /* |
---|
475 | * As above except that OOB data |
---|
476 | * is inserted at the beginning of the sockbuf, |
---|
477 | * but after any other OOB data. |
---|
478 | */ |
---|
/*
 * As sbappendrecord(), except that OOB data is inserted as a new
 * record at the beginning of the sockbuf, but after any other OOB
 * data already queued.
 */
void
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/*
	 * Scan records from the front, stopping at the first record that
	 * is neither OOB data nor control-prefixed OOB data; mp ends up
	 * pointing at the link where the new record is spliced in.
	 */
	for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) {
		m = *mp;
	again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;	/* WANT next train */

		case MT_CONTROL:
			/* Control mbuf may prefix OOB data: look deeper
			 * into this record before deciding. */
			m = m->m_next;
			if (m)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	/* Detach the remainder; sbcompress() re-appends it. */
	m = m0->m_next;
	m0->m_next = 0;
	/* Migrate M_EOR from the record head to the remaining chain. */
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
}
---|
517 | |
---|
518 | /* |
---|
519 | * Append address and data, and optionally, control (ancillary) data |
---|
520 | * to the receive queue of a socket. If present, |
---|
521 | * m0 must include a packet header with total length. |
---|
522 | * Returns 0 if no space in sockbuf or insufficient mbufs. |
---|
523 | */ |
---|
/*
 * Append address asa and data m0, and optionally, control (ancillary)
 * data, to the receive queue of a socket as one new record.  If
 * present, m0 must include a packet header with total length.
 * Returns 0 if there is no space in the sockbuf or insufficient mbufs,
 * 1 on success.
 */
int
sbappendaddr(struct sockbuf *sb, struct sockaddr *asa,
	     struct mbuf *m0, struct mbuf *control)
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;	/* bytes needed: name + data + control */

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	/* The address must fit in a single small mbuf. */
	if (asa->sa_len > MLEN)
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	/* Chain: name mbuf -> control chain -> data chain. */
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* Account every mbuf of the new record against the buffer. */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	/* Append the record after the last existing record. */
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}
---|
565 | |
---|
/*
 * Append control (ancillary) data and optional data m0 to a socket
 * buffer as one new record.  control must be non-null (panics
 * otherwise).  Returns 0 when the sockbuf lacks space, 1 on success.
 */
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0,
		struct mbuf *control)
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	/* Sum the control chain, leaving m at its last mbuf. */
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	/* Account every mbuf of the combined record. */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	/* Append the record after the last existing record. */
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
---|
597 | |
---|
598 | /* |
---|
599 | * Compress mbuf chain m into the socket |
---|
600 | * buffer sb following mbuf n. If n |
---|
601 | * is null, the buffer is presumed empty. |
---|
602 | */ |
---|
/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.  Empty mbufs are freed
 * (remembering any M_EOR they carried), and small mbufs are copied
 * into the tail of the preceding interior mbuf when they fit.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	register int eor = 0;		/* accumulated M_EOR from consumed mbufs */
	register struct mbuf *o;

	while (m) {
		eor |= m->m_flags & M_EOR;
		/*
		 * Free a zero-length mbuf, unless it is the final mbuf of
		 * an EOR record with no same-type successor to carry the
		 * end-of-record mark.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			m = m_free(m);
			continue;
		}
		/*
		 * Coalesce: copy m's data into the tail of n when n is a
		 * plain interior mbuf (no cluster, no EOR) of the same type
		 * with room in its internal data area.
		 */
		if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* Otherwise link m into the buffer and account for it. */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;	/* eor re-applied at the end */
		m = m->m_next;
		n->m_next = 0;
	}
	/* Restore the end-of-record mark on the last mbuf kept. */
	if (eor) {
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
}
---|
645 | |
---|
646 | /* |
---|
647 | * Free all mbufs in a sockbuf. |
---|
648 | * Check that all resources are reclaimed. |
---|
649 | */ |
---|
/*
 * Free all mbufs in a sockbuf.
 * Check that all resources are reclaimed; panics if the buffer is
 * locked or if counts remain inconsistent afterwards.
 */
void
sbflush(struct sockbuf *sb)
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	/* sbdrop() also reaps leading zero-length mbufs, so this loop
	 * makes progress even when sb_cc is 0 while mbufs remain. */
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}
---|
661 | |
---|
662 | /* |
---|
663 | * Drop data from (the front of) a sockbuf. |
---|
664 | */ |
---|
/*
 * Drop len bytes of data from (the front of) a sockbuf, crossing
 * record boundaries as needed.  Panics if asked to drop more data
 * than the buffer holds.
 */
void
sbdrop(struct sockbuf *sb, int len)
{
	register struct mbuf *m, *mn;
	struct mbuf *next;		/* next record after the current one */

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* Current record exhausted: move to the next one. */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* Partial drop within this mbuf: trim its front. */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		/* Whole mbuf consumed: release it. */
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Also discard any now-leading zero-length mbufs. */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Reattach what is left of the current record to the chain. */
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}
---|
702 | |
---|
703 | /* |
---|
704 | * Drop a record off the front of a sockbuf |
---|
705 | * and move the next record to the front. |
---|
706 | */ |
---|
707 | void |
---|
708 | sbdroprecord(struct sockbuf *sb) |
---|
709 | { |
---|
710 | register struct mbuf *m, *mn; |
---|
711 | |
---|
712 | m = sb->sb_mb; |
---|
713 | if (m) { |
---|
714 | sb->sb_mb = m->m_nextpkt; |
---|
715 | do { |
---|
716 | sbfree(sb, m); |
---|
717 | MFREE(m, mn); |
---|
718 | m = mn; |
---|
719 | } while (m); |
---|
720 | } |
---|
721 | } |
---|
722 | |
---|
723 | /* |
---|
724 | * Create a "control" mbuf containing the specified data |
---|
725 | * with the specified type for presentation on a socket buffer. |
---|
726 | */ |
---|
727 | struct mbuf * |
---|
728 | sbcreatecontrol(caddr_t p, int size, int type, int level) |
---|
729 | { |
---|
730 | register struct cmsghdr *cp; |
---|
731 | struct mbuf *m; |
---|
732 | |
---|
733 | if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL) |
---|
734 | return ((struct mbuf *) NULL); |
---|
735 | cp = mtod(m, struct cmsghdr *); |
---|
736 | /* XXX check size? */ |
---|
737 | (void)memcpy(CMSG_DATA(cp), p, size); |
---|
738 | size += sizeof(*cp); |
---|
739 | m->m_len = size; |
---|
740 | cp->cmsg_len = size; |
---|
741 | cp->cmsg_level = level; |
---|
742 | cp->cmsg_type = type; |
---|
743 | return (m); |
---|
744 | } |
---|
745 | |
---|
#ifdef PRU_OLDSTYLE
/*
 * The following routines mediate between the old-style `pr_usrreq'
 * protocol implementations and the new-style `struct pr_usrreqs'
 * calling convention.  Each wrapper packs its arguments into the old
 * four-argument pr_ousrreq() form; several smuggle non-mbuf values
 * through `struct mbuf *' casts (marked XXX), preserving the historic
 * calling convention.
 */

/* syntactic sugar */
#define nomb (struct mbuf *)0

/* PRU_ABORT: abruptly terminate the connection. */
static int
old_abort(struct socket *so)
{
	return so->so_proto->pr_ousrreq(so, PRU_ABORT, nomb, nomb, nomb);
}

/* PRU_ACCEPT: retrieve the peer name of an accepted connection. */
static int
old_accept(struct socket *so, struct mbuf *nam)
{
	return so->so_proto->pr_ousrreq(so, PRU_ACCEPT, nomb, nam, nomb);
}

/* PRU_ATTACH: attach protocol state; proto is cast through mbuf *. */
static int
old_attach(struct socket *so, intptr_t proto)
{
	return so->so_proto->pr_ousrreq(so, PRU_ATTACH, nomb,
				       (struct mbuf *)proto, /* XXX */
				       nomb);
}

/* PRU_BIND: bind the socket to the address in nam. */
static int
old_bind(struct socket *so, struct mbuf *nam)
{
	return so->so_proto->pr_ousrreq(so, PRU_BIND, nomb, nam, nomb);
}

/* PRU_CONNECT: initiate a connection to the address in nam. */
static int
old_connect(struct socket *so, struct mbuf *nam)
{
	return so->so_proto->pr_ousrreq(so, PRU_CONNECT, nomb, nam, nomb);
}

/* PRU_CONNECT2: join two sockets; so2 is cast through mbuf *. */
static int
old_connect2(struct socket *so1, struct socket *so2)
{
	return so1->so_proto->pr_ousrreq(so1, PRU_CONNECT2, nomb,
					(struct mbuf *)so2, nomb);
}

/* PRU_CONTROL: ioctl dispatch; cmd/data/ifp are cast through mbuf *. */
static int
old_control(struct socket *so, intptr_t cmd, caddr_t data, struct ifnet *ifp)
{
	return so->so_proto->pr_ousrreq(so, PRU_CONTROL, (struct mbuf *)cmd,
				       (struct mbuf *)data,
				       (struct mbuf *)ifp);
}

/* PRU_DETACH: detach protocol state from the socket. */
static int
old_detach(struct socket *so)
{
	return so->so_proto->pr_ousrreq(so, PRU_DETACH, nomb, nomb, nomb);
}

/* PRU_DISCONNECT: disconnect from the peer. */
static int
old_disconnect(struct socket *so)
{
	return so->so_proto->pr_ousrreq(so, PRU_DISCONNECT, nomb, nomb, nomb);
}

/* PRU_LISTEN: prepare the socket to accept connections. */
static int
old_listen(struct socket *so)
{
	return so->so_proto->pr_ousrreq(so, PRU_LISTEN, nomb, nomb, nomb);
}

/* PRU_PEERADDR: fetch the peer's address into nam. */
static int
old_peeraddr(struct socket *so, struct mbuf *nam)
{
	return so->so_proto->pr_ousrreq(so, PRU_PEERADDR, nomb, nam, nomb);
}

/* PRU_RCVD: notify the protocol that data was consumed by the user. */
static int
old_rcvd(struct socket *so, intptr_t flags)
{
	return so->so_proto->pr_ousrreq(so, PRU_RCVD, nomb,
				       (struct mbuf *)flags, /* XXX */
				       nomb);
}

/* PRU_RCVOOB: receive out-of-band data into m. */
static int
old_rcvoob(struct socket *so, struct mbuf *m, intptr_t flags)
{
	return so->so_proto->pr_ousrreq(so, PRU_RCVOOB, m,
				       (struct mbuf *)flags, /* XXX */
				       nomb);
}

/*
 * Send data: map the new-style PRUS_* flag bits onto the three
 * distinct old-style send requests.
 */
static int
old_send(struct socket *so, int flags, struct mbuf *m, struct mbuf *addr,
	 struct mbuf *control)
{
	int req;

	if (flags & PRUS_OOB) {
		req = PRU_SENDOOB;
	} else if(flags & PRUS_EOF) {
		req = PRU_SEND_EOF;
	} else {
		req = PRU_SEND;
	}
	return so->so_proto->pr_ousrreq(so, req, m, addr, control);
}

/* PRU_SENSE: fill in stat information; sb is cast through mbuf *. */
static int
old_sense(struct socket *so, struct stat *sb)
{
	return so->so_proto->pr_ousrreq(so, PRU_SENSE, (struct mbuf *)sb,
				       nomb, nomb);
}

/* PRU_SHUTDOWN: mark the socket as unable to send more data. */
static int
old_shutdown(struct socket *so)
{
	return so->so_proto->pr_ousrreq(so, PRU_SHUTDOWN, nomb, nomb, nomb);
}

/* PRU_SOCKADDR: fetch the local address into nam. */
static int
old_sockaddr(struct socket *so, struct mbuf *nam)
{
	return so->so_proto->pr_ousrreq(so, PRU_SOCKADDR, nomb, nam, nomb);
}

/*
 * Dispatch table presenting the old-style wrappers through the
 * new-style pr_usrreqs interface (order must match struct pr_usrreqs).
 */
struct pr_usrreqs pru_oldstyle = {
	old_abort, old_accept, old_attach, old_bind, old_connect,
	old_connect2, old_control, old_detach, old_disconnect,
	old_listen, old_peeraddr, old_rcvd, old_rcvoob, old_send,
	old_sense, old_shutdown, old_sockaddr
};

#endif /* PRU_OLDSTYLE */
---|
886 | |
---|
887 | /* |
---|
888 | * Some routines that return EOPNOTSUPP for entry points that are not |
---|
889 | * supported by a protocol. Fill in as needed. |
---|
890 | */ |
---|
/* Stub: accept() is not supported by this protocol. */
int
pru_accept_notsupp(struct socket *so, struct mbuf *nam)
{
	return EOPNOTSUPP;
}

/* Stub: socketpair-style connect2 is not supported by this protocol. */
int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
	return EOPNOTSUPP;
}

/* Stub: protocol ioctls are not supported by this protocol. */
int
pru_control_notsupp(struct socket *so, int cmd, caddr_t data,
		    struct ifnet *ifp)
{
	return EOPNOTSUPP;
}

/* Stub: listen() is not supported by this protocol. */
int
pru_listen_notsupp(struct socket *so)
{
	return EOPNOTSUPP;
}

/* Stub: receive-acknowledge notification is not supported. */
int
pru_rcvd_notsupp(struct socket *so, int flags)
{
	return EOPNOTSUPP;
}

/* Stub: out-of-band receive is not supported by this protocol. */
int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
	return EOPNOTSUPP;
}
---|
927 | |
---|
928 | /* |
---|
929 | * This isn't really a ``null'' operation, but it's the default one |
---|
930 | * and doesn't do anything destructive. |
---|
931 | */ |
---|
/*
 * Default PRU_SENSE handler for fstat() on a socket: report the send
 * buffer's high-water mark as the preferred I/O block size.  This
 * isn't really a ``null'' operation, but it's the default one and
 * doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{
	sb->st_blksize = so->so_snd.sb_hiwat;
	return 0;
}
---|