source: rtems/cpukit/libnetworking/netinet/tcp_subr.c @ 657e6c93

5
Last change on this file since 657e6c93 was 657e6c93, checked in by Christian Mauderer <Christian.Mauderer@…>, on 06/24/16 at 05:57:17

libnetworking: Import current <netinet/in.h>

Import the <netinet/in.h> from current FreeBSD. This allows to build
some current software (e.g. libressl).

Add legacy support like

  • prototype for in_cksum(),
  • IPPORT_USERRESERVED,
  • deprecated IPCTL_RT* defines,
  • ip_fw_chk_t and ip_fw_ctl_t,
  • ip_nat_... (IP NAT hooks), and
  • IP_NAT option for get/setsockopt()

to new <rtems/rtems_netinet_in.h>.

  • Property mode set to 100644
File size: 19.4 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 *      The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *      @(#)tcp_subr.c  8.2 (Berkeley) 5/24/95
30 * $FreeBSD: src/sys/netinet/tcp_subr.c,v 1.226 2005/05/07 00:41:36 cperciva Exp $
31 */
32 
33
34#ifdef HAVE_CONFIG_H
35#include "config.h"
36#endif
37
38#include "opt_tcpdebug.h"
39
40#include <sys/param.h>
41#include <sys/queue.h>
42#include <sys/proc.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/sysctl.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/socket.h>
49#include <sys/socketvar.h>
50#include <sys/protosw.h>
51#include <errno.h>
52
53#include <net/route.h>
54#include <net/if.h>
55
56#define _IP_VHL
57#include <netinet/in.h>
58#include <rtems/rtems_netinet_in.h>
59#include <netinet/in_systm.h>
60#include <netinet/ip.h>
61#include <netinet/in_pcb.h>
62#include <netinet/in_var.h>
63#include <netinet/ip_var.h>
64#include <netinet/ip_icmp.h>
65#include <netinet/tcp.h>
66#include <netinet/tcp_fsm.h>
67#include <netinet/tcp_seq.h>
68#include <netinet/tcp_timer.h>
69#include <netinet/tcp_var.h>
70#include <netinet/tcpip.h>
71#ifdef TCPDEBUG
72#include <netinet/tcp_debug.h>
73#endif
74
/* Default TCP maximum segment size; tunable via the mssdflt sysctl below. */
int     tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt , 0, "Default TCP Maximum Segment Size");

/* Non-zero requests RFC 1323 window-scale/timestamp options on new PCBs. */
static int      tcp_do_rfc1323 = 1;
#if !defined(__rtems__)
/* These knobs are compiled out under RTEMS; only the defaults above apply. */
static int      tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt,
        CTLFLAG_RW, &tcp_rttdflt , 0, "");

SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323,
        CTLFLAG_RW, &tcp_do_rfc1323 , 0, "");
#endif

/* Forward declaration: asynchronous-error notification callback. */
static void     tcp_notify(struct inpcb *, int);

/*
 * Target size of TCP PCB hash table. Will be rounded down to a prime
 * number.
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE     128
#endif
98
99/*
100 * Tcp initialization
101 */
102void
103tcp_init(void)
104{
105
106        tcp_iss = random();     /* wrong, but better than a constant */
107        tcp_ccgen = 1;
108        LIST_INIT(&tcb);
109        tcbinfo.listhead = &tcb;
110        tcbinfo.hashbase = hashinit(TCBHASHSIZE, M_PCB, &tcbinfo.hashmask);
111        if (max_protohdr < sizeof(struct tcpiphdr))
112                max_protohdr = sizeof(struct tcpiphdr);
113        if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
114                panic("tcp_init");
115}
116
117/*
118 * Create template to be used to send tcp packets on a connection.
119 * Call after host entry created, allocates an mbuf and fills
120 * in a skeletal tcp/ip header, minimizing the amount of work
121 * necessary when the connection is used.
122 */
123struct tcpiphdr *
124tcp_template(struct tcpcb *tp)
125{
126        register struct inpcb *inp = tp->t_inpcb;
127        register struct mbuf *m;
128        register struct tcpiphdr *n;
129
130        if ((n = tp->t_template) == 0) {
131                m = m_get(M_DONTWAIT, MT_HEADER);
132                if (m == NULL)
133                        return (0);
134                m->m_len = sizeof (struct tcpiphdr);
135                n = mtod(m, struct tcpiphdr *);
136        }
137        n->ti_next = n->ti_prev = 0;
138        n->ti_x1 = 0;
139        n->ti_pr = IPPROTO_TCP;
140        n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
141        n->ti_src = inp->inp_laddr;
142        n->ti_dst = inp->inp_faddr;
143        n->ti_sport = inp->inp_lport;
144        n->ti_dport = inp->inp_fport;
145        n->ti_seq = 0;
146        n->ti_ack = 0;
147        n->ti_x2 = 0;
148        n->ti_off = 5;
149        n->ti_flags = 0;
150        n->ti_win = 0;
151        n->ti_sum = 0;
152        n->ti_urp = 0;
153        return (n);
154}
155
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(struct tcpcb *tp, struct tcpiphdr *ti, struct mbuf *m,
    tcp_seq ack, tcp_seq seq, int flags)
{
        register int tlen;
        int win = 0;
        struct route *ro = 0;
        struct route sro;

        /* With a connection, use its receive window and cached route;
         * otherwise route via a zeroed scratch route on the stack. */
        if (tp) {
                win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
                ro = &tp->t_inpcb->inp_route;
        } else {
                ro = &sro;
                bzero(ro, sizeof *ro);
        }
        if (m == NULL) {
                /* Keepalive path: allocate a fresh header mbuf and copy
                 * the template at ti into it, forcing a bare ACK. */
                m = m_gethdr(M_DONTWAIT, MT_HEADER);
                if (m == NULL)
                        return;
#ifdef TCP_COMPAT_42
                tlen = 1;       /* 4.2BSD keepalives carry one garbage byte */
#else
                tlen = 0;
#endif
                m->m_data += max_linkhdr;
                *mtod(m, struct tcpiphdr *) = *ti;
                ti = mtod(m, struct tcpiphdr *);
                flags = TH_ACK;
        } else {
                /* Reply path: reuse the received mbuf, drop any chained
                 * data, and swap src/dst so the reply goes back to the
                 * originator of segment ti. */
                m_freem(m->m_next);
                m->m_next = NULL;
                m->m_data = (caddr_t)ti;
                m->m_len = sizeof (struct tcpiphdr);
                tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
                xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, u_long);
                xchg(ti->ti_dport, ti->ti_sport, u_short);
#undef xchg
        }
        /* Fill in the TCP/pseudo header; length first (network order). */
        ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
        tlen += sizeof (struct tcpiphdr);
        m->m_len = tlen;
        m->m_pkthdr.len = tlen;
        m->m_pkthdr.rcvif = (struct ifnet *) 0;
        ti->ti_next = ti->ti_prev = 0;
        ti->ti_x1 = 0;
        ti->ti_seq = htonl(seq);
        ti->ti_ack = htonl(ack);
        ti->ti_x2 = 0;
        ti->ti_off = sizeof (struct tcphdr) >> 2;
        ti->ti_flags = flags;
        /* Advertised window must be unscaled when no connection exists. */
        if (tp)
                ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
        else
                ti->ti_win = htons((u_short)win);
        ti->ti_urp = 0;
        /* Checksum over the pseudo-header; must be zero during the sum. */
        ti->ti_sum = 0;
        ti->ti_sum = in_cksum(m, tlen);
        /* Restore real IP fields clobbered by the pseudo-header overlay. */
        ((struct ip *)ti)->ip_len = tlen;
        ((struct ip *)ti)->ip_ttl = ip_defttl;
#ifdef TCPDEBUG
        if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
                tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
#endif
        (void) ip_output(m, NULL, ro, 0, NULL);
        /* Release any route the scratch route acquired in ip_output(). */
        if (ro == &sro && ro->ro_rt) {
                RTFREE(ro->ro_rt);
        }
}
241
242/*
243 * Create a new TCP control block, making an
244 * empty reassembly queue and hooking it to the argument
245 * protocol control block.
246 */
247struct tcpcb *
248tcp_newtcpcb(struct inpcb *inp)
249{
250        struct tcpcb *tp;
251
252        tp = malloc(sizeof(*tp), M_PCB, M_NOWAIT);
253        if (tp == NULL)
254                return ((struct tcpcb *)0);
255        bzero((char *) tp, sizeof(struct tcpcb));
256        tp->seg_next = tp->seg_prev = (struct tcpiphdr *)tp;
257        tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;
258
259        if (tcp_do_rfc1323)
260                tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
261        tp->t_inpcb = inp;
262        /*
263         * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
264         * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
265         * reasonable initial retransmit time.
266         */
267        tp->t_srtt = TCPTV_SRTTBASE;
268        tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
269        tp->t_rttmin = TCPTV_MIN;
270        tp->t_rxtcur = TCPTV_RTOBASE;
271        tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
272        tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
273        inp->inp_ip_ttl = ip_defttl;
274        inp->inp_ppcb = (caddr_t)tp;
275        return (tp);
276}
277
278/*
279 * Drop a TCP connection, reporting
280 * the specified error.  If connection is synchronized,
281 * then send a RST to peer.
282 */
283struct tcpcb *
284tcp_drop(struct tcpcb *tp, int errnum)
285{
286        struct socket *so = tp->t_inpcb->inp_socket;
287
288        if (TCPS_HAVERCVDSYN(tp->t_state)) {
289                tp->t_state = TCPS_CLOSED;
290                (void) tcp_output(tp);
291                tcpstat.tcps_drops++;
292        } else
293                tcpstat.tcps_conndrops++;
294        if (errnum == ETIMEDOUT && tp->t_softerror)
295                errnum = tp->t_softerror;
296        so->so_error = errnum;
297        return (tcp_close(tp));
298}
299
/*
 * Close a TCP control block:
 *      cache RTT/ssthresh metrics in the route, if warranted
 *      discard all space held by the tcp
 *      discard internet protocol block
 *      wake up any sleepers
 * Always returns NULL so callers can write `tp = tcp_close(tp);`.
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
        register struct tcpiphdr *t;
        struct inpcb *inp = tp->t_inpcb;
        struct socket *so = inp->inp_socket;
        register struct mbuf *m;
        register struct rtentry *rt;

        /*
         * If we got enough samples through the srtt filter,
         * save the rtt and rttvar in the routing entry.
         * 'Enough' is arbitrarily defined as the 16 samples.
         * 16 samples is enough for the srtt filter to converge
         * to within 5% of the correct value; fewer samples and
         * we could save a very bogus rtt.
         *
         * Don't update the default route's characteristics and don't
         * update anything that the user "locked".
         */
        if (tp->t_rttupdated >= 16 &&
            (rt = inp->inp_route.ro_rt) &&
            ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
                register u_long i = 0;

                if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
                        i = tp->t_srtt *
                            (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
                        if (rt->rt_rmx.rmx_rtt && i)
                                /*
                                 * filter this update to half the old & half
                                 * the new values, converting scale.
                                 * See route.h and tcp_var.h for a
                                 * description of the scaling constants.
                                 */
                                rt->rt_rmx.rmx_rtt =
                                    (rt->rt_rmx.rmx_rtt + i) / 2;
                        else
                                rt->rt_rmx.rmx_rtt = i;
                        tcpstat.tcps_cachedrtt++;
                }
                /* Same half-old/half-new filter for the RTT variance. */
                if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
                        i = tp->t_rttvar *
                            (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
                        if (rt->rt_rmx.rmx_rttvar && i)
                                rt->rt_rmx.rmx_rttvar =
                                    (rt->rt_rmx.rmx_rttvar + i) / 2;
                        else
                                rt->rt_rmx.rmx_rttvar = i;
                        tcpstat.tcps_cachedrttvar++;
                }
                /*
                 * update the pipelimit (ssthresh) if it has been updated
                 * already or if a pipesize was specified & the threshold
                 * got below half the pipesize.  I.e., wait for bad news
                 * before we start updating, then update on both good
                 * and bad news.
                 */
                if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
                    ((i = tp->snd_ssthresh) != 0) && rt->rt_rmx.rmx_ssthresh) ||
                    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
                        /*
                         * convert the limit from user data bytes to
                         * packets then to packet data bytes.
                         */
                        i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
                        if (i < 2)
                                i = 2;
                        i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
                        if (rt->rt_rmx.rmx_ssthresh)
                                rt->rt_rmx.rmx_ssthresh =
                                    (rt->rt_rmx.rmx_ssthresh + i) / 2;
                        else
                                rt->rt_rmx.rmx_ssthresh = i;
                        tcpstat.tcps_cachedssthresh++;
                }
        }
        /* free the reassembly queue, if any */
        /* Note: t is advanced first, so t->ti_prev below names the node
         * being unlinked and freed on this iteration. */
        t = tp->seg_next;
        while (t != (struct tcpiphdr *)tp) {
                t = (struct tcpiphdr *)t->ti_next;
#if (defined(__GNUC__) && (defined(__arm__) || defined(__mips__)))
        /* Fetch the mbuf pointer with an unaligned-safe load on targets
         * that fault on misaligned 32-bit accesses. */
        LD32_UNALGN((struct tcpiphdr *)t->ti_prev,m);
#else
                m = REASS_MBUF((struct tcpiphdr *)t->ti_prev);
#endif
                remque(t->ti_prev);
                m_freem(m);
        }
        if (tp->t_template)
                (void) m_free(dtom(tp->t_template));
        free(tp, M_PCB);
        inp->inp_ppcb = 0;
        soisdisconnected(so);
        in_pcbdetach(inp);
        tcpstat.tcps_closed++;
        return ((struct tcpcb *)0);
}
404
/*
 * Protocol drain hook; intentionally a no-op here — TCP keeps nothing
 * that can usefully be reclaimed on this port.
 */
void
tcp_drain(void)
{
	/* nothing to do */
}
410
411/*
412 * Notify a tcp user of an asynchronous error;
413 * store error as soft error, but wake up user
414 * (for now, won't do anything until can select for soft error).
415 */
416static void
417tcp_notify(struct inpcb *inp, int error)
418{
419        struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
420        struct socket *so = inp->inp_socket;
421
422        /*
423         * Ignore some errors if we are hooked up.
424         * If connection hasn't completed, has retransmitted several times,
425         * and receives a second error, give up now.  This is better
426         * than waiting a long time to establish a connection that
427         * can never complete.
428         */
429        if (tp->t_state == TCPS_ESTABLISHED &&
430            (error == EHOSTUNREACH || error == ENETUNREACH ||
431             error == EHOSTDOWN)) {
432                return;
433        } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
434            tp->t_softerror)
435                so->so_error = error;
436        else
437                tp->t_softerror = error;
438        soconnwakeup (so);
439        sorwakeup(so);
440        sowwakeup(so);
441}
442
#ifdef __rtems__
/*
 * The FreeBSD inpcb locking primitives compile away to nothing here;
 * NOTE(review): presumably the RTEMS stack serializes access at a
 * higher level (network semaphore) — confirm before adding SMP use.
 */
#define INP_INFO_RLOCK(a)
#define INP_INFO_RUNLOCK(a)
#define INP_LOCK(a)
#define INP_UNLOCK(a)
#endif
449
/*
 * Sysctl handler: export the list of active TCP PCBs as a sequence of
 * struct xtcpcb records bracketed by two struct xinpgen generation
 * markers, so the reader can detect concurrent changes and retry.
 */
static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
        int error, i, n, s;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        if (req->oldptr == NULL) {
                /* Size-probe only: report an estimate with slack for growth. */
                n = tcbinfo.ipi_count;
                req->oldidx = 2 * (sizeof xig)
                        + (n + n/8) * sizeof(struct xtcpcb);
                return (0);
        }

        /* This sysctl is read-only. */
        if (req->newptr != NULL)
                return (EPERM);

        /*
         * OK, now we're committed to doing something.
         * Snapshot the generation count and PCB count under the lock.
         */
        s = splnet();
        INP_INFO_RLOCK(&tcbinfo);
        gencnt = tcbinfo.ipi_gencnt;
        n = tcbinfo.ipi_count;
        INP_INFO_RUNLOCK(&tcbinfo);
        splx(s);

        sysctl_wire_old_buffer(req, 2 * (sizeof xig)
                + n * sizeof(struct xtcpcb));

        /* Leading generation marker. */
        xig.xig_len = sizeof xig;
        xig.xig_count = n;
        xig.xig_gen = gencnt;
/*      xig.xig_sogen = so_gencnt; remove by ccj */
        error = SYSCTL_OUT(req, &xig, sizeof xig);
        if (error)
                return error;

        /* ccj add exit if the count is 0 */
        if (!n)
                return error;
 
        inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
        if (inp_list == 0)
                return ENOMEM;
       
        /* First pass: collect up to n PCBs not newer than our snapshot. */
        s = splnet();
        INP_INFO_RLOCK(&tcbinfo);
        for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
             inp = LIST_NEXT(inp, inp_list)) {
                INP_LOCK(inp);
                if (inp->inp_gencnt <= gencnt)
#if 0
      &&
                    cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0)
#endif
                        inp_list[i++] = inp;
                INP_UNLOCK(inp);
        }
        INP_INFO_RUNLOCK(&tcbinfo);
        splx(s);
        n = i;

        /* Second pass: copy each collected PCB (and its tcpcb) out. */
        error = 0;
        for (i = 0; i < n; i++) {
                inp = inp_list[i];
                INP_LOCK(inp);
                if (inp->inp_gencnt <= gencnt) {
                        struct xtcpcb xt;
                        caddr_t inp_ppcb;
                        xt.xt_len = sizeof xt;
                        /* XXX should avoid extra copy */
                        bcopy(inp, &xt.xt_inp, sizeof *inp);
                        inp_ppcb = inp->inp_ppcb;
                        if (inp_ppcb != NULL)
                                bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
                        else
                                bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
#if 0     
                        if (inp->inp_socket)
                                sotoxsocket(inp->inp_socket, &xt.xt_socket);
#endif
                        /* NOTE(review): a later successful SYSCTL_OUT
                         * overwrites an earlier error here; also
                         * xt.xt_socket is copied out uninitialized with
                         * the #if 0 above — inherited from upstream. */
                        error = SYSCTL_OUT(req, &xt, sizeof xt);
                }
                INP_UNLOCK(inp);
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                s = splnet();
                INP_INFO_RLOCK(&tcbinfo);
                xig.xig_gen = tcbinfo.ipi_gencnt;
#if 0   
                xig.xig_sogen = so_gencnt;
#endif
                xig.xig_count = tcbinfo.ipi_count;
                INP_INFO_RUNLOCK(&tcbinfo);
                splx(s);
                error = SYSCTL_OUT(req, &xig, sizeof xig);
        }
        free(inp_list, M_TEMP);
        return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
            tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
566
567void
568tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
569{
570        struct ip *ip = vip;
571        struct tcphdr *th;
572        void (*notify)(struct inpcb *, int) = tcp_notify;
573
574        if (cmd == PRC_QUENCH)
575                notify = tcp_quench;
576#if 1
577        else if (cmd == PRC_MSGSIZE)
578                notify = tcp_mtudisc;
579#endif
580        else if (!PRC_IS_REDIRECT(cmd) &&
581                 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
582                return;
583        if (ip != NULL) {
584#ifdef _IP_VHL
585                th = (struct tcphdr *)((caddr_t)ip
586                                       + (IP_VHL_HL(ip->ip_vhl) << 2));
587#else
588                th = (struct tcphdr *)((caddr_t)ip
589                                       + (ip->ip_hl << 2));
590#endif
591                in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
592                        cmd, notify);
593        } else
594                in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
595}
596
597/*
598 * When a source quench is received, close congestion window
599 * to one segment.  We will gradually open it again as we proceed.
600 */
601void
602tcp_quench( struct inpcb *inp, int errnum)
603{
604        struct tcpcb *tp = intotcpcb(inp);
605
606        if (tp)
607                tp->snd_cwnd = tp->t_maxseg;
608}
609
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(struct inpcb *inp, int errnum)
{
        struct tcpcb *tp = intotcpcb(inp);
        struct rtentry *rt;
        struct rmxp_tao *taop;
        struct socket *so = inp->inp_socket;
        int offered;
        int mss;

        if (tp) {
                rt = tcp_rtlookup(inp);
                if (!rt || !rt->rt_rmx.rmx_mtu) {
                        /* No route or no MTU metric: fall back to default. */
                        tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
                        return;
                }
                taop = rmx_taop(rt->rt_rmx);
                offered = taop->tao_mssopt;
                /* MSS from path MTU, clamped by the peer's offered MSS. */
                mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
                if (offered)
                        mss = min(mss, offered);
                /*
                 * XXX - The above conditional probably violates the TCP
                 * spec.  The problem is that, since we don't know the
                 * other end's MSS, we are supposed to use a conservative
                 * default.  But, if we do that, then MTU discovery will
                 * never actually take place, because the conservative
                 * default is much less than the MTUs typically seen
                 * on the Internet today.  For the moment, we'll sweep
                 * this under the carpet.
                 *
                 * The conservative default might not actually be a problem
                 * if the only case this occurs is when sending an initial
                 * SYN with options and data to a host we've never talked
                 * to before.  Then, they will reply with an MSS value which
                 * will get recorded and the new parameters should get
                 * recomputed.  For Further Study.
                 */
                /* Only ever shrink: a larger value means no change needed. */
                if (tp->t_maxopd <= mss)
                        return;
                tp->t_maxopd = mss;

                /* Subtract option overhead in effect on this connection. */
                if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
                    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
                        mss -= TCPOLEN_TSTAMP_APPA;
                if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
                    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
                        mss -= TCPOLEN_CC_APPA;
                /* Round down to a multiple of the mbuf cluster size. */
#if     (MCLBYTES & (MCLBYTES - 1)) == 0
                if (mss > MCLBYTES)
                        mss &= ~(MCLBYTES-1);
#else
                if (mss > MCLBYTES)
                        mss = mss / MCLBYTES * MCLBYTES;
#endif
                if (so->so_snd.sb_hiwat < mss)
                        mss = so->so_snd.sb_hiwat;

                tp->t_maxseg = mss;

                /* Retransmit from the last acked point at the new size. */
                tcpstat.tcps_mturesent++;
                tp->t_rtt = 0;
                tp->snd_nxt = tp->snd_una;
                tcp_output(tp);
        }
}
682
683/*
684 * Look-up the routing entry to the peer of this inpcb.  If no route
685 * is found and it cannot be allocated, then return NULL.  This routine
686 * is called by TCP routines that access the rmx structure and by tcp_mss
687 * to get the interface MTU.
688 */
689struct rtentry *
690tcp_rtlookup(struct inpcb *inp)
691{
692        struct route *ro;
693        struct rtentry *rt;
694
695        ro = &inp->inp_route;
696        rt = ro->ro_rt;
697        if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
698                /* No route yet, so try to acquire one */
699                if (inp->inp_faddr.s_addr != INADDR_ANY) {
700                        ro->ro_dst.sa_family = AF_INET;
701                        ro->ro_dst.sa_len = sizeof(ro->ro_dst);
702                        ((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
703                                inp->inp_faddr;
704                        rtalloc(ro);
705                        rt = ro->ro_rt;
706                }
707        }
708        return rt;
709}
710
711/*
712 * Return a pointer to the cached information about the remote host.
713 * The cached information is stored in the protocol specific part of
714 * the route metrics.
715 */
716struct rmxp_tao *
717tcp_gettaocache(struct inpcb *inp)
718{
719        struct rtentry *rt = tcp_rtlookup(inp);
720
721        /* Make sure this is a host route and is up. */
722        if (rt == NULL ||
723            (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
724                return NULL;
725
726        return rmx_taop(rt->rt_rmx);
727}
Note: See TracBrowser for help on using the repository browser.