/* * Copyright (c) 1996, 1997 Berkeley Software Design, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that this notice is retained, * the conditions in the following notices are met, and terms applying * to contributors in the following notices also apply to Berkeley * Software Design, Inc. * * BSDI tcp_output.c,v 2.11 1997/01/16 14:06:34 karels Exp */ /* * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)tcp_output.c 8.4 (Berkeley) 5/24/95 */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TCPOUTFLAGS #include #include #include #include #include #include #ifdef notyet extern struct mbuf *m_copypack(); #endif #if defined(SACK) || defined(ER) extern int tcprexmtthresh; #endif /*#define MAX_TCPOPTLEN 32*/ /* max # bytes that go in options */ #define MAX_TCPOPTLEN 40 /* need 40 at least for 3 SACKs + TIMESTAMP */ /* * Tcp output routine: figure out what should be sent and send it. */ #ifdef LIMIT_BURST extern int hz; void tcp_output_dummy(void *arg) { struct tcpcb *tp; struct timeval curtime; microtime(&curtime); tp = (struct tcpcb *) arg; tp->burst_pending = 0; tcp_output(tp); } /* * XXX tcp_sched_burst() is not effective when data from the socket layer arrives * in small chunks (i.e., when len is small). The correct long-term solution is * to keep track of the time when each packet is due to complete transmission * based on the connection rate of cwnd/rtt. Basically, the code will be very * similar to the bandwidth emulation code under LINK_EMULATION. * * XXX todo: better solution -- have an array to keep track of the times at * which the last N packets were sent. 
 */
/*
 * Decide how many segments the caller may transmit right now, given that
 * "len" bytes are ready to send.  If len fits within tp->maxburst segments,
 * allow all of it (rounded up to whole segments); otherwise allow maxburst
 * segments now and arm a one-shot timer (tcp_output_dummy) to resume the
 * burst later.  With TXF_IGNORE_BURSTTIMER set, pacing is bypassed entirely.
 * Returns the segment allowance.
 */
int
tcp_sched_burst(struct tcpcb *tp, int len)
{
    u_long ticks;		/* ticks to next burst */
    int burstlen;		/* in segments */
    struct timeval curtime;

    if (tp->t_xflags & TXF_IGNORE_BURSTTIMER) {
        /* Pacing disabled: allow everything, rounded up to whole segments. */
        burstlen = (!len || len % tp->t_maxseg) ?
            (len/tp->t_maxseg + 1) : (len/tp->t_maxseg);
        return(burstlen);
    }
    microtime(&curtime);	/* NOTE(review): curtime is never read below */
    /*
     * cancel timer if pending
     */
    if (tp->burst_pending) {
        untimeout(tcp_output_dummy, (void *) tp);
        tp->burst_pending = 0;
    }
    /* if amount to send is less than maxburst */
    if (len <= tp->maxburst*tp->t_maxseg) {
        burstlen = (!len || len % tp->t_maxseg) ?
            (len/tp->t_maxseg + 1) : (len/tp->t_maxseg);
    }
    /*
     * amount to send more than maxburst, so send maxburst now and
     * schedule the rest for later.
     *
     * XXX should we consider snd_wnd when scheduling the time for the
     * next burst?
     */
    else {
        /*
         * Delay = srtt * (burst bytes / cwnd), converted from the
         * fine-grained srtt clock (EXACT_HZ) to timeout ticks (hz).
         */
        ticks = ((tp->t_srtt_exact)*tp->maxburst*tp->t_maxseg)/tp->snd_cwnd/(EXACT_HZ/hz);
        if (ticks == 0)
            /* XXX we choose not to ignore maxburst in such cases */
            ticks = 1;
        tp->burst_pending = 1;
        timeout(tcp_output_dummy, (void *) tp, ticks);
        burstlen = tp->maxburst;
    }
    return (burstlen);
}

/*
 * Run tcp_output() once with burst pacing overridden: TXF_IGNORE_BURSTTIMER
 * is set for the duration of the call, then cleared.  Returns whatever
 * tcp_output() returns.
 */
int
tcp_output_force(tp)
    register struct tcpcb *tp;
{
    int retval;

    tp->t_xflags |= TXF_IGNORE_BURSTTIMER;
    retval = tcp_output(tp);
    tp->t_xflags &= ~TXF_IGNORE_BURSTTIMER;
    return(retval);
}
#endif

/*
 * The TCP output routine proper: decide whether anything (data,
 * SYN/FIN/RST, ACK, urgent data, or a window update) should be sent on
 * this connection, build the segment, and hand it to ip_output().
 * Returns 0, or an errno from the output path.
 */
int
tcp_output(tp)
    register struct tcpcb *tp;
{
    register struct socket *so = tp->t_inpcb->inp_socket;
    register long len, win;
    int off, flags, error;
    register struct mbuf *m;
    register struct tcpiphdr *ti;
    u_char opt[MAX_TCPOPTLEN];
    unsigned optlen, hdrlen;
    int idle, sendalot = 0;
    struct rtentry *rt;

#ifdef LIMIT_BURST
    /*
     * XXX todo: even if the burst timer is not pending, check to see if we
     * sent data in a previous call to tcp_output() in the very recent past.
     * This is needed because the application may do writes in small chunks
     * (specifically, smaller than maxburst), resulting in multiple calls to
     * tcp_output in a short space of time.
     */
    if (tp->burst_pending && !(tp->t_xflags & TXF_IGNORE_BURSTTIMER)) {
        struct timeval curtime;
        microtime(&curtime);
        /*
         * We have a choice between cancelling a scheduled burst and
         * not doing so.
         */
        if (tp->t_xflags & TXF_CANCEL_BURST)
            untimeout(tcp_output_dummy, (void *) tp);
        else
            return (0);
    }
#endif
    /*
     * Determine length of data that should be transmitted,
     * and flags that will be used.
     * If there is some data or critical controls (SYN, RST)
     * to send, then transmit; otherwise, investigate further.
     */
    idle = (tp->t_flags & TF_WASIDLE) ? 1 : (tp->snd_max == tp->snd_una);
    if (idle) {
        if (tp->t_idle >= tp->t_rxtcur) {
            /*
             * We have been idle for "a while" and no acks are
             * expected to clock out any data we send --
             * slow start to get ack "clock" running again.
             */
#ifdef TCP_STANDARD
            tp->snd_cwnd = tp->t_maxseg;
#else
            tp->snd_cwnd = 2*tp->t_maxseg;
#endif
        }
        if (somoretosend(so)) {
            tp->t_flags |= TF_WASIDLE;
            idle = 0;
            goto again;
        }
    }
    tp->t_flags &= ~TF_WASIDLE;
again:
    off = tp->snd_nxt - tp->snd_una;
    win = min(tp->snd_wnd, tp->snd_cwnd);

    flags = tcp_outflags[tp->t_state];
#ifndef LIMIT_BURST
    sendalot = 0;
#endif
    /*
     * If in persist timeout with window of 0, send 1 byte.
     * Otherwise, if window is small but nonzero
     * and timer expired, we will send what we can
     * and go to transmit state.
     */
    if (tp->t_force) {
        if (win == 0) {
            /*
             * If we still have some data to send, then
             * clear the FIN bit.  Usually this would
             * happen below when it realizes that we
             * aren't sending all the data.  However,
             * if we have exactly 1 byte of unset data,
             * then it won't clear the FIN bit below,
             * and if we are in persist state, we wind
             * up sending the packet without recording
             * that we sent the FIN bit.
             *
             * We can't just blindly clear the FIN bit,
             * because if we don't have any more data
             * to send then the probe will be the FIN
             * itself.
             */
            if (off < so->so_snd.sb_cc)
                flags &= ~TH_FIN;
            win = 1;
        } else {
            tp->t_timer[TCPT_PERSIST] = 0;
            tp->t_rxtshift = 0;
        }
    }

    {
        if (win < so->so_snd.sb_cc) {
            len = win - off;
            if (idle) {
                tp->t_flags |= TF_WASIDLE;
                idle = 0;
            }
        } else {
            len = so->so_snd.sb_cc - off;
        }
    }

    if (len < 0) {
        /*
         * If FIN has been sent but not acked,
         * but we haven't been called to retransmit,
         * len will be -1.  Otherwise, window shrank
         * after we sent into it.  If window shrank to 0,
         * cancel pending retransmit, pull snd_nxt back
         * to (closed) window, and set the persist timer
         * if it isn't already going.  If the window didn't
         * close completely, just wait for an ACK.
         */
        len = 0;
        if (win == 0) {
            tp->t_timer[TCPT_REXMT] = 0;
            tp->t_rxtshift = 0;
            tp->snd_nxt = tp->snd_una;
            if (tp->t_timer[TCPT_PERSIST] == 0)
                tcp_setpersist(tp);
        }
    }
    if (len > tp->t_maxseg) {
#ifdef LIMIT_BURST
        /*
         * tcp_sched_burst() says how many segments this burst may
         * carry; sendalot counts the extra trips through "again".
         */
        if (!tp->burst_pending)
            sendalot = tp->maxburst ? tcp_sched_burst(tp, len)-1:1;
#else
        sendalot = 1;
#endif
        len = tp->t_maxseg;
    }
    if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
        flags &= ~TH_FIN;

    win = sbspace(&so->so_rcv);

    /*
     * Sender silly window avoidance.  If connection is idle
     * and can send all data or a maximum segment,
     * or are forced, do it; otherwise don't bother.
     * If peer's buffer is tiny, then send
     * when window is at least half open.
     * If retransmitting (possibly after persist timer forced us
     * to send into a small window), then must resend.
     */
    if (len) {
        if (len == tp->t_maxseg)
            goto send;
        if ((idle || tp->t_flags & TF_NODELAY) &&
            len + off >= so->so_snd.sb_cc)
            goto send;
        if (tp->t_force)
            goto send;
        if (len >= tp->max_sndwnd / 2)
            goto send;
        if (SEQ_LT(tp->snd_nxt, tp->snd_max))
            goto send;
    }

    /*
     * Compare available window to amount of window
     * known to peer (as advertised window less
     * next expected input).  If the difference is at least two
     * max size segments, or at least 50% of the maximum possible
     * window, then want to send a window update to peer.
     *
     * Of course, if we've already received the FIN, there's
     * no point in sending out a window update.
     */
    if (win > 0 && !TCPS_HAVERCVDFIN(tp->t_state)) {
        /*
         * "adv" is the amount we can increase the window,
         * taking into account that we are limited by
         * TCP_MAXWIN << tp->rcv_scale.
         */
        long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
            (tp->rcv_adv - tp->rcv_nxt);
        u_short t_maxseg = tp->t_maxseg;

#ifdef ACC
        if (adv >= (long) (tp->delack * t_maxseg))
            goto send;
#else
        if (adv >= (long) (2 * t_maxseg))
            goto send;
#endif
        if (2 * adv >= (long) so->so_rcv.sb_hiwat)
            goto send;
    }

#ifdef TCP_STANDARD
#ifdef ACC
    if (tp->num_segs_since_ack >= tp->delack)
        goto send;
#else
    if (tp->num_segs_since_ack >= 2)
        goto send;
#endif
#endif

    /*
     * Send if we owe peer an ACK.
     */
    if (tp->t_flags & TF_ACKNOW) {
        goto send;
    }
    if (flags & (TH_SYN|TH_RST))
        goto send;
    if (SEQ_GT(tp->snd_up, tp->snd_nxt))
        goto send;
    /*
     * If our state indicates that FIN should be sent
     * and we have not yet done so, or we're retransmitting the FIN,
     * then we need to send.
     */
    if (flags & TH_FIN &&
        ((tp->t_flags & TF_SENTFIN) == 0 || SEQ_LT(tp->snd_nxt, tp->snd_max)))
        goto send;

    /*
     * TCP window updates are not reliable, rather a polling protocol
     * using ``persist'' packets is used to insure receipt of window
     * updates.  The three ``states'' for the output side are:
     *	idle			not doing retransmits or persists
     *	persisting		to move a small or zero window
     *	(re)transmitting	and thereby not persisting
     *
     * tp->t_timer[TCPT_PERSIST]
     *	is set when we are in persist state.
     * tp->t_force
     *	is set when we are called to send a persist packet.
     * tp->t_timer[TCPT_REXMT]
     *	is set when we are retransmitting
     * The output side is idle when both timers are zero.
     *
     * If send window is too small, there is data to transmit, and no
     * retransmit or persist is pending, then go to persist state.
     * If nothing happens soon, send when timer expires:
     * if window is nonzero, transmit what we can,
     * otherwise force out a byte.
     */
    if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
        tp->t_timer[TCPT_PERSIST] == 0 ) {
        tp->t_rxtshift = 0;
        tcp_setpersist(tp);
    }

    /*
     * No reason to send a segment, just return.
     */
    return (0);

send:
    /*
     * Before ESTABLISHED, force sending of initial options
     * unless TCP set not to do any options.
     * NOTE: we assume that the IP/TCP header plus TCP options
     * always fit in a single mbuf, leaving room for a maximum
     * link header, i.e.
     *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN
     */
    optlen = 0;
    hdrlen = sizeof (struct tcpiphdr);
    if (flags & TH_SYN) {
        tp->snd_nxt = tp->iss;
        if ((tp->t_flags & TF_NOOPT) == 0) {
            u_short mss;

            opt[0] = TCPOPT_MAXSEG;
            opt[1] = 4;
            mss = htons((u_short) tcp_send_mss(tp->t_inpcb));
            bcopy((caddr_t)&mss, (caddr_t)(opt + 2), sizeof(mss));
            optlen = 4;

            if (tp->t_flags & TF_USE_SCALE) {
                *((u_long *) (opt + optlen)) = htonl(
                    TCPOPT_NOP << 24 |
                    TCPOPT_WINDOW << 16 |
                    TCPOLEN_WINDOW << 8 |
                    tp->request_r_scale);
                optlen += 4;
            }
        }
    }

    /*
     * Send a timestamp and echo-reply if this is a SYN and our side
     * wants to use timestamps (TF_SEND_TSTMP is set) or both our side
     * and our peer have sent timestamps in our SYN's.
     */
    if ((tp->t_flags & TF_SEND_TSTMP) && (flags & TH_RST) == 0) {
        u_long *lp = (u_long *)(opt + optlen);

        /* Form timestamp option as shown in appendix A of RFC 1323. */
        *lp++ = htonl(TCPOPT_TSTAMP_HDR);
#ifdef FINE_GRAINED_TSTAMP
        {
            struct timeval curtime;

            microtime(&curtime);
            *lp++ = htonl(EXACT_TIME(&curtime));
        }
#else
        *lp++ = htonl(tcp_now);
#endif
        *lp = htonl(tp->ts_recent);
        optlen += TCPOLEN_TSTAMP_APPA;
    }

#ifdef ACC
    /*
     * Use the TCPOPT_PEERWIN option to tell the peer what our window
     * size (measured in segments) is.  We don't set this option in a
     * pure ACK packet.
     *
     * XXX to do: make this option conditional on the TH_DELACKS_OK flag
     * being set during the SYN exchange.
     */
    if (len > 0) {
        u_short *sp = (u_short *) (opt + optlen);
        /*
         * We do NOT take the min of snd_cwnd and snd_wnd.
         * The receiver takes the min of the peerwin we send and
         * the size of the of its receiver buffer for deciding
         * how much to delay acks.  The justification for this is
         * that if in fact win is limited for us because the
         * receiver's buffer is full, end-to-end throughput is not
         * really impacted if the receiver waits, either until its
         * advertised window advances enough (as determined by
         * "delack") or the delayed ack timer expires, before
         * sending a new ack.
         */
        long w = tp->snd_cwnd;

        *sp++ = htons(TCPOPT_PEERWIN_HDR);
        /* take the amount of data to send into account */
        *sp = htons(min(w, so->so_snd.sb_cc)/tp->t_maxseg);
        optlen += TCPOLEN_PEERWIN;
    }
#endif

    hdrlen += optlen;

#ifdef already_accounted_for
    /*
     * Adjust data length if insertion of options will
     * bump the packet length beyond the t_maxseg length.
     */
    if (len > tp->t_maxseg - optlen) {
#ifdef LIMIT_BURST
        /*
         * We assume that the leftovers (caused by options) from each
         * segment will add up to at most one segment.  So we adjust
         * sendalot exactly once per burst.
         */
        if (!tp->burst_pending)
            sendalot++;
#else
        sendalot = 1;
#endif
        len = tp->t_maxseg - optlen;
        flags &= ~TH_FIN;
    }
#endif /* already_accounted_for */

#ifdef DIAGNOSTIC
    if (max_linkhdr + hdrlen > MHLEN)
        panic("tcphdr too big");
#endif

    /*
     * Grab a header mbuf, attaching a copy of data to
     * be transmitted, and initialize the header from
     * the template for sends on this connection.
     */
    if (len) {
        if (tp->t_force && len == 1)
            tcpstat.tcps_sndprobe++;
        else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
            tcpstat.tcps_sndrexmitpack++;
            tcpstat.tcps_sndrexmitbyte += len;
        } else {
            tcpstat.tcps_sndpack++;
            tcpstat.tcps_sndbyte += len;
        }
#ifdef notyet
        if ((m = m_copypack(so->so_snd.sb_mb, off,
            (int)len, max_linkhdr + hdrlen)) == 0) {
            error = ENOBUFS;
            goto out;
        }
        /*
         * m_copypack left space for our hdr; use it.
         */
        m->m_len += hdrlen;
        m->m_data -= hdrlen;
#else
        MGETHDR(m, M_DONTWAIT, MT_HEADER);
        if (m == NULL) {
            error = ENOBUFS;
            goto out;
        }
        m->m_data += max_linkhdr;
        m->m_len = hdrlen;
        if (len <= MHLEN - hdrlen - max_linkhdr) {
            m_copydata(so->so_snd.sb_mb, off, (int) len,
                mtod(m, caddr_t) + hdrlen);
            m->m_len += len;
        } else {
            m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
            if (m->m_next == 0) {
                (void) m_free(m);
                error = ENOBUFS;
                goto out;
            }
        }
#endif
        /*
         * If we're sending everything we've got, set PUSH.
         * (This will keep happy those implementations which only
         * give data to the user when a buffer fills or
         * a PUSH comes in.)
         */
        if (off + len == so->so_snd.sb_cc)
            flags |= TH_PUSH;
    } else {
        if (tp->t_flags & TF_ACKNOW)
            tcpstat.tcps_sndacks++;
        else if (flags & (TH_SYN|TH_FIN|TH_RST))
            tcpstat.tcps_sndctrl++;
        else if (SEQ_GT(tp->snd_up, tp->snd_nxt))
            tcpstat.tcps_sndurg++;
        else
            tcpstat.tcps_sndwinup++;

        MGETHDR(m, M_DONTWAIT, MT_HEADER);
        if (m == NULL) {
            error = ENOBUFS;
            goto out;
        }
        m->m_data += max_linkhdr;
        m->m_len = hdrlen;
    }
    m->m_pkthdr.rcvif = (struct ifnet *)0;
    ti = mtod(m, struct tcpiphdr *);
    if (tp->t_template == 0)
        panic("tcp_output");
    bcopy((caddr_t)tp->t_template, (caddr_t)ti, sizeof (struct tcpiphdr));

    /*
     * Fill in fields, remembering maximum advertised
     * window for use in delaying messages about window sizes.
     * If resending a FIN, be sure not to use a new sequence number.
     */
    if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
        tp->snd_nxt == tp->snd_max)
        tp->snd_nxt--;
    /*
     * If we are doing retransmissions, then snd_nxt will
     * not reflect the first unsent octet.  For ACK only
     * packets, we do not want the sequence number of the
     * retransmitted packet, we want the sequence number
     * of the next unsent octet.  So, if there is no data
     * (and no SYN or FIN), use snd_max instead of snd_nxt
     * when filling in ti_seq.  But if we are in persist
     * state, snd_max might reflect one byte beyond the
     * right edge of the window, so use snd_nxt in that
     * case, since we know we aren't doing a retransmission.
     * (retransmit and persist are mutually exclusive...)
     */
    if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST])
        ti->ti_seq = htonl(tp->snd_nxt);
    else
        ti->ti_seq = htonl(tp->snd_max);
    ti->ti_ack = htonl(tp->rcv_nxt);
    if (optlen) {
        bcopy((caddr_t)opt, (caddr_t)(ti + 1), optlen);
        ti->ti_off = (sizeof (struct tcphdr) + optlen) >> 2;
    }
    ti->ti_flags = flags;
    /*
     * Calculate receive window.  Don't shrink window,
     * but avoid silly window syndrome.
     */
    if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg)
        win = 0;
    if (win > (long)TCP_MAXWIN << tp->rcv_scale)
        win = (long)TCP_MAXWIN << tp->rcv_scale;
    if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
        win = (long)(tp->rcv_adv - tp->rcv_nxt);
    ti->ti_win = htons((u_short) (win>>tp->rcv_scale));

    /*
     * If no urgent pointer is outstanding, then we pull the
     * urgent pointer to the left edge of the send window so
     * that it doesn't drift into the send window on sequence
     * number wraparound.  Otherwise, if the urgent pointer
     * points into/after this packet, add it in.
     */
    if (SEQ_GT(tp->snd_up, tp->snd_una)) {
        if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
            int urp = tp->snd_up - tp->snd_nxt;

            /*
             * Internally we store the urgent pointer as the first
             * byte of non-urgent data.  But in the packet, the
             * urgent pointer is supposed to be the last byte of
             * urgent data.  If the user specified TF_STDURG then
             * use this behavior, otherwise use the old method.
             */
            if (tp->t_flags & TF_STDURG)
                urp--;
            if (urp > 65535)
                urp = 65535;
            ti->ti_urp = htons((u_short)urp);
            ti->ti_flags |= TH_URG;
        }
    } else
        tp->snd_up = tp->snd_una;

    /*
     * Put TCP length in extended header, and then
     * checksum extended header and data.
     */
    if (len + optlen)
        ti->ti_len = htons((u_short)(sizeof (struct tcphdr) +
            optlen + len));
    ti->ti_sum = in_cksum(m, (int)(hdrlen + len));

    /*
     * In transmit state, time the transmission and arrange for
     * the retransmit.  In persist state, just set snd_max.
     */
    if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) {
        tcp_seq startseq = tp->snd_nxt;

        /*
         * Advance snd_nxt over sequence space of this segment.
         */
        if (flags & (TH_SYN|TH_FIN)) {
            if (flags & TH_SYN)
                tp->snd_nxt++;
            if (flags & TH_FIN) {
                tp->snd_nxt++;
                tp->t_flags |= TF_SENTFIN;
            }
        }
        tp->snd_nxt += len;
        if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
            tp->snd_max = tp->snd_nxt;
            /*
             * Time this transmission if not a retransmission and
             * not currently timing anything.
             */
            if (tp->t_rtt == 0) {
                tp->t_rtt = 1;
                tp->t_rtseq = startseq;
                tcpstat.tcps_segstimed++;
            }
        }

        /*
         * Set retransmit timer if not currently set,
         * and not doing an ack or a keep-alive probe.
         * Initial value for retransmit timer is smoothed
         * round-trip time + 2 * round-trip time variance.
         * Initialize shift counter which is used for backoff
         * of retransmit time.
         */
        if (tp->t_timer[TCPT_REXMT] == 0 &&
            tp->snd_nxt != tp->snd_una) {
            tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
            if (tp->t_timer[TCPT_PERSIST]) {
                tp->t_timer[TCPT_PERSIST] = 0;
                tp->t_rxtshift = 0;
            }
        }
    } else if (SEQ_GT(tp->snd_nxt + len, tp->snd_max)) {
        tp->snd_max = tp->snd_nxt + len;
    }

    /*
     * Trace.
     */
    if (so->so_options & SO_DEBUG)
        tcp_trace(TA_OUTPUT, tp->t_state, tp, ti, 0);

    /*
     * Fill in IP length and desired time to live and
     * send to IP level.  There should be a better way
     * to handle ttl and tos; we could keep them in
     * the template, but need a way to checksum without them.
     */
    m->m_pkthdr.len = hdrlen + len;
#ifdef TUBA
    if (tp->t_tuba_pcb)
        error = tuba_output(m, tp);
    else
#endif
    {
        /* Perhaps we should set/clear this in the template??? -dab */
        if (tcp_pmtu && ((tp->t_flags & TF_NO_PMTU) == 0) &&
            (rt = tp->t_inpcb->inp_route.ro_rt) &&
            (rt->rt_flags & RTF_UP) &&
            ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0))
            ((struct ip *)ti)->ip_off |= IP_DF;

        ((struct ip *)ti)->ip_len = m->m_pkthdr.len;
        ((struct ip *)ti)->ip_ttl = tp->t_inpcb->inp_ip.ip_ttl;	/* XXX */
        ((struct ip *)ti)->ip_tos = tp->t_inpcb->inp_ip.ip_tos;	/* XXX */
#ifdef ACKSFIRST
        /*
         * If this is a pure ack, set IPTOS_LOWDELAY.  The interface layer
         * will then schedule this at a higher priority than other packets.
         * This is useful in asymmetric networks with bidirectional traffic.
         *
         * For questions/comments, please contact:
         * Venkata N. Padmanabhan (padmanab@cs.berkeley.edu)
         * http://www.cs.berkeley.edu/~padmanab
         */
        if (ti->ti_flags & TH_ACK && len == 0)
            ((struct ip *)ti)->ip_tos |= IPTOS_LOWDELAY;
#endif
#if BSD >= 43
        error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
            so->so_options & SO_DONTROUTE, 0);
#else
        error = ip_output(m, (struct mbuf *)0, &tp->t_inpcb->inp_route,
            so->so_options & SO_DONTROUTE);
#endif /* BSD 43 */
    }
#ifdef TCP_STANDARD
    if (!error)
        tp->num_segs_since_ack = 0;
#endif
    if (error) {
out:
        switch (error) {
        case EMSGSIZE:
            /*
             * The Path MTU must have changed.  Re-get
             * the mtu information, and resend.
             * XXX Should we check for a valid route???
             */
            tp->snd_nxt -= len;
            win = tp->t_maxseg;
            len = tp->t_inpcb->inp_route.ro_rt->rt_rmx.rmx_mtu -
                sizeof(struct tcpiphdr);
            tcp_changemss(tp, len);
            if (tp->t_maxseg < win)
                goto again;
            /* XXX FALL THROUGH if maxseg didn't get smaller! */
        case ENOBUFS:
            tcp_quench(tp->t_inpcb, 0);
            /*
             * If we can't send, make sure there is something
             * to get us going again later.  Persist state
             * is not necessarily right, but it is close enough.
             */
            if (tp->t_timer[TCPT_REXMT] == 0 &&
                tp->t_timer[TCPT_PERSIST] == 0) {
                tp->t_rxtshift = 0;
                tcp_setpersist(tp);
            }
            error = 0;
            break;
        case EHOSTUNREACH:
        case ENETDOWN:
            if (TCPS_HAVERCVDSYN(tp->t_state)) {
                tp->t_softerror = error;
                error = 0;
            }
            break;
        default:
            break;
        }
        return (error);
    }
    tcpstat.tcps_sndtotal++;

    /*
     * Data sent (as far as we can tell).
     * If this advertises a larger window than any other segment,
     * then remember the size of the advertised window.
     * Any pending ACK has now been sent.
     */
    if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv))
        tp->rcv_adv = tp->rcv_nxt + win;
    tp->last_ack_sent = tp->rcv_nxt;
    tp->t_flags &= ~TF_ACKNOW;
    tcp_delack_done(tp);
#ifdef LIMIT_BURST
    /* sendalot counts the remaining segments allowed in this burst. */
    if (sendalot--)
#else
    if (sendalot)
#endif
        goto again;
    return (0);
}

/*
 * Start or restart the persist timer, backing the interval off via
 * tcp_backoff[t_rxtshift] (bounded by TCPTV_PERSMIN/TCPTV_PERSMAX) with
 * the smoothed RTT estimate as the base.  Must not be called while the
 * retransmit timer is running (panics if it is).
 */
void
tcp_setpersist(tp)
    register struct tcpcb *tp;
{
    register t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;

    if (tp->t_timer[TCPT_REXMT])
        panic("tcp_output REXMT");
    /*
     * Start/restart persistance timer.
     */
    TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
        t * tcp_backoff[tp->t_rxtshift],
        TCPTV_PERSMIN, TCPTV_PERSMAX);
    if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
        tp->t_rxtshift++;
}