/* * Copyright (c) 1996, 1997 Berkeley Software Design, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that this notice is retained, * the conditions in the following notices are met, and terms applying * to contributors in the following notices also apply to Berkeley * Software Design, Inc. * * BSDI tcp_timer.c,v 2.9 1997/01/16 14:06:35 karels Exp */ /* * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95 */ #ifndef TUBA_INCLUDE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* TUBA_INCLUDE */ extern int tcp_keepidle; extern int tcp_keepintvl; extern int tcp_keepcnt; extern int tcp_maxpersistidle; int tcp_maxidle; /* * Fast timeout routine for processing delayed acks */ void tcp_fasttimo() { register struct tcpcb *tp, *tpnext; int s = splnet(); for (tp = tcp_delacks.lh_first; tp; tp = tpnext) { tpnext = tp->t_delacks.le_next; tp->t_flags |= TF_ACKNOW; tcpstat.tcps_delack++; /* we assume that tcp_output will invoke tcp_delack_done() */ (void) tcp_output(tp); } #ifdef FINE_GRAINED_TSTAMP { struct timeval curtime; microtime(&curtime); exact_time_at_fasttick = EXACT_TIME(&curtime); } #endif #ifdef FAST_START for (tp = tcp_faststart.tqh_first; tp; tp = tp->t_faststart.tqe_next) { if (tp->t_timer[TCPT_RESET]) if (--tp->t_timer[TCPT_RESET] == 0) { /* fall back to regular slow start after a failed fast start */ tcp_timers(tp, TCPT_RESET); } } #endif splx(s); } /* * Tcp protocol timeout routine called every 500 ms. * Updates the timers in all active tcb's and * causes finite state machine actions if timers expire. 
 */
void
tcp_slowtimo()
{
	register struct inpcb *ip, *ipnxt;
	register struct tcpcb *tp;
	int s = splnet();
	register int i;
	extern int tcp_msltime;
	struct rtentry *rt;
	static int syn_cache_last = 0;	/* slow ticks since last syn-cache sweep */

	/* Recompute the keepalive drop threshold from the current tunables. */
	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
	/*
	 * Search through tcb's and update active timers.
	 */
	ip = tcb.inp_next;
	if (ip == 0)
		goto leave;
	for (; ip != &tcb; ip = ipnxt) {
		/* Save the next PCB now: timer actions below may free ip. */
		ipnxt = ip->inp_next;
		tp = intotcpcb(ip);
		if (tp == 0 || tp->t_state == TCPS_LISTEN)
			continue;
		/*
		 * The first part of the connection queue contains
		 * connections in states before TIME_WAIT; stop
		 * at the first TIME_WAIT connection.
		 */
		if (tp->t_state == TCPS_TIME_WAIT)
			break;
		for (i = 0; i < TCPT_NTIMERS; i++) {
#ifdef FAST_START
			/* TCPT_RESET is aged in tcp_fasttimo() instead. */
			if (i == TCPT_RESET)
				continue;
#endif
			if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
				/*
				 * Dispatch the expired timer; the timer
				 * index is smuggled through an mbuf-pointer
				 * argument (historic 4.4BSD hack).
				 */
				(void) tcp_usrreq(ip->inp_socket,
				    PRU_SLOWTIMO, (struct mbuf *)0,
				    (struct mbuf *)i, (struct mbuf *)0);
				/*
				 * The PCB may have been freed by the timer
				 * action; detect that via the prev link and
				 * skip the post-processing below.
				 */
				if (ipnxt->inp_prev != ip)
					goto tpgone;
			}
		}
		/* Age the cached-route path-MTU estimate if probing is on. */
		if (((rt = ip->inp_route.ro_rt) != NULL) &&
		    ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) &&
		    ((rt->rt_flags & RTF_PROBEMTU) != 0))
			tcp_agepathmtu(ip, rt);
		tp->t_idle++;
#ifdef FAST_START
		tp->t_snd_idle++;
#endif
		/* Bump the active round-trip-time counter, if one is running. */
		if (tp->t_rtt)
			tp->t_rtt++;
tpgone:
		;
	}
	/*
	 * If we did not hit the end of the queue, we must
	 * have hit the oldest connection in TIME_WAIT state.
	 * Decrement its remaining time; if expired, time out
	 * this connection and each following connection with
	 * no additional remaining time.
	 */
	if ((ip != &tcb) && (--tp->t_timer[TCPT_2MSL] <= 0)) {
		for (;;) {
			ipnxt = ip->inp_next;
			(void) tcp_usrreq(ip->inp_socket, PRU_SLOWTIMO,
			    (struct mbuf *)0, (struct mbuf *)TCPT_2MSL,
			    (struct mbuf *)0);
			if ((ip = ipnxt) == &tcb)
				break;
			tp = intotcpcb(ip);
			if (tp->t_timer[TCPT_2MSL])
				break;
		}
	}
	if (tcp_msltime > 0)
		--tcp_msltime;
	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
#ifdef TCP_COMPAT_42
	if ((int)tcp_iss < 0)
		tcp_iss = TCP_ISSINCR;			/* XXX */
#endif
	tcp_now++;					/* for timestamps */
#ifdef FINE_GRAINED_TSTAMP
	/* Record the exact wall-clock time of this slow tick. */
	{
		struct timeval curtime;

		microtime(&curtime);
		exact_time_at_slowtick = EXACT_TIME(&curtime);
	}
#endif
	/* Sweep the SYN cache every tcp_syn_cache_interval slow ticks. */
	if (++syn_cache_last >= tcp_syn_cache_interval) {
		syn_cache_timer(syn_cache_last);
		syn_cache_last = 0;
	}
leave:
	splx(s);
#ifdef FASTCPTIMO
	/*
	 * Self-rescheduling speedup: re-arm ourselves SLOWTIMO_SPEEDUP
	 * times per normal 500 ms period.
	 */
	if (!slowtimocount) {
		extern int hz;

		slowtimocount = SLOWTIMO_SPEEDUP - 1;
		timeout(tcp_slowtimo, (void *)0, hz/(2*SLOWTIMO_SPEEDUP));
	} else
		slowtimocount--;
#endif
}
#ifndef TUBA_INCLUDE
/*
 * Cancel all timers for TCP tp.
 */
void
tcp_canceltimers(tp)
	struct tcpcb *tp;
{
	register int i;

	for (i = 0; i < TCPT_NTIMERS; i++)
		tp->t_timer[i] = 0;
}

/* Retransmit backoff multipliers, indexed by t_rxtshift (capped at 64x). */
int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

int	tcp_totbackoff = 511;	/* sum of tcp_backoff[] */

/*
 * TCP timer processing.
 */
struct tcpcb *
tcp_timers(tp, timer)
	register struct tcpcb *tp;
	int timer;
{
	register int rexmt;

#ifdef FAST_START
	/*
	 * If we are in fast start mode with TXF_FS_FLR set but are using
	 * the regular TCPT_REXMT timer (because TXF_FS_FRT is not set), we
	 * take action as if the TCPT_RESET timer had expired.
	 */
	if (FS_FLR(tp) && timer == TCPT_REXMT &&
	    SEQ_GEQ(tp->snd_una, tp->fs_startseq))
		timer = TCPT_RESET;
#endif
	switch (timer) {

	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long, or if 2MSL time is up from TIME_WAIT, delete connection
	 * control block.  Otherwise, check again in a bit.
 */
	case TCPT_2MSL:
		if (tp->t_state != TCPS_TIME_WAIT &&
		    tp->t_idle <= tcp_maxidle)
			tp->t_timer[TCPT_2MSL] = tcp_keepintvl;
		else
			tp = tcp_close(tp);
		break;

	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	case TCPT_REXMT:
		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
			/* Too many retries: give up and drop the connection. */
			tp->t_rxtshift = TCP_MAXRXTSHIFT;
			tcpstat.tcps_timeoutdrop++;
			tp = tcp_drop(tp, tp->t_softerror ?
			    tp->t_softerror : ETIMEDOUT);
			break;
		}
		tcpstat.tcps_rexmttimeo++;
		/* Exponential backoff, clamped to [t_rttmin, TCPTV_REXMTMAX]. */
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
		TCPT_RANGESET(tp->t_rxtcur, rexmt,
		    tp->t_rttmin, TCPTV_REXMTMAX);
		tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
		/*
		 * If losing, let the lower level know and try for
		 * a better route.  Also, if we backed off this far,
		 * our srtt estimate is probably bogus.  Clobber it
		 * so we'll take the next rtt measurement as our srtt;
		 * move the current srtt into rttvar to keep the current
		 * retransmit times until then.
		 */
		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
			in_losing(tp->t_inpcb);
			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
			tp->t_srtt = 0;
		}
		tp->snd_nxt = tp->snd_una;
		/*
		 * If timing a segment in this window, stop the timer.
		 */
		tp->t_rtt = 0;
		/*
		 * Close the congestion window down to one segment
		 * (we'll open it by one segment for each ack we get).
		 * Since we probably have a window's worth of unacked
		 * data accumulated, this "slow start" keeps us from
		 * dumping all that data as back-to-back packets (which
		 * might overwhelm an intermediate gateway).
		 *
		 * There are two phases to the opening: Initially we
		 * open by one mss on each ack.  This makes the window
		 * size increase exponentially with time.  If the
		 * window is larger than the path can handle, this
		 * exponential growth results in dropped packet(s)
		 * almost immediately.  To get more time between
		 * drops but still "push" the network to take advantage
		 * of improving conditions, we switch from exponential
		 * to linear window opening at some threshold size.
		 * For a threshold, we use half the current window
		 * size, truncated to a multiple of the mss.
		 *
		 * (the minimum cwnd that will give us exponential
		 * growth is 2 mss.  We don't allow the threshold
		 * to go below this.)
		 */
		{
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		/* printf("REXMT timeout\n");*/
		tp->t_dupacks = 0;
		}
#ifdef NEWRENO
		/*
		 * Forget about pkts sent before timeout.
		 * XXX This opens the danger of a false fast retransmission
		 */
		tp->snd_last = tp->snd_una;
#endif
#ifdef LIMIT_BURST
		/* Cancel any pending burst-limited output callback. */
		if (tp->burst_pending) {
			untimeout(tcp_output_dummy, (void *) tp);
			tp->burst_pending = 0;
		}
#endif
		(void) tcp_output(tp);
		break;

#ifdef FAST_START
	case TCPT_RESET:
		/*
		 * We try to mimic what the post-idle period behavior of the
		 * connection would have been had fast start not been
		 * attempted.  cwnd and ssthresh are set to appropriate
		 * values, and snd_max is retracted in an attempt to "forget"
		 * later packets that may have been sent during fast start.
		 * However, the code in tcp_input.cc ensures that if such
		 * packets had in fact made it to the receiver (as indicated
		 * by the ack covering these packets), we do not
		 * unnecessarily retransmit them.  For this purpose, we save
		 * the current value of snd_max in fs_snd_max.
		 */
		tp->t_xflags &= ~TXF_FS_MODE;	/* terminate fast start mode */
		/* remove this connection from list of fast start connections */
		if (tp->t_faststart.tqe_prev) {
			TAILQ_REMOVE(&tcp_faststart, tp, t_faststart);
			tp->t_xflags &= ~TXF_FS_RESET_PEND;
			tp->t_faststart.tqe_prev = 0;
		}
		/* cancel the TCPT_RESET timer */
		tp->t_timer[TCPT_RESET] = 0;
		/* set rexmt timer for segment about to be retransmitted */
		tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
		tp->snd_nxt = tp->snd_una;	/* segment to retransmit */
		/* if timing a segment in this window, stop the timer */
		tp->t_rtt = 0;
#ifdef TCP_STANDARD
		tp->snd_cwnd = tp->t_maxseg;	/* close cwnd down to 1 seg */
#else
		tp->snd_cwnd = 2*tp->t_maxseg;	/* close cwnd down to 2 segs */
#endif
		/* restore ssthresh to its value when fast start was initiated */
		tp->snd_ssthresh = tp->fs_ssthresh;
		tp->fs_cwnd = 0;	/* forget about most recent successful window */
		tp->t_dupacks = 0;	/* discard dupack info (XXX should we?) */
		/* save the current value of snd_max */
		tp->fs_snd_max = tp->snd_max;
		/*
		 * "forget" about about later pkts we may have sent during
		 * fast start.  However, if an ack for such packets is ever
		 * received, fs_snd_max is used to adjust snd_max appropriately
		 * so that the acks are NOT discarded as being "beyond the
		 * window" (code that does this is in tcp_input()).
		 */
		tp->snd_max = tp->snd_una;
		/* NOTE(review): self-assignment below is a no-op as written;
		 * possibly a leftover -- confirm intent against history. */
		tp->snd_una = tp->snd_una;
#ifdef LIMIT_BURST
		/* Cancel any pending burst-limited output callback. */
		if (tp->burst_pending) {
			untimeout(tcp_output_dummy, (void *) tp);
			tp->burst_pending = 0;
		}
#endif
		(void) tcp_output(tp);	/* retransmit packet */
		break;
#endif

	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	case TCPT_PERSIST:
		tcpstat.tcps_persisttimeo++;
		/*
		 * Hack: if the peer is dead/unreachable, we do not
		 * time out if the window is closed.  After a full
		 * backoff, drop the connection if the idle time
		 * (no responses to probes) reaches the maximum
		 * backoff that we would use if retransmitting.
		 */
		if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
		    (tp->t_idle >= tcp_maxpersistidle ||
		    tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
			tcpstat.tcps_persistdrop++;
			tp = tcp_drop(tp, ETIMEDOUT);
			break;
		}
		/* Re-arm the persist timer and force out one probe segment. */
		tcp_setpersist(tp);
		tp->t_force = 1;
		(void) tcp_output(tp);
		tp->t_force = 0;
		break;

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	case TCPT_KEEP:
		tcpstat.tcps_keeptimeo++;
		if (tp->t_state < TCPS_ESTABLISHED)
			goto dropit;
		if (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE &&
		    tp->t_state <= TCPS_CLOSE_WAIT) {
			if (tp->t_idle >= tcp_keepidle + tcp_maxidle)
				goto dropit;
			/*
			 * Send a packet designed to force a response
			 * if the peer is up and reachable:
			 * either an ACK if the connection is still alive,
			 * or an RST if the peer has closed the connection
			 * due to timeout or reboot.
			 * Using sequence number tp->snd_una-1
			 * causes the transmitted zero-length segment
			 * to lie outside the receive window;
			 * by the protocol spec, this requires the
			 * correspondent TCP to respond.
			 */
			tcpstat.tcps_keepprobe++;
#ifdef TCP_COMPAT_42
			/*
			 * The keepalive packet must have nonzero length
			 * to get a 4.2 host to respond.
			 */
			(void) tcp_respond(tp, tp->t_template,
			    (struct mbuf *)NULL, tp->rcv_nxt - 1,
			    tp->snd_una - 1, 0);
#else
			(void) tcp_respond(tp, tp->t_template,
			    (struct mbuf *)NULL, tp->rcv_nxt,
			    tp->snd_una - 1, 0);
#endif
			tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
		} else
			tp->t_timer[TCPT_KEEP] = tcp_keepidle;
		break;
	dropit:
		tcpstat.tcps_keepdrops++;
		tp = tcp_drop(tp, ETIMEDOUT);
		break;
	}
	/* Return the (possibly freed-and-NULLed) control block to the caller. */
	return (tp);
}
#endif /* TUBA_INCLUDE */