/* * Copyright (c) 1996, 1997 Berkeley Software Design, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that this notice is retained, * the conditions in the following notices are met, and terms applying * to contributors in the following notices also apply to Berkeley * Software Design, Inc. * * BSDI tcp_var.h,v 2.13 1997/01/16 14:06:37 karels Exp */ /* * Copyright (c) 1982, 1986, 1993, 1994, 1995, 1998 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_var.h	8.4 (Berkeley) 5/24/95
 */

#ifndef _NETINET_TCP_VAR_H_
#define _NETINET_TCP_VAR_H_

/*
 * Kernel variables for tcp.
 */

#ifdef SACK
/*
 * One SACK block: a contiguous range of sequence space reported
 * by the peer as received out of order.
 */
struct sackblk {
	tcp_seq	start;		/* start seq no. of sack block */
	tcp_seq	end;		/* end seq no. */
};

/*
 * One hole in the sender's outstanding data, as inferred from the
 * peer's SACK blocks; kept on a sorted singly-linked list (snd_holes).
 */
struct sackhole {
	tcp_seq	start;		/* start seq no. of hole */
	tcp_seq	end;		/* end seq no. */
	int	dups;		/* number of dup(s)acks for this hole */
	tcp_seq	rxmit;		/* next seq. no in hole to be retransmitted */
	struct sackhole *next;	/* next in list */
};
#endif

/*
 * Tcp control block, one per tcp; fields:
 */
struct tcpcb {
	struct	tcpiphdr *seg_next;	/* sequencing queue */
	struct	tcpiphdr *seg_prev;
	short	t_state;		/* state of this connection */
	short	t_timer[TCPT_NTIMERS];	/* tcp timers */
	short	t_rxtshift;		/* log(2) of rexmt exp. backoff */
	short	t_rxtcur;		/* current retransmit value */
	short	t_dupacks;		/* consecutive dup acks recd */
	u_short	t_maxseg;		/* maximum segment size */
	char	t_force;		/* 1 if forcing out a byte */
	u_short	t_flags;
#define	TF_ACKNOW	0x0001		/* ack peer immediately */
/* #define TF_DELACK	0x0002		 * ack, but try to delay it */
#define	TF_NODELAY	0x0004		/* don't delay packets to coalesce */
#define	TF_NOOPT	0x0008		/* don't use tcp options */
#define	TF_SENTFIN	0x0010		/* have sent FIN */
#define	TF_USE_SCALE	0x0020		/* request/use window scaling */
#define	TF_SEND_TSTMP	0x0040		/* request/send timestamps */
#define	TF_NO_PMTU	0x0080		/* Don't use path MTU discovery */
					/* NOTE(review): 0x0100 is unused here;
					 * confirm nothing else claims it */
#define	TF_SACK_PERMIT	0x0200		/* other side said I could SACK */
#define	TF_STDURG	0x0400		/* URG ptr is last byte of urg data */
#define	TF_WASIDLE	0x0800		/* tcp_output() was idle on last call */

	struct	tcpiphdr *t_template;	/* skeletal packet for transmit */
	struct	inpcb *t_inpcb;		/* back pointer to internet pcb */
/*
 * The following fields are used as in the protocol specification.
 * See RFC783, Dec. 1981, page 21.
 */
/* send sequence variables */
	tcp_seq	snd_una;		/* send unacknowledged */
	tcp_seq	snd_nxt;		/* send next */
	tcp_seq	snd_up;			/* send urgent pointer */
	tcp_seq	snd_wl1;		/* window update seg seq number */
	tcp_seq	snd_wl2;		/* window update seg ack number */
	tcp_seq	iss;			/* initial send sequence number */
	u_long	snd_wnd;		/* send window */
#ifdef SACK
	int	sack_disable;		/* disable SACK for this connection */
	int	snd_numholes;		/* number of holes seen by sender */
	struct	sackhole *snd_holes;	/* linked list of holes (sorted) */
#if defined(SACK) && defined(FACK)
	tcp_seq	snd_fack;		/* for FACK congestion control */
	u_long	snd_awnd;		/* snd_nxt - snd_fack + */
					/* retransmitted data */
	int	retran_data;		/* amount of outstanding retx. data */
#endif /* FACK */
#endif /* SACK */
#if defined(SACK) || defined(NEWRENO)
	tcp_seq	snd_recover;		/* for use in fast recovery */
#endif
/* receive sequence variables */
	u_long	rcv_wnd;		/* receive window */
	tcp_seq	rcv_nxt;		/* receive next */
	tcp_seq	rcv_up;			/* receive urgent pointer */
	tcp_seq	irs;			/* initial receive sequence number */
#ifdef SACK
	tcp_seq	rcv_laststart;		/* start of last segment recd. */
	tcp_seq	rcv_lastend;		/* end of ... */
	tcp_seq	rcv_lastsack;		/* last seq number(+1) sack'd by rcv'r*/
	int	rcv_numsacks;		/* # distinct sack blks present */
	struct	sackblk sackblks[MAX_SACK_BLKS]; /* seq nos. of sack blocks */
#endif
/*
 * Additional variables for this implementation.
 */
/* receive variables */
	tcp_seq	rcv_adv;		/* advertised window */
/* retransmit variables */
	tcp_seq	snd_max;		/* highest sequence number sent;
					 * used to recognize retransmits */
/* congestion control (for slow start, source quench, retransmit after loss) */
	u_long	snd_cwnd;		/* congestion-controlled window */
	u_long	snd_ssthresh;		/* snd_cwnd size threshold for
					 * for slow start exponential to
					 * linear switch */
/*
 * transmit timing stuff.  See below for scale of srtt and rttvar.
 * "Variance" is actually smoothed difference.
 */
	short	t_rtt;			/* round trip time */
	u_short	t_rttmin;		/* minimum rtt allowed */
	tcp_seq	t_rtseq;		/* sequence number being timed */
	short	t_srtt;			/* smoothed round-trip time */
	short	t_rttvar;		/* variance in round-trip time */
	u_long	t_idle;			/* inactivity time */
	u_long	max_sndwnd;		/* largest window peer has offered */

/* out-of-band data */
	char	t_oobflags;		/* have some */
	char	t_iobc;			/* input character */
#define	TCPOOB_HAVEDATA	0x01
#define	TCPOOB_HADDATA	0x02
	short	t_softerror;		/* possible error not yet reported */

/* RFC 1323 variables */
	u_char	snd_scale;		/* window scaling for send window */
	u_char	rcv_scale;		/* window scaling for recv window */
	u_char	request_r_scale;	/* pending window scaling */
	u_char	requested_s_scale;	/* scale offered by peer in its SYN */
	u_long	ts_recent;		/* timestamp echo data */
	u_long	ts_recent_age;		/* when last updated */
	tcp_seq	last_ack_sent;		/* for PAWS/timestamp bookkeeping */

/* TUBA stuff */
	caddr_t	t_tuba_pcb;		/* next level down pcb for TCP over z */

/* should be moved up */
	LIST_ENTRY(tcpcb) t_delacks;	/* list of connections needing delack */
	u_short	t_peermaxseg;		/* MSS offered by peer */
};

/*
 * Compressed state for an embryonic (SYN-received) connection.
 * This structure should not exceed 32 bytes.
 */
struct syn_cache {
	struct	syn_cache *sc_next;	/* next entry in hash bucket */
	u_long	sc_tstmp:1,		/* peer requested timestamps */
		sc_hash:31;		/* hash value for this entry */
	struct	in_addr sc_src;		/* source address of SYN */
	struct	in_addr sc_dst;		/* destination address of SYN */
	tcp_seq	sc_irs;			/* peer's initial sequence number */
	tcp_seq	sc_iss;			/* our initial sequence number */
	u_short	sc_sport;		/* source port */
	u_short	sc_dport;		/* destination port */
	u_short	sc_peermaxseg;		/* MSS offered by peer */
	u_char	sc_timer;		/* time remaining in this bucket */
	u_char	sc_request_r_scale:4,	/* our requested window scale */
		sc_requested_s_scale:4;	/* peer's requested window scale */
};

/* One hash bucket of the SYN cache. */
struct syn_cache_head {
	struct	syn_cache *sch_first;	/* First entry in the bucket */
	struct	syn_cache *sch_last;	/* Last entry in the bucket */
	struct	syn_cache_head *sch_headq; /* The next non-empty bucket */
	short	sch_timer_sum;		/* Total time in this bucket */
	u_short	sch_length;		/* # of elements in bucket */
};

/* Convert an internet pcb (or its socket) to the attached tcpcb. */
#define	intotcpcb(ip)	((struct tcpcb *)(ip)->inp_ppcb)
#define	sototcpcb(so)	(intotcpcb(sotoinpcb(so)))

/*
 * The following results in generation of delayed acks
 * in the opposite order in which they were requested...
*/ #define tcp_delack(tp) { \ if ((tp)->t_delacks.le_prev == 0) \ LIST_INSERT_HEAD(&tcp_delacks, (tp), t_delacks); \ } #define tcp_delack_done(tp) { \ if ((tp)->t_delacks.le_prev) { \ LIST_REMOVE((tp), t_delacks); \ (tp)->t_delacks.le_prev = 0; \ } \ } /* * Cancel 2msl timer (to restart, or to delete connection prematurely): * if this is the newest 2msl connection, reduce the total time for * the queue, otherwise transfer the remaining time to the next newest * 2msl connection. */ #define tcp_cancel2msl(inp, tp) { \ if (inp->inp_next == &tcb) \ tcp_msltime -= tp->t_timer[TCPT_2MSL]; \ else \ intotcpcb(inp->inp_next)->t_timer[TCPT_2MSL] += \ tp->t_timer[TCPT_2MSL]; \ } /* * The smoothed round-trip time and estimated variance * are stored as fixed point numbers scaled by the values below. * For convenience, these scales are also used in smoothing the average * (smoothed = (1/scale)sample + ((scale-1)/scale)smoothed). * With these scales, srtt has 3 bits to the right of the binary point, * and thus an "ALPHA" of 0.875. rttvar has 2 bits to the right of the * binary point, and is smoothed with an ALPHA of 0.75. */ #define TCP_RTT_SCALE 8 /* multiplier for srtt; 3 bits frac. */ #define TCP_RTT_SHIFT 3 /* shift for srtt; 3 bits frac. */ #define TCP_RTTVAR_SCALE 4 /* multiplier for rttvar; 2 bits */ #define TCP_RTTVAR_SHIFT 2 /* multiplier for rttvar; 2 bits */ /* * The initial retransmission should happen at rtt + 4 * rttvar. * Because of the way we do the smoothing, srtt and rttvar * will each average +1/2 tick of bias. When we compute * the retransmit timer, we want 1/2 tick of rounding and * 1 extra tick because of +-1/2 tick uncertainty in the * firing of the timer. The bias will give us exactly the * 1.5 tick we need. But, because the bias is * statistical, we have to test that we don't drop below * the minimum feasible timer (which is 2 ticks). * This macro assumes that the value of TCP_RTTVAR_SCALE * is the same as the multiplier for rttvar. 
*/ #define TCP_REXMTVAL(tp) \ (((tp)->t_srtt >> TCP_RTT_SHIFT) + (tp)->t_rttvar) /* XXX * We want to avoid doing m_pullup on incoming packets but that * means avoiding dtom on the tcp reassembly code. That in turn means * keeping an mbuf pointer in the reassembly queue (since we might * have a cluster). As a quick hack, the source & destination * port numbers (which are no longer needed once we've located the * tcpcb) are overlayed with an mbuf pointer. */ #define REASS_MBUF(ti) (*(struct mbuf **)&((ti)->ti_t)) /* * TCP statistics. * Many of these should be kept per connection, * but that's inconvenient at the moment. */ struct tcpstat { u_quad_t tcps_connattempt; /* connections initiated */ u_quad_t tcps_accepts; /* connections accepted */ u_quad_t tcps_connects; /* connections established */ u_quad_t tcps_drops; /* connections dropped */ u_quad_t tcps_conndrops; /* embryonic connections dropped */ u_quad_t tcps_closed; /* conn. closed (includes drops) */ u_quad_t tcps_segstimed; /* segs where we tried to get rtt */ u_quad_t tcps_rttupdated; /* times we succeeded */ u_quad_t tcps_delack; /* delayed acks sent */ u_quad_t tcps_timeoutdrop; /* conn. 
dropped in rxmt timeout */ u_quad_t tcps_rexmttimeo; /* retransmit timeouts */ u_quad_t tcps_persisttimeo; /* persist timeouts */ u_quad_t tcps_keeptimeo; /* keepalive timeouts */ u_quad_t tcps_keepprobe; /* keepalive probes sent */ u_quad_t tcps_keepdrops; /* connections dropped in keepalive */ u_quad_t tcps_sndtotal; /* total packets sent */ u_quad_t tcps_sndpack; /* data packets sent */ u_quad_t tcps_sndbyte; /* data bytes sent */ u_quad_t tcps_sndrexmitpack; /* data packets retransmitted */ u_quad_t tcps_sndrexmitbyte; /* data bytes retransmitted */ u_quad_t tcps_sndrexmitfast; /* Fast retransmits */ u_quad_t tcps_sndacks; /* ack-only packets sent */ u_quad_t tcps_sndprobe; /* window probes sent */ u_quad_t tcps_sndurg; /* packets sent with URG only */ u_quad_t tcps_sndwinup; /* window update-only packets sent */ u_quad_t tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */ u_quad_t tcps_rcvtotal; /* total packets received */ u_quad_t tcps_rcvpack; /* packets received in sequence */ u_quad_t tcps_rcvbyte; /* bytes received in sequence */ u_quad_t tcps_rcvbadsum; /* packets received with ccksum errs */ u_quad_t tcps_rcvbadoff; /* packets received with bad offset */ u_quad_t tcps_rcvshort; /* packets received too short */ u_quad_t tcps_rcvduppack; /* duplicate-only packets received */ u_quad_t tcps_rcvdupbyte; /* duplicate-only bytes received */ u_quad_t tcps_rcvpartduppack; /* packets with some duplicate data */ u_quad_t tcps_rcvpartdupbyte; /* dup. bytes in part-dup. 
packets */ u_quad_t tcps_rcvoopack; /* out-of-order packets received */ u_quad_t tcps_rcvoobyte; /* out-of-order bytes received */ u_quad_t tcps_rcvpackafterwin; /* packets with data after window */ u_quad_t tcps_rcvbyteafterwin; /* bytes rcvd after window */ u_quad_t tcps_rcvafterclose; /* packets rcvd after "close" */ u_quad_t tcps_rcvwinprobe; /* rcvd window probe packets */ u_quad_t tcps_rcvdupack; /* rcvd duplicate acks */ u_quad_t tcps_rcvacktoomuch; /* rcvd acks for unsent data */ u_quad_t tcps_rcvackpack; /* rcvd ack packets */ u_quad_t tcps_rcvackbyte; /* bytes acked by rcvd acks */ u_quad_t tcps_rcvwinupd; /* rcvd window update packets */ u_quad_t tcps_pawsdrop; /* segments dropped due to PAWS */ u_quad_t tcps_predack; /* times hdr predict ok for acks */ u_quad_t tcps_preddat; /* times hdr predict ok for data pkts */ u_quad_t tcps_pcbcachemiss; u_quad_t tcps_persistdrop; /* timeout in persist state */ u_quad_t tcps_badsyn; /* bogus SYN, e.g. premature ACK */ u_quad_t tcps_droppedsyn; /* dropped SYN's because sonewconn() failed */ /* These statistics deal with the SYN cache. 
*/ u_quad_t tcps_sc_added; /* # of entries added */ u_quad_t tcps_sc_completed; /* # of connections completed */ u_quad_t tcps_sc_timed_out; /* # of entries timed out */ u_quad_t tcps_sc_overflowed; /* # dropped due to overflow */ u_quad_t tcps_sc_reset; /* # dropped due to RST */ u_quad_t tcps_sc_unreach; /* # dropped due to ICMP unreach */ u_quad_t tcps_sc_bucketoverflow;/* # dropped due to bucket overflow */ u_quad_t tcps_sc_aborted; /* # of entries aborted (no mem) */ u_quad_t tcps_sc_dupesyn; /* # of duplicate SYNs received */ u_quad_t tcps_sc_dropped; /* # of SYNs dropped (no route/mem) */ }; #ifdef KERNEL struct inpcb tcb; /* head of queue of active tcpcb's */ struct tcpstat tcpstat; /* tcp statistics */ u_long tcp_now; /* for RFC 1323 timestamps */ int tcp_msltime; /* total of 2MSL timers already in queue */ int tcp_listen_hash_size; int tcp_conn_hash_size; LIST_HEAD(tcp_hash_list, inpcb) tcp_listen_hash[], tcp_conn_hash[]; LIST_HEAD(tcp_delacks, tcpcb) tcp_delacks; int tcp_syn_cache_size; int tcp_syn_cache_timeo; struct syn_cache_head tcp_syn_cache[], *tcp_syn_cache_first; u_long syn_cache_count; struct tcp_opt_info; int tcp_attach __P((struct socket *)); void tcp_canceltimers __P((struct tcpcb *)); struct tcpcb * tcp_close __P((struct tcpcb *)); void tcp_ctlinput __P((int, struct sockaddr *, struct ip *)); int tcp_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); struct tcpcb * tcp_disconnect __P((struct tcpcb *)); struct tcpcb * tcp_drop __P((struct tcpcb *, int)); void tcp_dooptions __P((struct tcpcb *, u_char *, int, struct tcpiphdr *, struct tcp_opt_info *)); void tcp_drain __P((void)); void tcp_fasttimo __P((void)); void tcp_init __P((void)); void tcp_input __P((struct mbuf *, int)); struct rtentry * tcp_rtlookup __P((register struct inpcb *)); u_int tcp_maxseg __P((struct tcpcb *, u_int)); void tcp_maxseg_init __P((struct tcpcb *)); void tcp_peer_mss __P((struct tcpcb *, u_int)); u_long tcp_send_mss __P((struct inpcb *)); void 
tcp_changemss __P((register struct tcpcb *, u_int)); struct tcpcb * tcp_newtcpcb __P((struct inpcb *)); void tcp_notify __P((struct inpcb *, int)); int tcp_output __P((struct tcpcb *)); void tcp_pulloutofband __P((struct socket *, struct tcpiphdr *, struct mbuf *)); void tcp_quench __P((struct inpcb *, int)); void tcp_mtudisc __P((struct inpcb *, int)); int tcp_reass __P((struct tcpcb *, struct tcpiphdr *, struct mbuf *)); int tcp_respond __P((struct tcpcb *, struct tcpiphdr *, struct mbuf *, u_long, u_long, int)); void tcp_setpersist __P((struct tcpcb *)); void tcp_slowtimo __P((void)); int tcp_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); struct tcpiphdr * tcp_template __P((struct tcpcb *)); struct tcpcb * tcp_timers __P((struct tcpcb *, int)); void tcp_trace __P((int, int, struct tcpcb *, struct tcpiphdr *, int)); struct tcpcb * tcp_usrclosed __P((struct tcpcb *)); int tcp_usrreq __P((struct socket *, int, struct mbuf *, struct mbuf *, struct mbuf *)); void tcp_xmit_timer __P((struct tcpcb *, int)); int syn_cache_add __P((struct socket *, struct mbuf *, u_char *, int, struct tcp_opt_info *)); void syn_cache_unreach __P((struct ip *, struct tcphdr *)); struct socket * syn_cache_get __P((struct socket *so, struct mbuf *)); void syn_cache_insert __P((struct syn_cache *, struct syn_cache ***, struct syn_cache_head **)); struct syn_cache * syn_cache_lookup __P((struct tcpiphdr *, struct syn_cache ***, struct syn_cache_head **)); void syn_cache_reset __P((struct tcpiphdr *)); int syn_cache_respond __P((struct syn_cache *, struct mbuf *, register struct tcpiphdr *, long, u_long)); void syn_cache_timer __P((int)); #ifdef SACK int tcp_sack_option __P((struct tcpcb *,struct tcpiphdr *,u_char *,int)); void tcp_update_sack_list __P((struct tcpcb *tp)); void tcp_del_sackholes __P((struct tcpcb *, struct tcpiphdr *)); void tcp_clean_sackreport __P((struct tcpcb *tp)); void tcp_sack_adjust __P((struct tcpcb *tp)); struct sackhole * tcp_sack_output __P((struct 
tcpcb *tp)); int tcp_sack_partialack __P((struct tcpcb *, struct tcpiphdr *)); #ifdef DEBUG void tcp_print_holes __P((struct tcpcb *tp)); #endif #endif /* SACK */ #if defined(NEWRENO) || defined(SACK) int tcp_newreno __P((struct tcpcb *, struct tcpiphdr *)); #endif #if defined(SACK) || defined(NEWRENO) u_long tcp_seq_subtract __P((u_long, u_long )); #endif /* FACK */ #endif /* KERNEL */ #endif /* !_NETINET_TCP_VAR_H_ */