author    rwatson <rwatson@FreeBSD.org>    2004-08-16 18:32:07 +0000
committer rwatson <rwatson@FreeBSD.org>    2004-08-16 18:32:07 +0000
commit    87aa99bbbbf620c4ce98996d472fdae45f077eae (patch)
tree      6046e1d576e7bbc883254e0b133fbd6b383544f2 /sys
parent    cc3f0b4929d2de551332227291984a04e5758213 (diff)
White space cleanup for netinet before branch:

- Trailing tab/space cleanup
- Remove spurious spaces between or before tabs

This change avoids touching files that Andre likely has in his working set
for PFIL hooks changes for IPFW/DUMMYNET.

Approved by:	re (scottl)
Submitted by:	Xin LI <delphij@frontfree.net>
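Aside: the two transformations named in the commit message can be reproduced
mechanically. The filter below is a minimal sketch of that cleanup, for
illustration only -- it is not the tool actually used for this commit (the
log does not record one), and as a plain text filter it would also rewrite
string literals that happen to contain space-before-tab sequences:

/*
 * strip_ws.c -- illustrative whitespace-cleanup filter.
 * Reads a source file on stdin, writes the cleaned file on stdout:
 *   1. strips trailing tabs/spaces from each line;
 *   2. removes runs of spaces immediately preceding a tab, where
 *      they have no effect on the resulting tab stop.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char line[4096];
	size_t len, i, j, o;

	while (fgets(line, sizeof(line), stdin) != NULL) {
		len = strlen(line);
		if (len > 0 && line[len - 1] == '\n')
			line[--len] = '\0';
		/* 1. Trailing tab/space cleanup. */
		while (len > 0 &&
		    (line[len - 1] == ' ' || line[len - 1] == '\t'))
			line[--len] = '\0';
		/* 2. Drop spaces that sit directly before a tab. */
		for (i = o = 0; i < len; i++) {
			if (line[i] == ' ') {
				for (j = i; j < len && line[j] == ' '; j++)
					;
				if (j < len && line[j] == '\t') {
					i = j - 1;	/* skip the run */
					continue;
				}
			}
			line[o++] = line[i];
		}
		line[o] = '\0';
		puts(line);
	}
	return (0);
}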
Diffstat (limited to 'sys')
-rw-r--r--  sys/netinet/icmp_var.h      |   8
-rw-r--r--  sys/netinet/if_atm.c        |  20
-rw-r--r--  sys/netinet/if_atm.h        |   6
-rw-r--r--  sys/netinet/igmp.h          |   2
-rw-r--r--  sys/netinet/in.c            |   6
-rw-r--r--  sys/netinet/in.h            |  60
-rw-r--r--  sys/netinet/in_pcb.h        |   2
-rw-r--r--  sys/netinet/in_proto.c      |  32
-rw-r--r--  sys/netinet/in_var.h        |   2
-rw-r--r--  sys/netinet/ip_gre.c        |   6
-rw-r--r--  sys/netinet/ip_icmp.c       |  12
-rw-r--r--  sys/netinet/ip_id.c         |  22
-rw-r--r--  sys/netinet/ip_mroute.c     | 254
-rw-r--r--  sys/netinet/ip_mroute.h     |  66
-rw-r--r--  sys/netinet/raw_ip.c        |   6
-rw-r--r--  sys/netinet/tcp.h           |   2
-rw-r--r--  sys/netinet/tcp_debug.h     |   2
-rw-r--r--  sys/netinet/tcp_hostcache.c |  22
-rw-r--r--  sys/netinet/tcp_input.c     | 114
-rw-r--r--  sys/netinet/tcp_output.c    | 142
-rw-r--r--  sys/netinet/tcp_reass.c     | 114
-rw-r--r--  sys/netinet/tcp_sack.c      |  14
-rw-r--r--  sys/netinet/tcp_subr.c      | 136
-rw-r--r--  sys/netinet/tcp_syncache.c  |  68
-rw-r--r--  sys/netinet/tcp_timer.c     |  20
-rw-r--r--  sys/netinet/tcp_timer.h     |   2
-rw-r--r--  sys/netinet/tcp_timewait.c  | 136
-rw-r--r--  sys/netinet/tcp_usrreq.c    |  18
-rw-r--r--  sys/netinet/tcp_var.h       |  26
-rw-r--r--  sys/netinet/tcpip.h         |   2
-rw-r--r--  sys/netinet/udp_usrreq.c    |  24
-rw-r--r--  sys/netinet/udp_var.h       |   2
32 files changed, 674 insertions(+), 674 deletions(-)
diff --git a/sys/netinet/icmp_var.h b/sys/netinet/icmp_var.h
index 8657909..3af80de 100644
--- a/sys/netinet/icmp_var.h
+++ b/sys/netinet/icmp_var.h
@@ -45,16 +45,16 @@ struct icmpstat {
u_long icps_oldicmp; /* no error 'cuz old was icmp */
u_long icps_outhist[ICMP_MAXTYPE + 1];
/* statistics related to input messages processed */
- u_long icps_badcode; /* icmp_code out of range */
+ u_long icps_badcode; /* icmp_code out of range */
u_long icps_tooshort; /* packet < ICMP_MINLEN */
u_long icps_checksum; /* bad checksum */
u_long icps_badlen; /* calculated bound mismatch */
u_long icps_reflect; /* number of responses */
u_long icps_inhist[ICMP_MAXTYPE + 1];
- u_long icps_bmcastecho; /* b/mcast echo requests dropped */
- u_long icps_bmcasttstamp; /* b/mcast tstamp requests dropped */
+ u_long icps_bmcastecho; /* b/mcast echo requests dropped */
+ u_long icps_bmcasttstamp; /* b/mcast tstamp requests dropped */
u_long icps_badaddr; /* bad return address */
- u_long icps_noroute; /* no route back */
+ u_long icps_noroute; /* no route back */
};
/*
diff --git a/sys/netinet/if_atm.c b/sys/netinet/if_atm.c
index b19502a..1f4cb7f 100644
--- a/sys/netinet/if_atm.c
+++ b/sys/netinet/if_atm.c
@@ -15,8 +15,8 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
@@ -119,7 +119,7 @@ atm_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
* first check to see if this is not a host route, in which
* case we are being called via "ifconfig" to set the address.
*/
- if ((rt->rt_flags & RTF_HOST) == 0) {
+ if ((rt->rt_flags & RTF_HOST) == 0) {
rt_setgate(rt,rt_key(rt),(struct sockaddr *)&null_sdl);
gate = rt->rt_gateway;
SDL(gate)->sdl_type = rt->rt_ifp->if_type;
@@ -225,7 +225,7 @@ atm_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
if (sin->sin_family != AF_INET)
goto failed;
npcb = npcb_add(NULL, rt->rt_ifp, op.param.vci, op.param.vpi);
- if (npcb == NULL)
+ if (npcb == NULL)
goto failed;
npcb->npcb_flags |= NPCB_IP;
npcb->ipaddr.s_addr = sin->sin_addr.s_addr;
@@ -238,7 +238,7 @@ atm_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
*/
op.rxhand = NULL;
op.param.flags |= ATMIO_FLAG_ASYNC;
- if (rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMOPENVCC,
+ if (rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMOPENVCC,
(caddr_t)&op) != 0) {
printf("atm: couldn't add VC\n");
goto failed;
@@ -269,7 +269,7 @@ failed:
* tell native ATM we are done with this VC
*/
if (rt->rt_flags & RTF_LLINFO) {
- npcb_free((struct natmpcb *)rt->rt_llinfo,
+ npcb_free((struct natmpcb *)rt->rt_llinfo,
NPCB_DESTROY);
rt->rt_llinfo = NULL;
rt->rt_flags &= ~RTF_LLINFO;
@@ -284,7 +284,7 @@ failed:
cl.vpi = *addr++;
cl.vci = *addr++ << 8;
cl.vci |= *addr++;
- (void)rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMCLOSEVCC,
+ (void)rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMCLOSEVCC,
(caddr_t)&cl);
break;
}
@@ -298,7 +298,7 @@ failed:
* [3] "dst" = sockaddr_in (IP) address of dest.
* output:
* [4] "desten" = ATM pseudo header which we will fill in VPI/VCI info
- * return:
+ * return:
* 0 == resolve FAILED; note that "m" gets m_freem'd in this case
* 1 == resolve OK; desten contains result
*
@@ -321,7 +321,7 @@ atmresolve(struct rtentry *rt, struct mbuf *m, struct sockaddr *dst,
if (rt == NULL)
goto bad; /* failed */
RT_REMREF(rt); /* don't keep LL references */
- if ((rt->rt_flags & RTF_GATEWAY) != 0 ||
+ if ((rt->rt_flags & RTF_GATEWAY) != 0 ||
(rt->rt_flags & RTF_LLINFO) == 0 ||
/* XXX: are we using LLINFO? */
rt->rt_gateway->sa_family != AF_LINK) {
@@ -332,7 +332,7 @@ atmresolve(struct rtentry *rt, struct mbuf *m, struct sockaddr *dst,
}
/*
- * note that rt_gateway is a sockaddr_dl which contains the
+ * note that rt_gateway is a sockaddr_dl which contains the
* atm_pseudohdr data structure for this route. we currently
* don't need any rt_llinfo info (but will if we want to support
* ATM ARP [c.f. if_ether.c]).
diff --git a/sys/netinet/if_atm.h b/sys/netinet/if_atm.h
index b8cddf6..4f9a5fb 100644
--- a/sys/netinet/if_atm.h
+++ b/sys/netinet/if_atm.h
@@ -16,8 +16,8 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Charles D. Cranor and
- * Washington University.
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
@@ -43,5 +43,5 @@ struct rtentry;
struct sockaddr;
void atm_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
-int atmresolve(struct rtentry *, struct mbuf *, struct sockaddr *,
+int atmresolve(struct rtentry *, struct mbuf *, struct sockaddr *,
struct atm_pseudohdr *);
diff --git a/sys/netinet/igmp.h b/sys/netinet/igmp.h
index 09bfdf8..96f3844 100644
--- a/sys/netinet/igmp.h
+++ b/sys/netinet/igmp.h
@@ -60,7 +60,7 @@ struct igmp {
/*
* Message types, including version number.
*/
-#define IGMP_MEMBERSHIP_QUERY 0x11 /* membership query */
+#define IGMP_MEMBERSHIP_QUERY 0x11 /* membership query */
#define IGMP_V1_MEMBERSHIP_REPORT 0x12 /* Ver. 1 membership report */
#define IGMP_V2_MEMBERSHIP_REPORT 0x16 /* Ver. 2 membership report */
#define IGMP_V2_LEAVE_GROUP 0x17 /* Leave-group message */
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index ec57015..43a5260 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -60,7 +60,7 @@ static int in_ifinit(struct ifnet *,
struct in_ifaddr *, struct sockaddr_in *, int);
static int subnetsarelocal = 0;
-SYSCTL_INT(_net_inet_ip, OID_AUTO, subnets_are_local, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_ip, OID_AUTO, subnets_are_local, CTLFLAG_RW,
&subnetsarelocal, 0, "Treat all subnets as directly connected");
struct in_multihead in_multihead; /* XXX BSS initialization */
@@ -144,7 +144,7 @@ struct sockaddr_in *ap;
ap->sin_len = 0;
while (--cp >= cplim)
- if (*cp) {
+ if (*cp) {
(ap)->sin_len = cp - (char *) (ap) + 1;
break;
}
@@ -783,7 +783,7 @@ in_ifinit(ifp, ia, sin, scrub)
int
in_broadcast(in, ifp)
struct in_addr in;
- struct ifnet *ifp;
+ struct ifnet *ifp;
{
register struct ifaddr *ifa;
u_long t;
diff --git a/sys/netinet/in.h b/sys/netinet/in.h
index 41b2c6c..4e4f5b1 100644
--- a/sys/netinet/in.h
+++ b/sys/netinet/in.h
@@ -132,8 +132,8 @@ __END_DECLS
#define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */
#define IPPROTO_IGMP 2 /* group mgmt protocol */
#define IPPROTO_GGP 3 /* gateway^2 (deprecated) */
-#define IPPROTO_IPV4 4 /* IPv4 encapsulation */
-#define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */
+#define IPPROTO_IPV4 4 /* IPv4 encapsulation */
+#define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */
#define IPPROTO_ST 7 /* Stream protocol II */
#define IPPROTO_EGP 8 /* exterior gateway protocol */
#define IPPROTO_PIGP 9 /* private interior gateway */
@@ -155,7 +155,7 @@ __END_DECLS
#define IPPROTO_LEAF2 26 /* Leaf-2 */
#define IPPROTO_RDP 27 /* Reliable Data */
#define IPPROTO_IRTP 28 /* Reliable Transaction */
-#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */
+#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */
#define IPPROTO_BLT 30 /* Bulk Data Transfer */
#define IPPROTO_NSP 31 /* Network Services */
#define IPPROTO_INP 32 /* Merit Internodal */
@@ -172,7 +172,7 @@ __END_DECLS
#define IPPROTO_ROUTING 43 /* IP6 routing header */
#define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */
#define IPPROTO_IDRP 45 /* InterDomain Routing*/
-#define IPPROTO_RSVP 46 /* resource reservation */
+#define IPPROTO_RSVP 46 /* resource reservation */
#define IPPROTO_GRE 47 /* General Routing Encap. */
#define IPPROTO_MHRP 48 /* Mobile Host Routing */
#define IPPROTO_BHA 49 /* BHA */
@@ -181,9 +181,9 @@ __END_DECLS
#define IPPROTO_INLSP 52 /* Integ. Net Layer Security */
#define IPPROTO_SWIPE 53 /* IP with encryption */
#define IPPROTO_NHRP 54 /* Next Hop Resolution */
-#define IPPROTO_MOBILE 55 /* IP Mobility */
-#define IPPROTO_TLSP 56 /* Transport Layer Security */
-#define IPPROTO_SKIP 57 /* SKIP */
+#define IPPROTO_MOBILE 55 /* IP Mobility */
+#define IPPROTO_TLSP 56 /* Transport Layer Security */
+#define IPPROTO_SKIP 57 /* SKIP */
#define IPPROTO_ICMPV6 58 /* ICMP6 */
#define IPPROTO_NONE 59 /* IP6 no next header */
#define IPPROTO_DSTOPTS 60 /* IP6 destination option */
@@ -308,7 +308,7 @@ __END_DECLS
* 512, but that conflicts with some well-known-services that firewalls may
* have a fit if we use.
*/
-#define IPPORT_RESERVEDSTART 600
+#define IPPORT_RESERVEDSTART 600
#define IPPORT_MAX 65535
@@ -375,12 +375,12 @@ __END_DECLS
#define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */
#define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */
#define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */
-#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */
-#define IP_RSVP_ON 15 /* enable RSVP in kernel */
-#define IP_RSVP_OFF 16 /* disable RSVP in kernel */
-#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */
-#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */
-#define IP_PORTRANGE 19 /* int; range to choose for unspec port */
+#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */
+#define IP_RSVP_ON 15 /* enable RSVP in kernel */
+#define IP_RSVP_OFF 16 /* disable RSVP in kernel */
+#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */
+#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */
+#define IP_PORTRANGE 19 /* int; range to choose for unspec port */
#define IP_RECVIF 20 /* bool; receive reception if w/dgram */
/* for IPSEC */
#define IP_IPSEC_POLICY 21 /* int; set/get security policy */
@@ -394,11 +394,11 @@ __END_DECLS
#define IP_FW_TABLE_GETSIZE 43 /* get table size */
#define IP_FW_TABLE_LIST 44 /* list table contents */
-#define IP_FW_ADD 50 /* add a firewall rule to chain */
-#define IP_FW_DEL 51 /* delete a firewall rule from chain */
-#define IP_FW_FLUSH 52 /* flush firewall rule chain */
-#define IP_FW_ZERO 53 /* clear single/all firewall counter(s) */
-#define IP_FW_GET 54 /* get entire firewall rule chain */
+#define IP_FW_ADD 50 /* add a firewall rule to chain */
+#define IP_FW_DEL 51 /* delete a firewall rule from chain */
+#define IP_FW_FLUSH 52 /* flush firewall rule chain */
+#define IP_FW_ZERO 53 /* clear single/all firewall counter(s) */
+#define IP_FW_GET 54 /* get entire firewall rule chain */
#define IP_FW_RESETLOG 55 /* reset logging counters */
#define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */
@@ -519,12 +519,12 @@ struct ip_mreq {
#ifdef notyet
#define IPCTL_DEFMTU 4 /* default MTU */
#endif
-#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */
-#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */
-#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */
+#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */
+#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */
+#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */
#define IPCTL_SOURCEROUTE 8 /* may perform source routes */
#define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */
-#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */
+#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */
#define IPCTL_INTRQDROPS 11 /* number of netisr q drops */
#define IPCTL_STATS 12 /* ipstat structure */
#define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */
@@ -543,7 +543,7 @@ struct ip_mreq {
{ "rtminexpire", CTLTYPE_INT }, \
{ "rtmaxcache", CTLTYPE_INT }, \
{ "sourceroute", CTLTYPE_INT }, \
- { "directed-broadcast", CTLTYPE_INT }, \
+ { "directed-broadcast", CTLTYPE_INT }, \
{ "intr-queue-maxlen", CTLTYPE_INT }, \
{ "intr-queue-drops", CTLTYPE_INT }, \
{ "stats", CTLTYPE_STRUCT }, \
@@ -561,15 +561,15 @@ int in_broadcast(struct in_addr, struct ifnet *);
int in_canforward(struct in_addr);
int in_localaddr(struct in_addr);
int in_localip(struct in_addr);
-char *inet_ntoa(struct in_addr); /* in libkern */
+char *inet_ntoa(struct in_addr); /* in libkern */
char *inet_ntoa_r(struct in_addr ina, char *buf); /* in libkern */
-#define in_hosteq(s, t) ((s).s_addr == (t).s_addr)
-#define in_nullhost(x) ((x).s_addr == INADDR_ANY)
+#define in_hosteq(s, t) ((s).s_addr == (t).s_addr)
+#define in_nullhost(x) ((x).s_addr == INADDR_ANY)
-#define satosin(sa) ((struct sockaddr_in *)(sa))
-#define sintosa(sin) ((struct sockaddr *)(sin))
-#define ifatoia(ifa) ((struct in_ifaddr *)(ifa))
+#define satosin(sa) ((struct sockaddr_in *)(sa))
+#define sintosa(sin) ((struct sockaddr *)(sin))
+#define ifatoia(ifa) ((struct in_ifaddr *)(ifa))
#endif /* _KERNEL */
diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
index d6dfe4d..53a418a 100644
--- a/sys/netinet/in_pcb.h
+++ b/sys/netinet/in_pcb.h
@@ -322,7 +322,7 @@ struct inpcbinfo { /* XXX documentation, prefixes */
#define INP_SOCKAF(so) so->so_proto->pr_domain->dom_family
-#define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af)
+#define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af)
#ifdef _KERNEL
extern int ipport_lowfirstauto;
diff --git a/sys/netinet/in_proto.c b/sys/netinet/in_proto.c
index 553f90e..28fff4b 100644
--- a/sys/netinet/in_proto.c
+++ b/sys/netinet/in_proto.c
@@ -141,48 +141,48 @@ struct protosw inetsw[] = {
},
#ifdef IPSEC
{ SOCK_RAW, &inetdomain, IPPROTO_AH, PR_ATOMIC|PR_ADDR,
- ah4_input, 0, 0, 0,
- 0,
+ ah4_input, 0, 0, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
#ifdef IPSEC_ESP
{ SOCK_RAW, &inetdomain, IPPROTO_ESP, PR_ATOMIC|PR_ADDR,
- esp4_input, 0, 0, 0,
- 0,
+ esp4_input, 0, 0, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
#endif
{ SOCK_RAW, &inetdomain, IPPROTO_IPCOMP, PR_ATOMIC|PR_ADDR,
- ipcomp4_input, 0, 0, 0,
- 0,
+ ipcomp4_input, 0, 0, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
#endif /* IPSEC */
#ifdef FAST_IPSEC
{ SOCK_RAW, &inetdomain, IPPROTO_AH, PR_ATOMIC|PR_ADDR,
- ah4_input, 0, ah4_ctlinput, 0,
- 0,
+ ah4_input, 0, ah4_ctlinput, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
{ SOCK_RAW, &inetdomain, IPPROTO_ESP, PR_ATOMIC|PR_ADDR,
- esp4_input, 0, esp4_ctlinput, 0,
- 0,
+ esp4_input, 0, esp4_ctlinput, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
{ SOCK_RAW, &inetdomain, IPPROTO_IPCOMP, PR_ATOMIC|PR_ADDR,
- ipcomp4_input, 0, 0, 0,
- 0,
+ ipcomp4_input, 0, 0, 0,
+ 0,
0, 0, 0, 0,
&nousrreqs
},
#endif /* FAST_IPSEC */
{ SOCK_RAW, &inetdomain, IPPROTO_IPV4, PR_ATOMIC|PR_ADDR|PR_LASTHDR,
- encap4_input, 0, 0, rip_ctloutput,
+ encap4_input, 0, 0, rip_ctloutput,
0,
encap_init, 0, 0, 0,
&rip_usrreqs
@@ -201,7 +201,7 @@ struct protosw inetsw[] = {
},
# ifdef INET6
{ SOCK_RAW, &inetdomain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR|PR_LASTHDR,
- encap4_input, 0, 0, rip_ctloutput,
+ encap4_input, 0, 0, rip_ctloutput,
0,
encap_init, 0, 0, 0,
&rip_usrreqs
@@ -209,7 +209,7 @@ struct protosw inetsw[] = {
#endif
#ifdef IPDIVERT
{ SOCK_RAW, &inetdomain, IPPROTO_DIVERT, PR_ATOMIC|PR_ADDR,
- div_input, 0, div_ctlinput, ip_ctloutput,
+ div_input, 0, div_ctlinput, ip_ctloutput,
0,
div_init, 0, 0, 0,
&div_usrreqs,
@@ -251,7 +251,7 @@ struct protosw inetsw[] = {
extern int in_inithead(void **, int);
struct domain inetdomain =
- { AF_INET, "internet", 0, 0, 0,
+ { AF_INET, "internet", 0, 0, 0,
inetsw,
&inetsw[sizeof(inetsw)/sizeof(inetsw[0])], 0,
in_inithead, 32, sizeof(struct sockaddr_in)
diff --git a/sys/netinet/in_var.h b/sys/netinet/in_var.h
index 23ba209..6b26e69 100644
--- a/sys/netinet/in_var.h
+++ b/sys/netinet/in_var.h
@@ -81,7 +81,7 @@ struct in_aliasreq {
#ifdef _KERNEL
extern u_char inetctlerrmap[];
-/*
+/*
* Hash table for IP addresses.
*/
extern LIST_HEAD(in_ifaddrhashhead, in_ifaddr) *in_ifaddrhashtbl;
diff --git a/sys/netinet/ip_gre.c b/sys/netinet/ip_gre.c
index c11317d..a66f00e 100644
--- a/sys/netinet/ip_gre.c
+++ b/sys/netinet/ip_gre.c
@@ -87,7 +87,7 @@
#include <machine/stdarg.h>
#if 1
-void gre_inet_ntoa(struct in_addr in); /* XXX */
+void gre_inet_ntoa(struct in_addr in); /* XXX */
#endif
static struct gre_softc *gre_lookup(struct mbuf *, u_int8_t);
@@ -231,8 +231,8 @@ void
gre_mobile_input(struct mbuf *m, ...)
#else
gre_mobile_input(m, va_alist)
- struct mbuf *m;
- va_dcl
+ struct mbuf *m;
+ va_dcl
#endif
{
struct ip *ip;
diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c
index 44c4080..125cdd3 100644
--- a/sys/netinet/ip_icmp.c
+++ b/sys/netinet/ip_icmp.c
@@ -91,11 +91,11 @@ SYSCTL_UINT(_net_inet_icmp, OID_AUTO, maskfake, CTLFLAG_RW,
&icmpmaskfake, 0, "Fake reply to ICMP Address Mask Request packets.");
static int drop_redirect = 0;
-SYSCTL_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW,
&drop_redirect, 0, "");
static int log_redirect = 0;
-SYSCTL_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW,
&log_redirect, 0, "");
static int icmplim = 200;
@@ -416,7 +416,7 @@ icmp_input(m, off)
* (if given) and then notify as usual. The ULPs will
* notice that the MTU has changed and adapt accordingly.
* If no new MTU was suggested, then we guess a new one
- * less than the current value. If the new MTU is
+ * less than the current value. If the new MTU is
* unreasonably small (defined by sysctl tcp_minmss), then
* we don't update the MTU value.
*
@@ -681,7 +681,7 @@ icmp_reflect(m)
goto match;
}
}
- /*
+ /*
* If the packet was transiting through us, use the address of
* the interface that is the closest to the packet source.
* When we don't have a route back to the packet source, stop here
@@ -869,7 +869,7 @@ ip_next_mtu(mtu, dir)
* badport_bandlim() - check for ICMP bandwidth limit
*
* Return 0 if it is ok to send an ICMP error response, -1 if we have
- * hit our bandwidth limit and it is not ok.
+ * hit our bandwidth limit and it is not ok.
*
* If icmplim is <= 0, the feature is disabled and 0 is returned.
*
@@ -880,7 +880,7 @@ ip_next_mtu(mtu, dir)
* Note that the printing of the error message is delayed so we can
* properly print the icmp error rate that the system was trying to do
* (i.e. 22000/100 pps, etc...). This can cause long delays in printing
- * the 'final' error, but it doesn't make sense to solve the printing
+ * the 'final' error, but it doesn't make sense to solve the printing
* delay with more complex code.
*/
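Aside: the badport_bandlim() contract described in the hunk above (return 0
when a response may be sent, -1 once the bandwidth limit is hit, and always
0 when icmplim <= 0) can be illustrated with a toy fixed-window counter.
This is a simplified sketch, not the kernel routine -- the real code
measures over finer-grained intervals and logs the attempted rate later:

/*
 * Toy fixed-window limiter illustrating the badport_bandlim()
 * return contract; hypothetical helper, not kernel code.
 */
#include <time.h>

static int icmplim = 200;		/* max error responses per window */

static int
toy_bandlim(time_t now)
{
	static time_t window;
	static int count;

	if (icmplim <= 0)
		return (0);		/* feature disabled: always ok */
	if (now != window) {
		window = now;		/* start a new one-second window */
		count = 0;
	}
	if (++count > icmplim)
		return (-1);		/* limit hit: suppress response */
	return (0);			/* ok to send the ICMP error */
}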
diff --git a/sys/netinet/ip_id.c b/sys/netinet/ip_id.c
index c8455f8..eebc069 100644
--- a/sys/netinet/ip_id.c
+++ b/sys/netinet/ip_id.c
@@ -37,7 +37,7 @@
* $FreeBSD$
*/
-/*
+/*
* seed = random 15bit
* n = prime, g0 = generator to n,
* j = random so that gcd(j,n-1) == 1
@@ -45,7 +45,7 @@
*
* X[0] = random seed.
* X[n] = a*X[n-1]+b mod m is a Linear Congruential Generator
- * with a = 7^(even random) mod m,
+ * with a = 7^(even random) mod m,
* b = random with gcd(b,m) == 1
* m = 31104 and a maximal period of m-1.
*
@@ -72,7 +72,7 @@
#define PFAC_N 3
const static u_int16_t pfacts[PFAC_N] = {
- 2,
+ 2,
3,
2729
};
@@ -119,15 +119,15 @@ pmod(gen, exp, mod)
return (s);
}
-/*
- * Initalizes the seed and chooses a suitable generator. Also toggles
+/*
+ * Initalizes the seed and chooses a suitable generator. Also toggles
* the msb flag. The msb flag is used to generate two distinct
* cycles of random numbers and thus avoiding reuse of ids.
*
- * This function is called from id_randomid() when needed, an
+ * This function is called from id_randomid() when needed, an
* application does not have to worry about it.
*/
-static void
+static void
ip_initid(void)
{
u_int16_t j, i;
@@ -150,12 +150,12 @@ ip_initid(void)
ru_a = pmod(RU_AGEN, (tmp >> 16) & 0xfffe, RU_M);
while (ru_b % 3 == 0)
ru_b += 2;
-
+
read_random((void *) &tmp, sizeof(tmp));
j = tmp % RU_N;
tmp = tmp >> 16;
- /*
+ /*
* Do a fast gcd(j,RU_N-1), so we can find a j with
* gcd(j, RU_N-1) == 1, giving a new generator for
* RU_GEN^j mod RU_N
@@ -168,7 +168,7 @@ ip_initid(void)
if (i>=PFAC_N)
noprime = 0;
- else
+ else
j = (j+1) % RU_N;
}
@@ -176,7 +176,7 @@ ip_initid(void)
ru_counter = 0;
ru_reseed = time.tv_sec + RU_OUT;
- ru_msb = ru_msb == 0x8000 ? 0 : 0x8000;
+ ru_msb = ru_msb == 0x8000 ? 0 : 0x8000;
}
u_int16_t
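Aside: the ip_id.c comment touched in the first hunk above specifies the
recurrence X[n] = (a*X[n-1] + b) mod m with m = 31104, a = 7^(even) mod m,
and gcd(b, m) == 1. A standalone sketch of that recurrence follows, with
arbitrary constants chosen only to satisfy those stated constraints -- the
kernel derives a, b, and the seed from read_random() at runtime:

/*
 * Illustrative LCG step from the ip_id.c comment:
 * X[n] = (a*X[n-1] + b) mod m.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned m = 31104;	/* 2^7 * 3^5, the comment's m */
	const unsigned a = 24337;	/* 7^6 mod m, i.e. 7^(even) mod m */
	const unsigned b = 7;		/* odd, not divisible by 3: gcd(b, m) == 1 */
	unsigned x = 12345;		/* X[0]: arbitrary seed */
	int i;

	for (i = 0; i < 8; i++) {
		x = (a * x + b) % m;	/* one LCG step */
		printf("%u\n", x);
	}
	return (0);
}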
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index 57e8db8..b0490a4 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -131,7 +131,7 @@ static struct callout expire_upcalls_ch;
/*
* Define the token bucket filter structures
- * tbftable -> each vif has one of these for storing info
+ * tbftable -> each vif has one of these for storing info
*/
static struct tbf tbftable[MAXVIFS];
@@ -161,7 +161,7 @@ static struct ip multicast_encap_iphdr = {
sizeof(struct ip), /* total length */
0, /* id */
0, /* frag offset */
- ENCAP_TTL, ENCAP_PROTO,
+ ENCAP_TTL, ENCAP_PROTO,
0, /* checksum */
};
@@ -220,7 +220,7 @@ static struct ip pim_encap_iphdr = {
0, /* tos */
sizeof(struct ip), /* total length */
0, /* id */
- 0, /* frag offset */
+ 0, /* frag offset */
ENCAP_TTL,
IPPROTO_PIM,
0, /* checksum */
@@ -952,7 +952,7 @@ add_vif(struct vifctl *vifcp)
return EIO; /* XXX */
}
for (i = 0; i < MAXVIFS; ++i) {
- if_initname(&multicast_decap_if[i], "mdecap", i);
+ if_initname(&multicast_decap_if[i], "mdecap", i);
}
}
/*
@@ -1025,12 +1025,12 @@ add_vif(struct vifctl *vifcp)
if (mrtdebug)
log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n",
- vifcp->vifc_vifi,
+ vifcp->vifc_vifi,
(u_long)ntohl(vifcp->vifc_lcl_addr.s_addr),
(vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
(u_long)ntohl(vifcp->vifc_rmt_addr.s_addr),
vifcp->vifc_threshold,
- vifcp->vifc_rate_limit);
+ vifcp->vifc_rate_limit);
return 0;
}
@@ -1165,7 +1165,7 @@ add_mfc(struct mfcctl2 *mfccp)
return 0;
}
- /*
+ /*
* Find the entry for which the upcall was made and update
*/
hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
@@ -1174,7 +1174,7 @@ add_mfc(struct mfcctl2 *mfccp)
if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
(rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) &&
(rt->mfc_stall != NULL)) {
-
+
if (nstl++)
log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n",
"multiple kernel entries",
@@ -1215,7 +1215,7 @@ add_mfc(struct mfcctl2 *mfccp)
hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr),
(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
mfccp->mfcc_parent);
-
+
for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) {
if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
(rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) {
@@ -1233,11 +1233,11 @@ add_mfc(struct mfcctl2 *mfccp)
VIF_UNLOCK();
return ENOBUFS;
}
-
+
init_mfc_params(rt, mfccp);
rt->mfc_expire = 0;
rt->mfc_stall = NULL;
-
+
rt->mfc_bw_meter = NULL;
/* insert new entry at head of hash chain */
rt->mfc_next = mfctable[hash];
@@ -1255,11 +1255,11 @@ add_mfc(struct mfcctl2 *mfccp)
static int
del_mfc(struct mfcctl2 *mfccp)
{
- struct in_addr origin;
- struct in_addr mcastgrp;
- struct mfc *rt;
- struct mfc **nptr;
- u_long hash;
+ struct in_addr origin;
+ struct in_addr mcastgrp;
+ struct mfc *rt;
+ struct mfc **nptr;
+ u_long hash;
struct bw_meter *list;
origin = mfccp->mfcc_origin;
@@ -1486,11 +1486,11 @@ X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
if (mm == NULL)
goto fail1;
- /*
- * Send message to routing daemon to install
+ /*
+ * Send message to routing daemon to install
* a route into the kernel table
*/
-
+
im = mtod(mm, struct igmpmsg *);
im->im_msgtype = IGMPMSG_NOCACHE;
im->im_mbz = 0;
@@ -1559,15 +1559,15 @@ non_fatal:
*p = rte;
}
- rte->m = mb0;
- rte->ifp = ifp;
+ rte->m = mb0;
+ rte->ifp = ifp;
rte->next = NULL;
MFC_UNLOCK();
VIF_UNLOCK();
return 0;
- }
+ }
}
/*
@@ -1665,7 +1665,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
#ifdef PIM
if (viftable[xmt_vif].v_flags & VIFF_REGISTER)
pim_register_send(ip, viftable + xmt_vif, m, rt);
- else
+ else
#endif
MC_SEND(ip, viftable + xmt_vif, m);
return 1;
@@ -1679,7 +1679,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
/* came in the wrong interface */
if (mrtdebug & DEBUG_FORWARD)
log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
- (void *)ifp, vifi, (void *)viftable[vifi].v_ifp);
+ (void *)ifp, vifi, (void *)viftable[vifi].v_ifp);
++mrtstat.mrts_wrong_if;
++rt->mfc_wrong_if;
/*
@@ -1950,7 +1950,7 @@ tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_long p_len)
}
}
-/*
+/*
* adds a packet to the queue at the interface
*/
static void
@@ -1977,7 +1977,7 @@ tbf_queue(struct vif *vifp, struct mbuf *m)
t->tbf_q_len++;
}
-/*
+/*
* processes the queue at the interface
*/
static void
@@ -2014,7 +2014,7 @@ tbf_reprocess_q(void *xvifp)
{
struct vif *vifp = xvifp;
- if (ip_mrouter == NULL)
+ if (ip_mrouter == NULL)
return;
VIF_LOCK();
tbf_update_tokens(vifp);
@@ -2086,7 +2086,7 @@ tbf_send_packet(struct vif *vifp, struct mbuf *m)
error = ip_output(m, NULL, &ro, IP_FORWARDING, &imo, NULL);
if (mrtdebug & DEBUG_XMIT)
- log(LOG_DEBUG, "phyint_send on vif %d err %d\n",
+ log(LOG_DEBUG, "phyint_send on vif %d err %d\n",
(int)(vifp - viftable), error);
}
}
@@ -2158,7 +2158,7 @@ priority(struct vif *vifp, struct ip *ip)
}
/*
- * End of token bucket filter modifications
+ * End of token bucket filter modifications
*/
static int
@@ -2172,7 +2172,7 @@ X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
if (error)
return error;
-
+
VIF_LOCK();
if (vifi < 0 || vifi >= numvifs) { /* Error if vif is invalid */
@@ -2345,10 +2345,10 @@ compute_bw_meter_flags(struct bw_upcall *req)
flags |= BW_METER_GEQ;
if (req->bu_flags & BW_UPCALL_LEQ)
flags |= BW_METER_LEQ;
-
+
return flags;
}
-
+
/*
* Add a bw_meter entry
*/
@@ -2361,10 +2361,10 @@ add_bw_upcall(struct bw_upcall *req)
struct timeval now;
struct bw_meter *x;
uint32_t flags;
-
+
if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
return EOPNOTSUPP;
-
+
/* Test if the flags are valid */
if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
return EINVAL;
@@ -2373,11 +2373,11 @@ add_bw_upcall(struct bw_upcall *req)
if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
== (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
return EINVAL;
-
+
/* Test if the threshold time interval is valid */
if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
return EINVAL;
-
+
flags = compute_bw_meter_flags(req);
/*
@@ -2399,14 +2399,14 @@ add_bw_upcall(struct bw_upcall *req)
return 0; /* XXX Already installed */
}
}
-
+
/* Allocate the new bw_meter entry */
x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
if (x == NULL) {
MFC_UNLOCK();
return ENOBUFS;
}
-
+
/* Set the new bw_meter entry */
x->bm_threshold.b_time = req->bu_threshold.b_time;
GET_TIME(now);
@@ -2418,14 +2418,14 @@ add_bw_upcall(struct bw_upcall *req)
x->bm_flags = flags;
x->bm_time_next = NULL;
x->bm_time_hash = BW_METER_BUCKETS;
-
+
/* Add the new bw_meter entry to the front of entries for this MFC */
x->bm_mfc = mfc;
x->bm_mfc_next = mfc->mfc_bw_meter;
mfc->mfc_bw_meter = x;
schedule_bw_meter(x, &now);
MFC_UNLOCK();
-
+
return 0;
}
@@ -2449,10 +2449,10 @@ del_bw_upcall(struct bw_upcall *req)
{
struct mfc *mfc;
struct bw_meter *x;
-
+
if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
return EOPNOTSUPP;
-
+
MFC_LOCK();
/* Find the corresponding MFC entry */
mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr);
@@ -2464,7 +2464,7 @@ del_bw_upcall(struct bw_upcall *req)
* Delete all bw_meter entries for this mfc
*/
struct bw_meter *list;
-
+
list = mfc->mfc_bw_meter;
mfc->mfc_bw_meter = NULL;
free_bw_list(list);
@@ -2512,12 +2512,12 @@ static void
bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
{
struct timeval delta;
-
+
MFC_LOCK_ASSERT();
delta = *nowp;
BW_TIMEVALDECR(&delta, &x->bm_start_time);
-
+
if (x->bm_flags & BW_METER_GEQ) {
/*
* Processing for ">=" type of bw_meter entry
@@ -2529,15 +2529,15 @@ bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
x->bm_measured.b_bytes = 0;
x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
}
-
+
/* Record that a packet is received */
x->bm_measured.b_packets++;
x->bm_measured.b_bytes += plen;
-
+
/*
* Test if we should deliver an upcall
*/
- if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
+ if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
(x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
((x->bm_flags & BW_METER_UNIT_BYTES) &&
@@ -2568,11 +2568,11 @@ bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
unschedule_bw_meter(x);
schedule_bw_meter(x, nowp);
}
-
+
/* Record that a packet is received */
x->bm_measured.b_packets++;
x->bm_measured.b_bytes += plen;
-
+
/*
* Test if we should restart the measuring interval
*/
@@ -2605,21 +2605,21 @@ bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
{
struct timeval delta;
struct bw_upcall *u;
-
+
MFC_LOCK_ASSERT();
-
+
/*
- * Compute the measured time interval
+ * Compute the measured time interval
*/
delta = *nowp;
BW_TIMEVALDECR(&delta, &x->bm_start_time);
-
+
/*
* If there are too many pending upcalls, deliver them now
*/
if (bw_upcalls_n >= BW_UPCALLS_MAX)
bw_upcalls_send();
-
+
/*
* Set the bw_upcall entry
*/
@@ -2660,14 +2660,14 @@ bw_upcalls_send(void)
0, /* unused3 */
{ 0 }, /* im_src */
{ 0 } }; /* im_dst */
-
+
MFC_LOCK_ASSERT();
if (bw_upcalls_n == 0)
return; /* No pending upcalls */
bw_upcalls_n = 0;
-
+
/*
* Allocate a new mbuf, initialize it with the header and
* the payload for the pending calls.
@@ -2677,11 +2677,11 @@ bw_upcalls_send(void)
log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
return;
}
-
+
m->m_len = m->m_pkthdr.len = 0;
m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&bw_upcalls[0]);
-
+
/*
* Send the upcalls
* XXX do we need to set the address in k_igmpsrc ?
@@ -2715,12 +2715,12 @@ static void
schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
{
int time_hash;
-
+
MFC_LOCK_ASSERT();
if (!(x->bm_flags & BW_METER_LEQ))
return; /* XXX: we schedule timers only for "<=" entries */
-
+
/*
* Reset the bw_meter entry
*/
@@ -2728,7 +2728,7 @@ schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
x->bm_measured.b_packets = 0;
x->bm_measured.b_bytes = 0;
x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
-
+
/*
* Compute the timeout hash value and insert the entry
*/
@@ -2747,32 +2747,32 @@ unschedule_bw_meter(struct bw_meter *x)
{
int time_hash;
struct bw_meter *prev, *tmp;
-
+
MFC_LOCK_ASSERT();
if (!(x->bm_flags & BW_METER_LEQ))
return; /* XXX: we schedule timers only for "<=" entries */
-
+
/*
* Compute the timeout hash value and delete the entry
*/
time_hash = x->bm_time_hash;
if (time_hash >= BW_METER_BUCKETS)
return; /* Entry was not scheduled */
-
+
for (prev = NULL, tmp = bw_meter_timers[time_hash];
tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
if (tmp == x)
break;
-
+
if (tmp == NULL)
panic("unschedule_bw_meter: bw_meter entry not found");
-
+
if (prev != NULL)
prev->bm_time_next = x->bm_time_next;
else
bw_meter_timers[time_hash] = x->bm_time_next;
-
+
x->bm_time_next = NULL;
x->bm_time_hash = BW_METER_BUCKETS;
}
@@ -2795,7 +2795,7 @@ bw_meter_process()
uint32_t loops;
int i;
struct timeval now, process_endtime;
-
+
GET_TIME(now);
if (last_tv_sec == now.tv_sec)
return; /* nothing to do */
@@ -2813,26 +2813,26 @@ bw_meter_process()
*/
for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
struct bw_meter *x, *tmp_list;
-
+
if (++i >= BW_METER_BUCKETS)
i = 0;
-
+
/* Disconnect the list of bw_meter entries from the bin */
tmp_list = bw_meter_timers[i];
bw_meter_timers[i] = NULL;
-
+
/* Process the list of bw_meter entries */
while (tmp_list != NULL) {
x = tmp_list;
tmp_list = tmp_list->bm_time_next;
-
+
/* Test if the time interval is over */
process_endtime = x->bm_start_time;
BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
/* Not yet: reschedule, but don't reset */
int time_hash;
-
+
BW_METER_TIMEHASH(x, time_hash);
if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
/*
@@ -2845,10 +2845,10 @@ bw_meter_process()
x->bm_time_next = bw_meter_timers[time_hash];
bw_meter_timers[time_hash] = x;
x->bm_time_hash = time_hash;
-
+
continue;
}
-
+
/*
* Test if we should deliver an upcall
*/
@@ -2859,14 +2859,14 @@ bw_meter_process()
/* Prepare an upcall for delivery */
bw_meter_prepare_upcall(x, &now);
}
-
+
/*
* Reschedule for next processing
*/
schedule_bw_meter(x, &now);
}
}
-
+
/* Send all upcalls that are pending delivery */
bw_upcalls_send();
@@ -2882,7 +2882,7 @@ expire_bw_upcalls_send(void *unused)
MFC_LOCK();
bw_upcalls_send();
MFC_UNLOCK();
-
+
callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
expire_bw_upcalls_send, NULL);
}
@@ -2896,7 +2896,7 @@ expire_bw_meter_process(void *unused)
{
if (mrt_api_config & MRT_MFC_BW_UPCALL)
bw_meter_process();
-
+
callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
}
@@ -2914,14 +2914,14 @@ pim_register_send(struct ip *ip, struct vif *vifp,
struct mbuf *m, struct mfc *rt)
{
struct mbuf *mb_copy, *mm;
-
+
if (mrtdebug & DEBUG_PIM)
- log(LOG_DEBUG, "pim_register_send: ");
-
+ log(LOG_DEBUG, "pim_register_send: ");
+
mb_copy = pim_register_prepare(ip, m);
if (mb_copy == NULL)
return ENOBUFS;
-
+
/*
* Send all the fragments. Note that the mbuf for each fragment
* is freed by the sending machinery.
@@ -2940,7 +2940,7 @@ pim_register_send(struct ip *ip, struct vif *vifp,
}
}
}
-
+
return 0;
}
@@ -2954,7 +2954,7 @@ pim_register_prepare(struct ip *ip, struct mbuf *m)
{
struct mbuf *mb_copy = NULL;
int mtu;
-
+
/* Take care of delayed checksums */
if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
in_delayed_cksum(m);
@@ -2971,14 +2971,14 @@ pim_register_prepare(struct ip *ip, struct mbuf *m)
mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
if (mb_copy == NULL)
return NULL;
-
+
/* take care of the TTL */
ip = mtod(mb_copy, struct ip *);
--ip->ip_ttl;
-
+
/* Compute the MTU after the PIM Register encapsulation */
mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
-
+
if (ip->ip_len <= mtu) {
/* Turn the IP header into a valid one */
ip->ip_len = htons(ip->ip_len);
@@ -3006,7 +3006,7 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
int len = ntohs(ip->ip_len);
struct igmpmsg *im;
struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
-
+
VIF_LOCK_ASSERT();
/*
@@ -3021,7 +3021,7 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
mb_first->m_len = sizeof(struct igmpmsg);
mb_first->m_next = mb_copy;
-
+
/* Send message to routing daemon */
im = mtod(mb_first, struct igmpmsg *);
im->im_msgtype = IGMPMSG_WHOLEPKT;
@@ -3029,11 +3029,11 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
im->im_vif = vifp - viftable;
im->im_src = ip->ip_src;
im->im_dst = ip->ip_dst;
-
+
k_igmpsrc.sin_addr = ip->ip_src;
-
+
mrtstat.mrts_upcalls++;
-
+
if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) {
if (mrtdebug & DEBUG_PIM)
log(LOG_WARNING,
@@ -3041,11 +3041,11 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
++mrtstat.mrts_upq_sockfull;
return ENOBUFS;
}
-
+
/* Keep statistics */
pimstat.pims_snd_registers_msgs++;
pimstat.pims_snd_registers_bytes += len;
-
+
return 0;
}
@@ -3061,14 +3061,14 @@ pim_register_send_rp(struct ip *ip, struct vif *vifp,
struct pim_encap_pimhdr *pimhdr;
int len = ntohs(ip->ip_len);
vifi_t vifi = rt->mfc_parent;
-
+
VIF_LOCK_ASSERT();
-
+
if ((vifi >= numvifs) || (viftable[vifi].v_lcl_addr.s_addr == 0)) {
m_freem(mb_copy);
return EADDRNOTAVAIL; /* The iif vif is invalid */
}
-
+
/*
* Add a new mbuf with the encapsulating header
*/
@@ -3082,7 +3082,7 @@ pim_register_send_rp(struct ip *ip, struct vif *vifp,
mb_first->m_next = mb_copy;
mb_first->m_pkthdr.len = len + mb_first->m_len;
-
+
/*
* Fill in the encapsulating IP and PIM header
*/
@@ -3105,20 +3105,20 @@ pim_register_send_rp(struct ip *ip, struct vif *vifp,
/* If the iif crosses a border, set the Border-bit */
if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config)
pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
-
+
mb_first->m_data += sizeof(pim_encap_iphdr);
pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
mb_first->m_data -= sizeof(pim_encap_iphdr);
-
+
if (vifp->v_rate_limit == 0)
tbf_send_packet(vifp, mb_first);
else
tbf_control(vifp, mb_first, ip, ip_outer->ip_len);
-
+
/* Keep statistics */
pimstat.pims_snd_registers_msgs++;
pimstat.pims_snd_registers_bytes += len;
-
+
return 0;
}
@@ -3139,11 +3139,11 @@ pim_input(struct mbuf *m, int off)
int datalen = ip->ip_len;
int ip_tos;
int iphlen = off;
-
+
/* Keep statistics */
pimstat.pims_rcv_total_msgs++;
pimstat.pims_rcv_total_bytes += datalen;
-
+
/*
* Validate lengths
*/
@@ -3154,12 +3154,12 @@ pim_input(struct mbuf *m, int off)
m_freem(m);
return;
}
-
+
/*
* If the packet is at least as big as a REGISTER, go agead
* and grab the PIM REGISTER header size, to avoid another
* possible m_pullup() later.
- *
+ *
* PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
* PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
*/
@@ -3176,12 +3176,12 @@ pim_input(struct mbuf *m, int off)
/* m_pullup() may have given us a new mbuf so reset ip. */
ip = mtod(m, struct ip *);
ip_tos = ip->ip_tos;
-
+
/* adjust mbuf to point to the PIM header */
m->m_data += iphlen;
m->m_len -= iphlen;
pim = mtod(m, struct pim *);
-
+
/*
* Validate checksum. If PIM REGISTER, exclude the data packet.
*
@@ -3207,11 +3207,11 @@ pim_input(struct mbuf *m, int off)
m_freem(m);
return;
}
-
+
/* restore mbuf back to the outer IP */
m->m_data -= iphlen;
m->m_len += iphlen;
-
+
if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
/*
* Since this is a REGISTER, we'll make a copy of the register
@@ -3223,7 +3223,7 @@ pim_input(struct mbuf *m, int off)
struct ip *encap_ip;
u_int32_t *reghdr;
struct ifnet *vifp;
-
+
VIF_LOCK();
if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) {
VIF_UNLOCK();
@@ -3236,7 +3236,7 @@ pim_input(struct mbuf *m, int off)
/* XXX need refcnt? */
vifp = viftable[reg_vif_num].v_ifp;
VIF_UNLOCK();
-
+
/*
* Validate length
*/
@@ -3249,10 +3249,10 @@ pim_input(struct mbuf *m, int off)
m_freem(m);
return;
}
-
+
reghdr = (u_int32_t *)(pim + 1);
encap_ip = (struct ip *)(reghdr + 1);
-
+
if (mrtdebug & DEBUG_PIM) {
log(LOG_DEBUG,
"pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n",
@@ -3260,7 +3260,7 @@ pim_input(struct mbuf *m, int off)
(u_long)ntohl(encap_ip->ip_dst.s_addr),
ntohs(encap_ip->ip_len));
}
-
+
/* verify the version number of the inner packet */
if (encap_ip->ip_v != IPVERSION) {
pimstat.pims_rcv_badregisters++;
@@ -3271,7 +3271,7 @@ pim_input(struct mbuf *m, int off)
m_freem(m);
return;
}
-
+
/* verify the inner packet is destined to a mcast group */
if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
pimstat.pims_rcv_badregisters++;
@@ -3295,14 +3295,14 @@ pim_input(struct mbuf *m, int off)
/* Outer TOS -> inner TOS */
encap_ip->ip_tos = ip_tos;
/* Recompute the inner header checksum. Sigh... */
-
+
/* adjust mbuf to point to the inner IP header */
m->m_data += (iphlen + PIM_MINLEN);
m->m_len -= (iphlen + PIM_MINLEN);
-
+
encap_ip->ip_sum = 0;
encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
-
+
/* restore mbuf to point back to the outer IP header */
m->m_data -= (iphlen + PIM_MINLEN);
m->m_len += (iphlen + PIM_MINLEN);
@@ -3310,7 +3310,7 @@ pim_input(struct mbuf *m, int off)
/*
* Decapsulate the inner IP packet and loopback to forward it
- * as a normal multicast packet. Also, make a copy of the
+ * as a normal multicast packet. Also, make a copy of the
* outer_iphdr + pimhdr + reghdr + encap_iphdr
* to pass to the daemon later, so it can take the appropriate
* actions (e.g., send back PIM_REGISTER_STOP).
@@ -3323,17 +3323,17 @@ pim_input(struct mbuf *m, int off)
m_freem(m);
return;
}
-
+
/* Keep statistics */
/* XXX: registers_bytes include only the encap. mcast pkt */
pimstat.pims_rcv_registers_msgs++;
pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len);
-
+
/*
* forward the inner ip packet; point m_data at the inner ip.
*/
m_adj(m, iphlen + PIM_MINLEN);
-
+
if (mrtdebug & DEBUG_PIM) {
log(LOG_DEBUG,
"pim_input: forwarding decapsulated register: "
@@ -3344,12 +3344,12 @@ pim_input(struct mbuf *m, int off)
}
/* NB: vifp was collected above; can it change on us? */
if_simloop(vifp, m, dst.sin_family, 0);
-
+
/* prepare the register head to send to the mrouting daemon */
m = mcp;
}
-pim_input_to_daemon:
+pim_input_to_daemon:
/*
* Pass the PIM message up to the daemon; if it is a Register message,
* pass the 'head' only up to the daemon. This includes the
diff --git a/sys/netinet/ip_mroute.h b/sys/netinet/ip_mroute.h
index e677a5b..d4bb517 100644
--- a/sys/netinet/ip_mroute.h
+++ b/sys/netinet/ip_mroute.h
@@ -94,9 +94,9 @@ typedef u_short vifi_t; /* type of a vif index */
* (MRT_DEL_VIF takes a single vifi_t argument.)
*/
struct vifctl {
- vifi_t vifc_vifi; /* the index of the vif to be added */
- u_char vifc_flags; /* VIFF_ flags defined below */
- u_char vifc_threshold; /* min ttl required to forward on vif */
+ vifi_t vifc_vifi; /* the index of the vif to be added */
+ u_char vifc_flags; /* VIFF_ flags defined below */
+ u_char vifc_threshold; /* min ttl required to forward on vif */
u_int vifc_rate_limit; /* max rate */
struct in_addr vifc_lcl_addr; /* local interface address */
struct in_addr vifc_rmt_addr; /* remote address (tunnels only) */
@@ -112,9 +112,9 @@ struct vifctl {
*/
struct mfcctl {
struct in_addr mfcc_origin; /* ip origin of mcasts */
- struct in_addr mfcc_mcastgrp; /* multicast group associated*/
- vifi_t mfcc_parent; /* incoming vif */
- u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+ struct in_addr mfcc_mcastgrp; /* multicast group associated*/
+ vifi_t mfcc_parent; /* incoming vif */
+ u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
};
/*
@@ -126,7 +126,7 @@ struct mfcctl2 {
struct in_addr mfcc_origin; /* ip origin of mcasts */
struct in_addr mfcc_mcastgrp; /* multicast group associated*/
vifi_t mfcc_parent; /* incoming vif */
- u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+ u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
/* extension fields */
uint8_t mfcc_flags[MAXVIFS]; /* the MRT_MFC_FLAGS_* flags */
@@ -165,7 +165,7 @@ struct mfcctl2 {
*
* Measurement works as follows:
*
- * For >= measurements:
+ * For >= measurements:
* The first packet marks the start of a measurement interval.
* During an interval we count packets and bytes, and when we
* pass the threshold we deliver an upcall and we are done.
@@ -217,10 +217,10 @@ struct mrtstat {
u_long mrts_cant_tunnel; /* no room for tunnel options */
u_long mrts_wrong_if; /* arrived on wrong interface */
u_long mrts_upq_ovflw; /* upcall Q overflow */
- u_long mrts_cache_cleanups; /* # entries with no upcalls */
- u_long mrts_drop_sel; /* pkts dropped selectively */
- u_long mrts_q_overflow; /* pkts dropped - Q overflow */
- u_long mrts_pkt2large; /* pkts dropped - size > BKT SIZE */
+ u_long mrts_cache_cleanups; /* # entries with no upcalls */
+ u_long mrts_drop_sel; /* pkts dropped selectively */
+ u_long mrts_q_overflow; /* pkts dropped - Q overflow */
+ u_long mrts_pkt2large; /* pkts dropped - size > BKT SIZE */
u_long mrts_upq_sockfull; /* upcalls dropped - socket full */
};
@@ -245,19 +245,19 @@ struct sioc_vif_req {
u_long ibytes; /* Input byte count on vif */
u_long obytes; /* Output byte count on vif */
};
-
+
/*
* The kernel's virtual-interface structure.
*/
struct vif {
- u_char v_flags; /* VIFF_ flags defined above */
- u_char v_threshold; /* min ttl required to forward on vif*/
- u_int v_rate_limit; /* max rate */
- struct tbf *v_tbf; /* token bucket structure at intf. */
- struct in_addr v_lcl_addr; /* local interface address */
- struct in_addr v_rmt_addr; /* remote address (tunnels only) */
- struct ifnet *v_ifp; /* pointer to interface */
+ u_char v_flags; /* VIFF_ flags defined above */
+ u_char v_threshold; /* min ttl required to forward on vif*/
+ u_int v_rate_limit; /* max rate */
+ struct tbf *v_tbf; /* token bucket structure at intf. */
+ struct in_addr v_lcl_addr; /* local interface address */
+ struct in_addr v_rmt_addr; /* remote address (tunnels only) */
+ struct ifnet *v_ifp; /* pointer to interface */
u_long v_pkt_in; /* # pkts in on interface */
u_long v_pkt_out; /* # pkts out on interface */
u_long v_bytes_in; /* # bytes in on interface */
@@ -268,15 +268,15 @@ struct vif {
};
/*
- * The kernel's multicast forwarding cache entry structure
- * (A field for the type of service (mfc_tos) is to be added
+ * The kernel's multicast forwarding cache entry structure
+ * (A field for the type of service (mfc_tos) is to be added
* at a future point)
*/
struct mfc {
struct in_addr mfc_origin; /* IP origin of mcasts */
- struct in_addr mfc_mcastgrp; /* multicast group associated*/
- vifi_t mfc_parent; /* incoming vif */
- u_char mfc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+ struct in_addr mfc_mcastgrp; /* multicast group associated*/
+ vifi_t mfc_parent; /* incoming vif */
+ u_char mfc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
u_long mfc_pkt_cnt; /* pkt count for src-grp */
u_long mfc_byte_cnt; /* byte count for src-grp */
u_long mfc_wrong_if; /* wrong if for src-grp */
@@ -311,7 +311,7 @@ struct igmpmsg {
* Argument structure used for pkt info. while upcall is made
*/
struct rtdetq {
- struct mbuf *m; /* A copy of the packet */
+ struct mbuf *m; /* A copy of the packet */
struct ifnet *ifp; /* Interface pkt came in on */
vifi_t xmt_vif; /* Saved copy of imo_multicast_vif */
struct rtdetq *next; /* Next in list of packets */
@@ -327,19 +327,19 @@ struct rtdetq {
#define MAX_UPQ 4 /* max. no of pkts in upcall Q */
/*
- * Token Bucket filter code
+ * Token Bucket filter code
*/
-#define MAX_BKT_SIZE 10000 /* 10K bytes size */
-#define MAXQSIZE 10 /* max # of pkts in queue */
+#define MAX_BKT_SIZE 10000 /* 10K bytes size */
+#define MAXQSIZE 10 /* max # of pkts in queue */
/*
* the token bucket filter at each vif
*/
struct tbf
{
- struct timeval tbf_last_pkt_t; /* arr. time of last pkt */
- u_long tbf_n_tok; /* no of tokens in bucket */
- u_long tbf_q_len; /* length of queue at this vif */
+ struct timeval tbf_last_pkt_t; /* arr. time of last pkt */
+ u_long tbf_n_tok; /* no of tokens in bucket */
+ u_long tbf_q_len; /* length of queue at this vif */
u_long tbf_max_q_len; /* max. queue length */
struct mbuf *tbf_q; /* Packet queue */
struct mbuf *tbf_t; /* tail-insertion pointer */
@@ -359,7 +359,7 @@ struct bw_meter {
#define BW_METER_UNIT_BYTES (1 << 1) /* threshold (in bytes) */
#define BW_METER_GEQ (1 << 2) /* upcall if bw >= threshold */
#define BW_METER_LEQ (1 << 3) /* upcall if bw <= threshold */
-#define BW_METER_USER_FLAGS (BW_METER_UNIT_PACKETS | \
+#define BW_METER_USER_FLAGS (BW_METER_UNIT_PACKETS | \
BW_METER_UNIT_BYTES | \
BW_METER_GEQ | \
BW_METER_LEQ)
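Aside: struct tbf above keeps a timestamp of the last packet
(tbf_last_pkt_t), a token count (tbf_n_tok), and a bounded queue. The
admission idea those fields support can be sketched in a few lines -- an
illustrative model only, not the kernel's tbf_control(), which also manages
the mbuf queue and per-vif rate limits:

/*
 * Toy token-bucket admission modeled loosely on the struct tbf
 * fields above; hypothetical helper, not kernel code.  Tokens accrue
 * at `rate` bytes/sec up to the bucket depth, and a packet of p_len
 * bytes is admitted iff enough tokens remain.
 */
#include <stdbool.h>

#define TOY_BKT_SIZE 10000		/* bucket depth, cf. MAX_BKT_SIZE */

struct toy_tbf {
	double last;			/* time of last update (seconds) */
	double tokens;			/* current tokens, in bytes */
};

static bool
toy_tbf_admit(struct toy_tbf *t, double now, double rate, unsigned p_len)
{
	t->tokens += (now - t->last) * rate;	/* accrue since last update */
	if (t->tokens > TOY_BKT_SIZE)
		t->tokens = TOY_BKT_SIZE;	/* cap at bucket depth */
	t->last = now;
	if (t->tokens < p_len)
		return (false);			/* over rate: queue or drop */
	t->tokens -= p_len;
	return (true);				/* within rate: send now */
}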
diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c
index 3f3fb02..50e1961 100644
--- a/sys/netinet/raw_ip.c
+++ b/sys/netinet/raw_ip.c
@@ -94,7 +94,7 @@ int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
- struct ip_moptions *);
+ struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);
@@ -207,10 +207,10 @@ rip_input(struct mbuf *m, int off)
goto docontinue;
#endif
if (inp->inp_laddr.s_addr &&
- inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
+ inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
goto docontinue;
if (inp->inp_faddr.s_addr &&
- inp->inp_faddr.s_addr != ip->ip_src.s_addr)
+ inp->inp_faddr.s_addr != ip->ip_src.s_addr)
goto docontinue;
if (jailed(inp->inp_socket->so_cred))
if (htonl(prison_getip(inp->inp_socket->so_cred)) !=
diff --git a/sys/netinet/tcp.h b/sys/netinet/tcp.h
index 92460d9..cda7257 100644
--- a/sys/netinet/tcp.h
+++ b/sys/netinet/tcp.h
@@ -151,7 +151,7 @@ struct tcphdr {
#define TCP_MAX_WINSHIFT 14 /* maximum window shift */
-#define TCP_MAXBURST 4 /* maximum segments in a burst */
+#define TCP_MAXBURST 4 /* maximum segments in a burst */
#define TCP_MAXHLEN (0xf<<2) /* max length of header in bytes */
#define TCP_MAXOLEN (TCP_MAXHLEN - sizeof(struct tcphdr))
diff --git a/sys/netinet/tcp_debug.h b/sys/netinet/tcp_debug.h
index c7e3663..b6033ce 100644
--- a/sys/netinet/tcp_debug.h
+++ b/sys/netinet/tcp_debug.h
@@ -57,7 +57,7 @@ struct tcp_debug {
struct tcpcb td_cb;
};
-#define TA_INPUT 0
+#define TA_INPUT 0
#define TA_OUTPUT 1
#define TA_USER 2
#define TA_RESPOND 3
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index ab9b00a..bebd51f 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -182,11 +182,11 @@ SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
#define HOSTCACHE_HASH(ip) \
- (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
+ (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
tcp_hostcache.hashmask)
/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
-#define HOSTCACHE_HASH6(ip6) \
+#define HOSTCACHE_HASH6(ip6) \
(((ip6)->s6_addr32[0] ^ \
(ip6)->s6_addr32[1] ^ \
(ip6)->s6_addr32[2] ^ \
@@ -211,16 +211,16 @@ tcp_hc_init(void)
tcp_hostcache.hashsize * tcp_hostcache.bucket_limit;
tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
- TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
+ TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
&tcp_hostcache.hashsize);
- TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
+ TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
&tcp_hostcache.cache_limit);
- TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
+ TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
&tcp_hostcache.bucket_limit);
if (!powerof2(tcp_hostcache.hashsize)) {
- printf("WARNING: hostcache hash size is not a power of 2.\n");
+ printf("WARNING: hostcache hash size is not a power of 2.\n");
tcp_hostcache.hashsize = 512; /* safe default */
- }
+ }
tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;
/*
@@ -311,7 +311,7 @@ tcp_hc_lookup(struct in_conninfo *inc)
/*
* Internal function: insert an entry into the hostcache or return NULL
* if unable to allocate a new one.
- *
+ *
* If an entry has been returned, the caller becomes responsible for
* unlocking the bucket row after he is done reading/modifying the entry.
*/
@@ -545,7 +545,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
}
if (hcml->rmx_rttvar != 0) {
if (hc_entry->rmx_rttvar == 0)
- hc_entry->rmx_rttvar = hcml->rmx_rttvar;
+ hc_entry->rmx_rttvar = hcml->rmx_rttvar;
else
hc_entry->rmx_rttvar =
(hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
@@ -581,8 +581,8 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_sendpipe =
(hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
- /* tcpstat.tcps_cachedsendpipe++; */
- }
+ /* tcpstat.tcps_cachedsendpipe++; */
+ }
if (hcml->rmx_recvpipe != 0) {
if (hc_entry->rmx_recvpipe == 0)
hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index ffde973..902044b 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -105,7 +105,7 @@ SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
&tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
static int log_in_vain = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
&log_in_vain, 0, "Log all incoming TCP connections");
static int blackhole = 0;
@@ -113,8 +113,8 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
&blackhole, 0, "Do not send RST when dropping refused connections");
int tcp_delack_enabled = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
- &tcp_delack_enabled, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
+ &tcp_delack_enabled, 0,
"Delay ACK to try and piggyback it onto a data packet");
#ifdef TCP_DROP_SYNFIN
@@ -394,7 +394,7 @@ tcp6_input(mp, offp, proto)
* better place to put this in?
*/
ia6 = ip6_getdstifaddr(m);
- if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
+ if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
struct ip6_hdr *ip6;
ip6 = mtod(m, struct ip6_hdr *);
@@ -615,7 +615,7 @@ findpcb:
if (next_hop != NULL && isipv6 == 0) { /* IPv6 support is not yet */
/*
* Transparently forwarded. Pretend to be the destination.
- * already got one like this?
+ * already got one like this?
*/
inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
@@ -643,7 +643,7 @@ findpcb:
ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
1, m->m_pkthdr.rcvif);
- }
+ }
#if defined(IPSEC) || defined(FAST_IPSEC)
#ifdef INET6
@@ -707,7 +707,7 @@ findpcb:
break;
}
}
- if (blackhole) {
+ if (blackhole) {
switch (blackhole) {
case 1:
if (thflags & TH_SYN)
@@ -976,7 +976,7 @@ findpcb:
*/
if (thflags & TH_FIN || tlen != 0)
tp->t_flags |= (TF_DELACK | TF_NEEDSYN);
- else
+ else
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
tcpstat.tcps_connects++;
soisconnected(so);
@@ -1143,7 +1143,7 @@ after_listen:
tp->snd_cwnd >= tp->snd_wnd &&
((!tcp_do_newreno && !tp->sack_enable &&
tp->t_dupacks < tcprexmtthresh) ||
- ((tcp_do_newreno || tp->sack_enable) &&
+ ((tcp_do_newreno || tp->sack_enable) &&
!IN_FASTRECOVERY(tp)))) {
KASSERT(headlocked, ("headlocked"));
INP_INFO_WUNLOCK(&tcbinfo);
@@ -1171,7 +1171,7 @@ after_listen:
* Recalculate the transmit timer / rtt.
*
* Some boxes send broken timestamp replies
- * during the SYN+ACK phase, ignore
+ * during the SYN+ACK phase, ignore
* timestamps of 0 or we could calculate a
* huge RTT and blow up the retransmit timer.
*/
@@ -1221,7 +1221,7 @@ after_listen:
if (tp->snd_una == tp->snd_max)
callout_stop(tp->tt_rexmt);
else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt,
+ callout_reset(tp->tt_rexmt,
tp->t_rxtcur,
tcp_timer_rexmt, tp);
@@ -1404,8 +1404,8 @@ after_listen:
* ACKNOW will be turned on later.
*/
if (DELAY_ACK(tp) && tlen != 0)
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ callout_reset(tp->tt_delack, tcp_delacktime,
+ tcp_timer_delack, tp);
else
tp->t_flags |= TF_ACKNOW;
/*
@@ -1426,15 +1426,15 @@ after_listen:
}
} else {
/*
- * Received initial SYN in SYN-SENT[*] state =>
- * simultaneous open. If segment contains CC option
- * and there is a cached CC, apply TAO test.
- * If it succeeds, connection is * half-synchronized.
- * Otherwise, do 3-way handshake:
- * SYN-SENT -> SYN-RECEIVED
- * SYN-SENT* -> SYN-RECEIVED*
- * If there was no CC option, clear cached CC value.
- */
+ * Received initial SYN in SYN-SENT[*] state =>
+ * simultaneous open. If segment contains CC option
+ * and there is a cached CC, apply TAO test.
+ * If it succeeds, connection is * half-synchronized.
+ * Otherwise, do 3-way handshake:
+ * SYN-SENT -> SYN-RECEIVED
+ * SYN-SENT* -> SYN-RECEIVED*
+ * If there was no CC option, clear cached CC value.
+ */
tp->t_flags |= TF_ACKNOW;
callout_stop(tp->tt_rexmt);
if (to.to_flags & TOF_CC) {
@@ -1501,7 +1501,7 @@ trimthenstep6:
* processing in the middle of step 5, ack processing.
* Otherwise, goto step 6.
*/
- if (thflags & TH_ACK)
+ if (thflags & TH_ACK)
goto process_ACK;
goto step6;
@@ -1538,7 +1538,7 @@ trimthenstep6:
else
goto drop;
}
- break; /* continue normal processing */
+ break; /* continue normal processing */
}
/*
@@ -1574,7 +1574,7 @@ trimthenstep6:
* we use a much stricter check while in the ESTABLISHED state,
* only accepting RSTs where the sequence number is equal to
* last_ack_sent. In all other states (the states in which a
- * RST is more likely), the more permissive check is used.
+ * RST is more likely), the more permissive check is used.
* If we have multiple segments in flight, the intial reset
* segment sequence numbers will be to the left of last_ack_sent,
* but they will eventually catch up.
@@ -1680,7 +1680,7 @@ trimthenstep6:
*/
if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) &&
((to.to_flags & TOF_CC) == 0 || tp->cc_recv != to.to_cc))
- goto dropafterack;
+ goto dropafterack;
/*
* In the SYN-RECEIVED state, validate that the packet belongs to
@@ -1869,7 +1869,7 @@ trimthenstep6:
tp->t_flags &= ~TF_NEEDFIN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
+ callout_reset(tp->tt_keep, tcp_keepidle,
tcp_timer_keep, tp);
}
/*
@@ -1939,8 +1939,8 @@ trimthenstep6:
u_int win;
/*
- * If we're doing sack, check to
- * see if we're already in sack
+ * If we're doing sack, check to
+ * see if we're already in sack
* recovery. If we're not doing sack,
* check to see if we're in newreno
* recovery.
@@ -1968,11 +1968,11 @@ trimthenstep6:
tp->t_rtttime = 0;
if (tp->sack_enable) {
tcpstat.tcps_sack_recovery_episode++;
- tp->snd_cwnd =
- tp->t_maxseg *
+ tp->snd_cwnd =
+ tp->t_maxseg *
tp->t_dupacks;
(void) tcp_output(tp);
- tp->snd_cwnd +=
+ tp->snd_cwnd +=
tp->snd_ssthresh;
goto drop;
}
@@ -2054,11 +2054,11 @@ trimthenstep6:
tp->snd_cwnd = tp->snd_ssthresh;
}
}
- } else {
- if (tp->t_dupacks >= tcprexmtthresh &&
- tp->snd_cwnd > tp->snd_ssthresh)
+ } else {
+ if (tp->t_dupacks >= tcprexmtthresh &&
+ tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
- }
+ }
tp->t_dupacks = 0;
if (SEQ_GT(th->th_ack, tp->snd_max)) {
tcpstat.tcps_rcvacktoomuch++;
@@ -2110,7 +2110,7 @@ process_ACK:
if (tp->t_flags & TF_WASFRECOVERY)
ENTER_FASTRECOVERY(tp);
tp->snd_nxt = tp->snd_max;
- tp->t_badrxtwin = 0; /* XXX probably not required */
+ tp->t_badrxtwin = 0; /* XXX probably not required */
}
/*
@@ -2123,7 +2123,7 @@ process_ACK:
* Recompute the initial retransmit timer.
*
* Some boxes send broken timestamp replies
- * during the SYN+ACK phase, ignore
+ * during the SYN+ACK phase, ignore
* timestamps of 0 or we could calculate a
* huge RTT and blow up the retransmit timer.
*/
@@ -2162,7 +2162,7 @@ process_ACK:
* Otherwise open linearly: maxseg per window
* (maxseg^2 / cwnd per packet).
*/
- if ((!tcp_do_newreno && !tp->sack_enable) ||
+ if ((!tcp_do_newreno && !tp->sack_enable) ||
!IN_FASTRECOVERY(tp)) {
register u_int cw = tp->snd_cwnd;
register u_int incr = tp->t_maxseg;
@@ -2182,12 +2182,12 @@ process_ACK:
}
sowwakeup_locked(so);
/* detect una wraparound */
- if ((tcp_do_newreno || tp->sack_enable) &&
+ if ((tcp_do_newreno || tp->sack_enable) &&
!IN_FASTRECOVERY(tp) &&
SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
tp->snd_recover = th->th_ack - 1;
- if ((tcp_do_newreno || tp->sack_enable) &&
+ if ((tcp_do_newreno || tp->sack_enable) &&
IN_FASTRECOVERY(tp) &&
SEQ_GEQ(th->th_ack, tp->snd_recover))
EXIT_FASTRECOVERY(tp);
@@ -2195,7 +2195,7 @@ process_ACK:
if (tp->sack_enable) {
if (SEQ_GT(tp->snd_una, tp->snd_recover))
tp->snd_recover = tp->snd_una;
- }
+ }
if (SEQ_LT(tp->snd_nxt, tp->snd_una))
tp->snd_nxt = tp->snd_una;
@@ -2228,7 +2228,7 @@ process_ACK:
}
break;
- /*
+ /*
* In CLOSING STATE in addition to the processing for
* the ESTABLISHED state if the ACK acknowledges our FIN
* then enter the TIME-WAIT state, otherwise ignore
@@ -2440,7 +2440,7 @@ dodata: /* XXX */
}
switch (tp->t_state) {
- /*
+ /*
* In SYN_RECEIVED and ESTABLISHED STATES
* enter the CLOSE_WAIT state.
*/
@@ -2451,7 +2451,7 @@ dodata: /* XXX */
tp->t_state = TCPS_CLOSE_WAIT;
break;
- /*
+ /*
* If still in FIN_WAIT_1 STATE FIN has not been acked so
* enter the CLOSING state.
*/
@@ -2459,7 +2459,7 @@ dodata: /* XXX */
tp->t_state = TCPS_CLOSING;
break;
- /*
+ /*
* In FIN_WAIT_2 state enter the TIME_WAIT state,
* starting the time-wait timer, turning off the other
* standard timers.
@@ -2497,8 +2497,8 @@ check_delack:
INP_LOCK_ASSERT(inp);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ callout_reset(tp->tt_delack, tcp_delacktime,
+ tcp_timer_delack, tp);
}
INP_UNLOCK(inp);
return;
@@ -2553,8 +2553,8 @@ dropwithreset:
} else {
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
- ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
- in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
+ ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
+ in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
goto drop;
}
/* IPv6 anycast check is done at tcp6_input() */
@@ -2564,7 +2564,7 @@ dropwithreset:
*/
if (badport_bandlim(rstreason) < 0)
goto drop;
-
+
#ifdef TCPDEBUG
if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
@@ -2710,7 +2710,7 @@ tcp_dooptions(tp, to, cp, cnt, is_syn, th)
optlen != TCPOLEN_SACK_PERMITTED)
continue;
if (is_syn) {
- /* MUST only be set on SYN */
+ /* MUST only be set on SYN */
to->to_flags |= TOF_SACK;
}
break;
@@ -2999,11 +2999,11 @@ tcp_mss(tp, offer)
* were received yet. In this case we just guess, otherwise
* we do the same as before T/TCP.
*/
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
(origoffer == -1 ||
(tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
mss -= TCPOLEN_TSTAMP_APPA;
- if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
+ if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
(origoffer == -1 ||
(tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
mss -= TCPOLEN_CC_APPA;
@@ -3202,7 +3202,7 @@ tcp_newreno_partial_ack(tp, th)
* Returns 1 if the TIME_WAIT state was killed and we should start over,
* looking for a pcb in the listen state. Returns 0 otherwise.
*/
-static int
+static int
tcp_timewait(tw, to, th, m, tlen)
struct tcptw *tw;
struct tcpopt *to;
@@ -3235,7 +3235,7 @@ tcp_timewait(tw, to, th, m, tlen)
/*
* If segment contains a SYN and CC [not CC.NEW] option:
- * if connection duration > MSL, drop packet and send RST;
+ * if connection duration > MSL, drop packet and send RST;
*
* if SEG.CC > CCrecv then is new SYN.
* Complete close and delete TCPCB. Then reprocess
@@ -3300,7 +3300,7 @@ tcp_timewait(tw, to, th, m, tlen)
/*
* Acknowledge the segment if it has data or is not a duplicate ACK.
*/
- if (thflags != TH_ACK || tlen != 0 ||
+ if (thflags != TH_ACK || tlen != 0 ||
th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
tcp_twrespond(tw, TH_ACK);
goto drop;
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index 5d71887..ce739e9 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -102,7 +102,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize, CTLFLAG_RW,
int tcp_do_newreno = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW, &tcp_do_newreno,
- 0, "Enable NewReno Algorithms");
+ 0, "Enable NewReno Algorithms");
/*
* Tcp output routine: figure out what should be sent and send it.
@@ -151,10 +151,10 @@ tcp_output(struct tcpcb *tp)
* We have been idle for "a while" and no acks are
* expected to clock out any data we send --
* slow start to get ack "clock" running again.
- *
+ *
* Set the slow-start flight size depending on whether
* this is a local network or not.
- */
+ */
int ss = ss_fltsz;
#ifdef INET6
if (isipv6) {
@@ -194,7 +194,7 @@ again:
* we're replacing a (future) new transmission with a retransmission
* now, and we previously incremented snd_cwnd in tcp_input().
*/
- /*
+ /*
* Still in sack recovery , reset rxmit flag to zero.
*/
sack_rxmit = 0;
@@ -207,33 +207,33 @@ again:
/* Do not retransmit SACK segments beyond snd_recover */
if (SEQ_GT(p->end, tp->snd_recover)) {
/*
- * (At least) part of sack hole extends beyond
- * snd_recover. Check to see if we can rexmit data
+ * (At least) part of sack hole extends beyond
+ * snd_recover. Check to see if we can rexmit data
* for this hole.
*/
if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
- /*
+ /*
* Can't rexmit any more data for this hole.
- * That data will be rexmitted in the next
- * sack recovery episode, when snd_recover
+ * That data will be rexmitted in the next
+ * sack recovery episode, when snd_recover
* moves past p->rxmit.
*/
p = NULL;
goto after_sack_rexmit;
} else
/* Can rexmit part of the current hole */
- len = ((long)ulmin(tp->snd_cwnd,
+ len = ((long)ulmin(tp->snd_cwnd,
tp->snd_recover - p->rxmit));
} else
len = ((long)ulmin(tp->snd_cwnd, p->end - p->rxmit));
sack_rxmit = 1;
sendalot = 1;
off = p->rxmit - tp->snd_una;
- KASSERT(off >= 0,("%s: sack block to the left of una : %d",
+ KASSERT(off >= 0,("%s: sack block to the left of una : %d",
__func__, off));
if (len > 0) {
tcpstat.tcps_sack_rexmits++;
- tcpstat.tcps_sack_rexmit_bytes +=
+ tcpstat.tcps_sack_rexmit_bytes +=
min(len, tp->t_maxseg);
}
}
@@ -281,7 +281,7 @@ after_sack_rexmit:
}
/*
- * If snd_nxt == snd_max and we have transmitted a FIN, the
+ * If snd_nxt == snd_max and we have transmitted a FIN, the
* offset will be > 0 even if so_snd.sb_cc is 0, resulting in
* a negative length. This can also occur when TCP opens up
* its congestion window while receiving additional duplicate
@@ -291,9 +291,9 @@ after_sack_rexmit:
* In the normal retransmit-FIN-only case, however, snd_nxt will
* be set to snd_una, the offset will be 0, and the length may
* wind up 0.
- *
+ *
* If sack_rxmit is true we are retransmitting from the scoreboard
- * in which case len is already set.
+ * in which case len is already set.
*/
if (!sack_rxmit)
len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
@@ -359,7 +359,7 @@ after_sack_rexmit:
if (sack_rxmit) {
if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
flags &= ~TH_FIN;
- } else {
+ } else {
if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
flags &= ~TH_FIN;
}
@@ -451,12 +451,12 @@ after_sack_rexmit:
* that the retransmission timer is set.
*/
if (tp->sack_enable && SEQ_GT(tp->snd_max, tp->snd_una) &&
- !callout_active(tp->tt_rexmt) &&
+ !callout_active(tp->tt_rexmt) &&
!callout_active(tp->tt_persist)) {
callout_reset(tp->tt_rexmt, tp->t_rxtcur,
tcp_timer_rexmt, tp);
- return (0);
- }
+ return (0);
+ }
/*
* TCP window updates are not reliable, rather a polling protocol
* using ``persist'' packets is used to insure receipt of window
@@ -517,19 +517,19 @@ send:
(void)memcpy(opt + 2, &mss, sizeof(mss));
optlen = TCPOLEN_MAXSEG;
- /*
- * If this is the first SYN of connection (not a SYN
- * ACK), include SACK_PERMIT_HDR option. If this is a
- * SYN ACK, include SACK_PERMIT_HDR option if peer has
- * already done so. This is only for active connect,
+ /*
+ * If this is the first SYN of connection (not a SYN
+ * ACK), include SACK_PERMIT_HDR option. If this is a
+ * SYN ACK, include SACK_PERMIT_HDR option if peer has
+ * already done so. This is only for active connect,
* since the syncache takes care of the passive connect.
- */
- if (tp->sack_enable && ((flags & TH_ACK) == 0 ||
+ */
+ if (tp->sack_enable && ((flags & TH_ACK) == 0 ||
(tp->t_flags & TF_SACK_PERMIT))) {
- *((u_int32_t *) (opt + optlen)) =
+ *((u_int32_t *) (opt + optlen)) =
htonl(TCPOPT_SACK_PERMIT_HDR);
- optlen += 4;
- }
+ optlen += 4;
+ }
if ((tp->t_flags & TF_REQ_SCALE) &&
((flags & TH_ACK) == 0 ||
(tp->t_flags & TF_RCVD_SCALE))) {
@@ -541,25 +541,25 @@ send:
optlen += 4;
}
}
- }
+ }
- /*
+ /*
* Send a timestamp and echo-reply if this is a SYN and our side
* wants to use timestamps (TF_REQ_TSTMP is set) or both our side
* and our peer have sent timestamps in our SYN's.
- */
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
- (flags & TH_RST) == 0 &&
+ */
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
+ (flags & TH_RST) == 0 &&
((flags & TH_ACK) == 0 ||
(tp->t_flags & TF_RCVD_TSTMP))) {
u_int32_t *lp = (u_int32_t *)(opt + optlen);
- /* Form timestamp option as shown in appendix A of RFC 1323. */
- *lp++ = htonl(TCPOPT_TSTAMP_HDR);
- *lp++ = htonl(ticks);
- *lp = htonl(tp->ts_recent);
- optlen += TCPOLEN_TSTAMP_APPA;
- }
+ /* Form timestamp option as shown in appendix A of RFC 1323. */
+ *lp++ = htonl(TCPOPT_TSTAMP_HDR);
+ *lp++ = htonl(ticks);
+ *lp = htonl(tp->ts_recent);
+ optlen += TCPOLEN_TSTAMP_APPA;
+ }
/*
* Send SACKs if necessary. This should be the last option processed.
@@ -587,12 +587,12 @@ send:
*olp = htonl(TCPOPT_SACK_HDR|(TCPOLEN_SACK*count+2));
optlen += TCPOLEN_SACK*count + 4; /* including leading NOPs */
}
- /*
+ /*
* Send `CC-family' options if our side wants to use them (TF_REQ_CC),
* options are allowed (!TF_NOOPT) and it's not a RST.
- */
- if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
- (flags & TH_RST) == 0) {
+ */
+ if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
+ (flags & TH_RST) == 0) {
switch (flags & (TH_SYN|TH_ACK)) {
/*
* This is a normal ACK, send CC if we received CC before
@@ -630,7 +630,7 @@ send:
TCPOPT_CCNEW : TCPOPT_CC;
opt[optlen++] = TCPOLEN_CC;
*(u_int32_t *)&opt[optlen] = htonl(tp->cc_send);
- optlen += 4;
+ optlen += 4;
break;
/*
@@ -656,7 +656,7 @@ send:
}
break;
}
- }
+ }
#ifdef TCP_SIGNATURE
#ifdef INET6
@@ -682,7 +682,7 @@ send:
}
#endif /* TCP_SIGNATURE */
- hdrlen += optlen;
+ hdrlen += optlen;
#ifdef INET6
if (isipv6)
@@ -715,9 +715,9 @@ send:
/*#ifdef DIAGNOSTIC*/
#ifdef INET6
- if (max_linkhdr + hdrlen > MCLBYTES)
+ if (max_linkhdr + hdrlen > MCLBYTES)
#else
- if (max_linkhdr + hdrlen > MHLEN)
+ if (max_linkhdr + hdrlen > MHLEN)
#endif
panic("tcphdr too big");
/*#endif*/
@@ -822,12 +822,12 @@ send:
tcpip_fillheaders(tp->t_inpcb, ip6, th);
} else
#endif /* INET6 */
- {
- ip = mtod(m, struct ip *);
- ipov = (struct ipovly *)ip;
- th = (struct tcphdr *)(ip + 1);
- tcpip_fillheaders(tp->t_inpcb, ip, th);
- }
+ {
+ ip = mtod(m, struct ip *);
+ ipov = (struct ipovly *)ip;
+ th = (struct tcphdr *)(ip + 1);
+ tcpip_fillheaders(tp->t_inpcb, ip, th);
+ }
/*
* Fill in fields, remembering maximum advertised
@@ -850,7 +850,7 @@ send:
* case, since we know we aren't doing a retransmission.
* (retransmit and persist are mutually exclusive...)
*/
- if (len || (flags & (TH_SYN|TH_FIN))
+ if (len || (flags & (TH_SYN|TH_FIN))
|| callout_active(tp->tt_persist))
th->th_seq = htonl(tp->snd_nxt);
else
@@ -927,16 +927,16 @@ send:
sizeof(struct tcphdr) + optlen + len);
else
#endif /* INET6 */
- {
- m->m_pkthdr.csum_flags = CSUM_TCP;
- m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
- th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));
-
- /* IP version must be set here for ipv4/ipv6 checking later */
- KASSERT(ip->ip_v == IPVERSION,
- ("%s: IP version incorrect: %d", __func__, ip->ip_v));
- }
+ {
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));
+
+ /* IP version must be set here for ipv4/ipv6 checking later */
+ KASSERT(ip->ip_v == IPVERSION,
+ ("%s: IP version incorrect: %d", __func__, ip->ip_v));
+ }
/*
* In transmit state, time the transmission and arrange for
@@ -1067,8 +1067,8 @@ timer:
{
ip->ip_len = m->m_pkthdr.len;
#ifdef INET6
- if (INP_CHECK_SOCKAF(so, AF_INET6))
- ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
#endif /* INET6 */
/*
* If we do path MTU discovery, then we set DF on every packet.
@@ -1096,7 +1096,7 @@ timer:
if ((flags & TH_SYN) == 0) {
if (sack_rxmit)
p->rxmit -= len;
- else
+ else
tp->snd_nxt -= len;
}
}
@@ -1104,9 +1104,9 @@ timer:
out:
if (error == ENOBUFS) {
if (!callout_active(tp->tt_rexmt) &&
- !callout_active(tp->tt_persist))
+ !callout_active(tp->tt_persist))
callout_reset(tp->tt_rexmt, tp->t_rxtcur,
- tcp_timer_rexmt, tp);
+ tcp_timer_rexmt, tp);
tcp_quench(tp->t_inpcb, 0);
return (0);
}
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index ffde973..902044b 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -105,7 +105,7 @@ SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
&tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
static int log_in_vain = 0;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
&log_in_vain, 0, "Log all incoming TCP connections");
static int blackhole = 0;
@@ -113,8 +113,8 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
&blackhole, 0, "Do not send RST when dropping refused connections");
int tcp_delack_enabled = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
- &tcp_delack_enabled, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
+ &tcp_delack_enabled, 0,
"Delay ACK to try and piggyback it onto a data packet");
#ifdef TCP_DROP_SYNFIN
@@ -394,7 +394,7 @@ tcp6_input(mp, offp, proto)
* better place to put this in?
*/
ia6 = ip6_getdstifaddr(m);
- if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
+ if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
struct ip6_hdr *ip6;
ip6 = mtod(m, struct ip6_hdr *);
@@ -615,7 +615,7 @@ findpcb:
if (next_hop != NULL && isipv6 == 0) { /* IPv6 support is not yet */
/*
* Transparently forwarded. Pretend to be the destination.
- * already got one like this?
+ * already got one like this?
*/
inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
@@ -643,7 +643,7 @@ findpcb:
ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
1, m->m_pkthdr.rcvif);
- }
+ }
#if defined(IPSEC) || defined(FAST_IPSEC)
#ifdef INET6
@@ -707,7 +707,7 @@ findpcb:
break;
}
}
- if (blackhole) {
+ if (blackhole) {
switch (blackhole) {
case 1:
if (thflags & TH_SYN)
@@ -976,7 +976,7 @@ findpcb:
*/
if (thflags & TH_FIN || tlen != 0)
tp->t_flags |= (TF_DELACK | TF_NEEDSYN);
- else
+ else
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
tcpstat.tcps_connects++;
soisconnected(so);
@@ -1143,7 +1143,7 @@ after_listen:
tp->snd_cwnd >= tp->snd_wnd &&
((!tcp_do_newreno && !tp->sack_enable &&
tp->t_dupacks < tcprexmtthresh) ||
- ((tcp_do_newreno || tp->sack_enable) &&
+ ((tcp_do_newreno || tp->sack_enable) &&
!IN_FASTRECOVERY(tp)))) {
KASSERT(headlocked, ("headlocked"));
INP_INFO_WUNLOCK(&tcbinfo);
@@ -1171,7 +1171,7 @@ after_listen:
* Recalculate the transmit timer / rtt.
*
* Some boxes send broken timestamp replies
- * during the SYN+ACK phase, ignore
+ * during the SYN+ACK phase, ignore
* timestamps of 0 or we could calculate a
* huge RTT and blow up the retransmit timer.
*/
@@ -1221,7 +1221,7 @@ after_listen:
if (tp->snd_una == tp->snd_max)
callout_stop(tp->tt_rexmt);
else if (!callout_active(tp->tt_persist))
- callout_reset(tp->tt_rexmt,
+ callout_reset(tp->tt_rexmt,
tp->t_rxtcur,
tcp_timer_rexmt, tp);
@@ -1404,8 +1404,8 @@ after_listen:
* ACKNOW will be turned on later.
*/
if (DELAY_ACK(tp) && tlen != 0)
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ callout_reset(tp->tt_delack, tcp_delacktime,
+ tcp_timer_delack, tp);
else
tp->t_flags |= TF_ACKNOW;
/*
@@ -1426,15 +1426,15 @@ after_listen:
}
} else {
/*
- * Received initial SYN in SYN-SENT[*] state =>
- * simultaneous open. If segment contains CC option
- * and there is a cached CC, apply TAO test.
- * If it succeeds, connection is * half-synchronized.
- * Otherwise, do 3-way handshake:
- * SYN-SENT -> SYN-RECEIVED
- * SYN-SENT* -> SYN-RECEIVED*
- * If there was no CC option, clear cached CC value.
- */
+ * Received initial SYN in SYN-SENT[*] state =>
+ * simultaneous open. If segment contains CC option
+ * and there is a cached CC, apply TAO test.
+ * If it succeeds, connection is * half-synchronized.
+ * Otherwise, do 3-way handshake:
+ * SYN-SENT -> SYN-RECEIVED
+ * SYN-SENT* -> SYN-RECEIVED*
+ * If there was no CC option, clear cached CC value.
+ */
tp->t_flags |= TF_ACKNOW;
callout_stop(tp->tt_rexmt);
if (to.to_flags & TOF_CC) {
@@ -1501,7 +1501,7 @@ trimthenstep6:
* processing in the middle of step 5, ack processing.
* Otherwise, goto step 6.
*/
- if (thflags & TH_ACK)
+ if (thflags & TH_ACK)
goto process_ACK;
goto step6;
@@ -1538,7 +1538,7 @@ trimthenstep6:
else
goto drop;
}
- break; /* continue normal processing */
+ break; /* continue normal processing */
}
/*
@@ -1574,7 +1574,7 @@ trimthenstep6:
* we use a much stricter check while in the ESTABLISHED state,
* only accepting RSTs where the sequence number is equal to
* last_ack_sent. In all other states (the states in which a
- * RST is more likely), the more permissive check is used.
+ * RST is more likely), the more permissive check is used.
* If we have multiple segments in flight, the intial reset
* segment sequence numbers will be to the left of last_ack_sent,
* but they will eventually catch up.
@@ -1680,7 +1680,7 @@ trimthenstep6:
*/
if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) &&
((to.to_flags & TOF_CC) == 0 || tp->cc_recv != to.to_cc))
- goto dropafterack;
+ goto dropafterack;
/*
* In the SYN-RECEIVED state, validate that the packet belongs to
@@ -1869,7 +1869,7 @@ trimthenstep6:
tp->t_flags &= ~TF_NEEDFIN;
} else {
tp->t_state = TCPS_ESTABLISHED;
- callout_reset(tp->tt_keep, tcp_keepidle,
+ callout_reset(tp->tt_keep, tcp_keepidle,
tcp_timer_keep, tp);
}
/*
@@ -1939,8 +1939,8 @@ trimthenstep6:
u_int win;
/*
- * If we're doing sack, check to
- * see if we're already in sack
+ * If we're doing sack, check to
+ * see if we're already in sack
* recovery. If we're not doing sack,
* check to see if we're in newreno
* recovery.
@@ -1968,11 +1968,11 @@ trimthenstep6:
tp->t_rtttime = 0;
if (tp->sack_enable) {
tcpstat.tcps_sack_recovery_episode++;
- tp->snd_cwnd =
- tp->t_maxseg *
+ tp->snd_cwnd =
+ tp->t_maxseg *
tp->t_dupacks;
(void) tcp_output(tp);
- tp->snd_cwnd +=
+ tp->snd_cwnd +=
tp->snd_ssthresh;
goto drop;
}
@@ -2054,11 +2054,11 @@ trimthenstep6:
tp->snd_cwnd = tp->snd_ssthresh;
}
}
- } else {
- if (tp->t_dupacks >= tcprexmtthresh &&
- tp->snd_cwnd > tp->snd_ssthresh)
+ } else {
+ if (tp->t_dupacks >= tcprexmtthresh &&
+ tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
- }
+ }
tp->t_dupacks = 0;
if (SEQ_GT(th->th_ack, tp->snd_max)) {
tcpstat.tcps_rcvacktoomuch++;
@@ -2110,7 +2110,7 @@ process_ACK:
if (tp->t_flags & TF_WASFRECOVERY)
ENTER_FASTRECOVERY(tp);
tp->snd_nxt = tp->snd_max;
- tp->t_badrxtwin = 0; /* XXX probably not required */
+ tp->t_badrxtwin = 0; /* XXX probably not required */
}
/*
@@ -2123,7 +2123,7 @@ process_ACK:
* Recompute the initial retransmit timer.
*
* Some boxes send broken timestamp replies
- * during the SYN+ACK phase, ignore
+ * during the SYN+ACK phase, ignore
* timestamps of 0 or we could calculate a
* huge RTT and blow up the retransmit timer.
*/
@@ -2162,7 +2162,7 @@ process_ACK:
* Otherwise open linearly: maxseg per window
* (maxseg^2 / cwnd per packet).
*/
- if ((!tcp_do_newreno && !tp->sack_enable) ||
+ if ((!tcp_do_newreno && !tp->sack_enable) ||
!IN_FASTRECOVERY(tp)) {
register u_int cw = tp->snd_cwnd;
register u_int incr = tp->t_maxseg;
@@ -2182,12 +2182,12 @@ process_ACK:
}
sowwakeup_locked(so);
/* detect una wraparound */
- if ((tcp_do_newreno || tp->sack_enable) &&
+ if ((tcp_do_newreno || tp->sack_enable) &&
!IN_FASTRECOVERY(tp) &&
SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
tp->snd_recover = th->th_ack - 1;
- if ((tcp_do_newreno || tp->sack_enable) &&
+ if ((tcp_do_newreno || tp->sack_enable) &&
IN_FASTRECOVERY(tp) &&
SEQ_GEQ(th->th_ack, tp->snd_recover))
EXIT_FASTRECOVERY(tp);
@@ -2195,7 +2195,7 @@ process_ACK:
if (tp->sack_enable) {
if (SEQ_GT(tp->snd_una, tp->snd_recover))
tp->snd_recover = tp->snd_una;
- }
+ }
if (SEQ_LT(tp->snd_nxt, tp->snd_una))
tp->snd_nxt = tp->snd_una;
@@ -2228,7 +2228,7 @@ process_ACK:
}
break;
- /*
+ /*
* In CLOSING STATE in addition to the processing for
* the ESTABLISHED state if the ACK acknowledges our FIN
* then enter the TIME-WAIT state, otherwise ignore
@@ -2440,7 +2440,7 @@ dodata: /* XXX */
}
switch (tp->t_state) {
- /*
+ /*
* In SYN_RECEIVED and ESTABLISHED STATES
* enter the CLOSE_WAIT state.
*/
@@ -2451,7 +2451,7 @@ dodata: /* XXX */
tp->t_state = TCPS_CLOSE_WAIT;
break;
- /*
+ /*
* If still in FIN_WAIT_1 STATE FIN has not been acked so
* enter the CLOSING state.
*/
@@ -2459,7 +2459,7 @@ dodata: /* XXX */
tp->t_state = TCPS_CLOSING;
break;
- /*
+ /*
* In FIN_WAIT_2 state enter the TIME_WAIT state,
* starting the time-wait timer, turning off the other
* standard timers.
@@ -2497,8 +2497,8 @@ check_delack:
INP_LOCK_ASSERT(inp);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
- callout_reset(tp->tt_delack, tcp_delacktime,
- tcp_timer_delack, tp);
+ callout_reset(tp->tt_delack, tcp_delacktime,
+ tcp_timer_delack, tp);
}
INP_UNLOCK(inp);
return;
@@ -2553,8 +2553,8 @@ dropwithreset:
} else {
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
- ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
- in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
+ ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
+ in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
goto drop;
}
/* IPv6 anycast check is done at tcp6_input() */
@@ -2564,7 +2564,7 @@ dropwithreset:
*/
if (badport_bandlim(rstreason) < 0)
goto drop;
-
+
#ifdef TCPDEBUG
if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
@@ -2710,7 +2710,7 @@ tcp_dooptions(tp, to, cp, cnt, is_syn, th)
optlen != TCPOLEN_SACK_PERMITTED)
continue;
if (is_syn) {
- /* MUST only be set on SYN */
+ /* MUST only be set on SYN */
to->to_flags |= TOF_SACK;
}
break;
@@ -2999,11 +2999,11 @@ tcp_mss(tp, offer)
* were received yet. In this case we just guess, otherwise
* we do the same as before T/TCP.
*/
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
(origoffer == -1 ||
(tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
mss -= TCPOLEN_TSTAMP_APPA;
- if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
+ if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
(origoffer == -1 ||
(tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC))
mss -= TCPOLEN_CC_APPA;
@@ -3202,7 +3202,7 @@ tcp_newreno_partial_ack(tp, th)
* Returns 1 if the TIME_WAIT state was killed and we should start over,
* looking for a pcb in the listen state. Returns 0 otherwise.
*/
-static int
+static int
tcp_timewait(tw, to, th, m, tlen)
struct tcptw *tw;
struct tcpopt *to;
@@ -3235,7 +3235,7 @@ tcp_timewait(tw, to, th, m, tlen)
/*
* If segment contains a SYN and CC [not CC.NEW] option:
- * if connection duration > MSL, drop packet and send RST;
+ * if connection duration > MSL, drop packet and send RST;
*
* if SEG.CC > CCrecv then is new SYN.
* Complete close and delete TCPCB. Then reprocess
@@ -3300,7 +3300,7 @@ tcp_timewait(tw, to, th, m, tlen)
/*
* Acknowledge the segment if it has data or is not a duplicate ACK.
*/
- if (thflags != TH_ACK || tlen != 0 ||
+ if (thflags != TH_ACK || tlen != 0 ||
th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
tcp_twrespond(tw, TH_ACK);
goto drop;
diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c
index 8dfa682..52c1980 100644
--- a/sys/netinet/tcp_sack.c
+++ b/sys/netinet/tcp_sack.c
@@ -71,10 +71,10 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgements:
- * This product includes software developed by the University of
- * California, Berkeley and its contributors.
- * This product includes software developed at the Information
- * Technology Division, US Naval Research Laboratory.
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * This product includes software developed at the Information
+ * Technology Division, US Naval Research Laboratory.
* 4. Neither the name of the NRL nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
@@ -494,9 +494,9 @@ tcp_sack_partialack(tp, th)
* Set snd_cwnd to one segment beyond acknowledged offset
* (tp->snd_una has not yet been updated when this function is called.)
*/
- /*
- * Should really be
- * min(tp->snd_cwnd, tp->t_maxseg + (th->th_ack - tp->snd_una))
+ /*
+ * Should really be
+ * min(tp->snd_cwnd, tp->t_maxseg + (th->th_ack - tp->snd_una))
*/
tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
tp->t_flags |= TF_ACKNOW;
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 2d29844..2eebce4 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -110,8 +110,8 @@
#include <machine/in_cksum.h>
#include <sys/md5.h>
-int tcp_mssdflt = TCP_MSS;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
+int tcp_mssdflt = TCP_MSS;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
&tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
#ifdef INET6
@@ -146,17 +146,17 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
"be under the MINMSS Size");
#if 0
-static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
+static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
&tcp_rttdflt , 0, "Default maximum TCP Round Trip Time");
#endif
int tcp_do_rfc1323 = 1;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
&tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
int tcp_do_rfc1644 = 0;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
&tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
static int tcp_tcbhashsize = 0;
@@ -167,11 +167,11 @@ static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
"Enable tcp_drain routine for extra help when low on mbufs");
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
&tcbinfo.ipi_count, 0, "Number of active PCBs");
static int icmp_may_rst = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
"Certain ICMP unreachable messages may abort connections in SYN_SENT");
static int tcp_isn_reseed_interval = 0;
@@ -179,8 +179,8 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
&tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
/*
- * TCP bandwidth limiting sysctls. Note that the default lower bound of
- * 1024 exists only for debugging. A good production default would be
+ * TCP bandwidth limiting sysctls. Note that the default lower bound of
+ * 1024 exists only for debugging. A good production default would be
* something like 6100.
*/
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
@@ -250,7 +250,7 @@ void
tcp_init()
{
int hashsize = TCBHASHSIZE;
-
+
tcp_ccgen = 1;
tcp_delacktime = TCPTV_DELACK;
@@ -274,7 +274,7 @@ tcp_init()
tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
&tcbinfo.porthashmask);
- tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
+ tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
@@ -290,10 +290,10 @@ tcp_init()
/*
* These have to be type stable for the benefit of the timers.
*/
- tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
+ tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcpcb_zone, maxsockets);
- tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
+ tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcptw_zone, maxsockets / 5);
tcp_timer_init();
@@ -304,7 +304,7 @@ tcp_init()
tcp_isn_tick(NULL);
EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
SHUTDOWN_PRI_DEFAULT);
- sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
+ sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
@@ -459,7 +459,7 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
m->m_data += max_linkhdr;
#ifdef INET6
if (isipv6) {
- bcopy((caddr_t)ip6, mtod(m, caddr_t),
+ bcopy((caddr_t)ip6, mtod(m, caddr_t),
sizeof(struct ip6_hdr));
ip6 = mtod(m, struct ip6_hdr *);
nth = (struct tcphdr *)(ip6 + 1);
@@ -511,13 +511,13 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
} else
#endif
- {
- tlen += sizeof (struct tcpiphdr);
- ip->ip_len = tlen;
- ip->ip_ttl = ip_defttl;
- if (path_mtu_discovery)
- ip->ip_off |= IP_DF;
- }
+ {
+ tlen += sizeof (struct tcpiphdr);
+ ip->ip_len = tlen;
+ ip->ip_ttl = ip_defttl;
+ if (path_mtu_discovery)
+ ip->ip_off |= IP_DF;
+ }
m->m_len = tlen;
m->m_pkthdr.len = tlen;
m->m_pkthdr.rcvif = NULL;
@@ -557,12 +557,12 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
NULL, NULL);
} else
#endif /* INET6 */
- {
- nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
- m->m_pkthdr.csum_flags = CSUM_TCP;
- m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
- }
+ {
+ nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ }
#ifdef TCPDEBUG
if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
@@ -636,7 +636,7 @@ tcp_newtcpcb(inp)
tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->t_rcvtime = ticks;
tp->t_bw_rtttime = ticks;
- /*
+ /*
* IPv4 TTL initialization is necessary for an IPv6 socket as well,
* because the socket may be bound to an IPv6 wildcard address,
* which may match an IPv4-mapped IPv6 address.
@@ -740,7 +740,7 @@ tcp_discardcb(tp)
/* XXX: This wraps if the pipe is more than 4 Gbit per second */
metrics.rmx_bandwidth = tp->snd_bandwidth;
metrics.rmx_cwnd = tp->snd_cwnd;
- metrics.rmx_sendpipe = 0;
+ metrics.rmx_sendpipe = 0;
metrics.rmx_recvpipe = 0;
tcp_hc_update(&inp->inp_inc, &metrics);
@@ -801,8 +801,8 @@ tcp_drain()
* if there is one...
* XXX: The "Net/3" implementation doesn't imply that the TCP
* reassembly queue should be flushed, but in a situation
- * where we're really low on mbufs, this is potentially
- * usefull.
+ * where we're really low on mbufs, this is potentially
+ * usefull.
*/
INP_INFO_RLOCK(&tcbinfo);
LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
@@ -914,7 +914,7 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
if (inp_list == NULL)
return ENOMEM;
-
+
s = splnet();
INP_INFO_RLOCK(&tcbinfo);
for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
@@ -1143,7 +1143,7 @@ tcp_ctlinput(cmd, sa, vip)
return;
if (ip != NULL) {
s = splnet();
- th = (struct tcphdr *)((caddr_t)ip
+ th = (struct tcphdr *)((caddr_t)ip
+ (ip->ip_hl << 2));
INP_INFO_WLOCK(&tcbinfo);
inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
@@ -1154,7 +1154,7 @@ tcp_ctlinput(cmd, sa, vip)
icmp_seq = htonl(th->th_seq);
tp = intotcpcb(inp);
if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
- SEQ_LT(icmp_seq, tp->snd_max))
+ SEQ_LT(icmp_seq, tp->snd_max))
inp = (*notify)(inp, inetctlerrmap[cmd]);
}
if (inp != NULL)
@@ -1269,7 +1269,7 @@ tcp6_ctlinput(cmd, sa, d)
* depends on this property. In addition, these ISNs should be
* unguessable so as to prevent connection hijacking. To satisfy
* the requirements of this situation, the algorithm outlined in
- * RFC 1948 is used, with only small modifications.
+ * RFC 1948 is used, with only small modifications.
*
* Implementation details:
*
@@ -1319,7 +1319,7 @@ tcp_new_isn(tp)
read_random(&isn_secret, sizeof(isn_secret));
isn_last_reseed = ticks;
}
-
+
/* Compute the md5 hash and return the ISN. */
MD5Init(&isn_ctx);
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
@@ -1357,7 +1357,7 @@ tcp_isn_tick(xtp)
void *xtp;
{
u_int32_t projected_offset;
-
+
projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz;
if (projected_offset > isn_offset)
@@ -1512,7 +1512,7 @@ tcp_mtudisc(inp, errno)
* is called by TCP routines that access the rmx structure and by tcp_mss
* to get the interface MTU.
*/
-u_long
+u_long
tcp_maxmtu(inc)
struct in_conninfo *inc;
{
@@ -1605,13 +1605,13 @@ ipsec_hdrsiz_tcp(tp)
hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
} else
#endif /* INET6 */
- {
- ip = mtod(m, struct ip *);
- th = (struct tcphdr *)(ip + 1);
- m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
- tcpip_fillheaders(inp, ip, th);
- hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
- }
+ {
+ ip = mtod(m, struct ip *);
+ th = (struct tcphdr *)(ip + 1);
+ m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
+ tcpip_fillheaders(inp, ip, th);
+ hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
+ }
m_free(m);
return hdrsiz;
@@ -1651,8 +1651,8 @@ tcp_twstart(tp)
/*
* Set t_recent if timestamps are used on the connection.
*/
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
- (TF_REQ_TSTMP|TF_RCVD_TSTMP))
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
+ (TF_REQ_TSTMP|TF_RCVD_TSTMP))
tw->t_recent = tp->ts_recent;
else
tw->t_recent = 0;
@@ -1719,7 +1719,7 @@ tcp_twrecycleable(struct tcptw *tw)
new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
-
+
if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
return 1;
else
@@ -1789,31 +1789,31 @@ tcp_twrespond(struct tcptw *tw, int flags)
tcpip_fillheaders(inp, ip, th);
}
optp = (u_int8_t *)(th + 1);
-
- /*
+
+ /*
* Send a timestamp and echo-reply if both our side and our peer
* have sent timestamps in our SYN's and this is not a RST.
- */
+ */
if (tw->t_recent && flags == TH_ACK) {
u_int32_t *lp = (u_int32_t *)optp;
- /* Form timestamp option as shown in appendix A of RFC 1323. */
- *lp++ = htonl(TCPOPT_TSTAMP_HDR);
- *lp++ = htonl(ticks);
- *lp = htonl(tw->t_recent);
- optp += TCPOLEN_TSTAMP_APPA;
- }
+ /* Form timestamp option as shown in appendix A of RFC 1323. */
+ *lp++ = htonl(TCPOPT_TSTAMP_HDR);
+ *lp++ = htonl(ticks);
+ *lp = htonl(tw->t_recent);
+ optp += TCPOLEN_TSTAMP_APPA;
+ }
- /*
+ /*
* Send `CC-family' options if needed, and it's not a RST.
- */
+ */
if (tw->cc_recv != 0 && flags == TH_ACK) {
u_int32_t *lp = (u_int32_t *)optp;
*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
*lp = htonl(tw->cc_send);
optp += TCPOLEN_CC_APPA;
- }
+ }
optlen = optp - (u_int8_t *)(th + 1);
m->m_len = hdrlen + optlen;
@@ -1838,7 +1838,7 @@ tcp_twrespond(struct tcptw *tw, int flags)
#endif
{
th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
+ htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
ip->ip_len = m->m_pkthdr.len;
@@ -1868,13 +1868,13 @@ tcp_twrespond(struct tcptw *tw, int flags)
* side of the connection.
*
* BACKGROUND: TCP makes no provision for the management of buffer space
- * at the end points or at the intermediate routers and switches. A TCP
+ * at the end points or at the intermediate routers and switches. A TCP
* stream, whether using NewReno or not, will eventually buffer as
* many packets as it is able and the only reason this typically works is
* due to the fairly small default buffers made available for a connection
* (typicaly 16K or 32K). As machines use larger windows and/or window
* scaling it is now fairly easy for even a single TCP connection to blow-out
- * all available buffer space not only on the local interface, but on
+ * all available buffer space not only on the local interface, but on
* intermediate routers and switches as well. NewReno makes a misguided
* attempt to 'solve' this problem by waiting for an actual failure to occur,
* then backing off, then steadily increasing the window again until another
@@ -1896,7 +1896,7 @@ tcp_twrespond(struct tcptw *tw, int flags)
*
* The second method is to limit the window to the bandwidth delay product
* of the link. This is the method we implement. RTT variances and our
- * own manipulation of the congestion window, bwnd, can potentially
+ * own manipulation of the congestion window, bwnd, can potentially
* destabilize the algorithm. For this reason we have to stabilize the
* elements used to calculate the window. We do this by using the minimum
* observed RTT, the long term average of the observed bandwidth, and
@@ -1936,7 +1936,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
return;
- bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
+ bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
(save_ticks - tp->t_bw_rtttime);
tp->t_bw_rtttime = save_ticks;
tp->t_bw_rtseq = ack_seq;
@@ -1956,7 +1956,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
* Situations Handled:
* (1) Prevents over-queueing of packets on LANs, especially on
* high speed LANs, allowing larger TCP buffers to be
- * specified, and also does a good job preventing
+ * specified, and also does a good job preventing
* over-queueing of packets over choke points like modems
* (at least for the transmit side).
*
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 9c918a9..9125235 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -105,7 +105,7 @@
static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
- &tcp_syncookies, 0,
+ &tcp_syncookies, 0,
"Use TCP SYN cookies if the syncache overflows");
static void syncache_drop(struct syncache *, struct syncache_head *);
@@ -117,7 +117,7 @@ static int syncache_respond(struct syncache *, struct mbuf *, struct socket *);
#else
static int syncache_respond(struct syncache *, struct mbuf *);
#endif
-static struct socket *syncache_socket(struct syncache *, struct socket *,
+static struct socket *syncache_socket(struct syncache *, struct socket *,
struct mbuf *m);
static void syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *, u_int32_t *);
@@ -169,16 +169,16 @@ SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
-#define SYNCACHE_HASH(inc, mask) \
+#define SYNCACHE_HASH(inc, mask) \
((tcp_syncache.hash_secret ^ \
(inc)->inc_faddr.s_addr ^ \
- ((inc)->inc_faddr.s_addr >> 16) ^ \
+ ((inc)->inc_faddr.s_addr >> 16) ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
-#define SYNCACHE_HASH6(inc, mask) \
+#define SYNCACHE_HASH6(inc, mask) \
((tcp_syncache.hash_secret ^ \
- (inc)->inc6_faddr.s6_addr32[0] ^ \
- (inc)->inc6_faddr.s6_addr32[3] ^ \
+ (inc)->inc6_faddr.s6_addr32[0] ^ \
+ (inc)->inc6_faddr.s6_addr32[3] ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
#define ENDPTS_EQ(a, b) ( \
@@ -222,16 +222,16 @@ syncache_init(void)
tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
tcp_syncache.hash_secret = arc4random();
- TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
&tcp_syncache.hashsize);
- TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
&tcp_syncache.cache_limit);
- TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
&tcp_syncache.bucket_limit);
if (!powerof2(tcp_syncache.hashsize)) {
- printf("WARNING: syncache hash size is not a power of 2.\n");
+ printf("WARNING: syncache hash size is not a power of 2.\n");
tcp_syncache.hashsize = 512; /* safe default */
- }
+ }
tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
/* Allocate the hash table. */
@@ -356,15 +356,15 @@ syncache_timer(xslot)
struct inpcb *inp;
INP_INFO_WLOCK(&tcbinfo);
- if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
- !callout_active(&tcp_syncache.tt_timerq[slot])) {
+ if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
+ !callout_active(&tcp_syncache.tt_timerq[slot])) {
/* XXX can this happen? */
INP_INFO_WUNLOCK(&tcbinfo);
- return;
- }
- callout_deactivate(&tcp_syncache.tt_timerq[slot]);
+ return;
+ }
+ callout_deactivate(&tcp_syncache.tt_timerq[slot]);
- nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
+ nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
while (nsc != NULL) {
if (ticks < nsc->sc_rxttime)
break;
@@ -592,7 +592,7 @@ syncache_socket(sc, lso, m)
#ifdef INET6
if (sc->sc_inc.inc_isipv6)
inp->in6p_laddr = in6addr_any;
- else
+ else
#endif
inp->inp_laddr.s_addr = INADDR_ANY;
inp->inp_lport = 0;
@@ -619,7 +619,7 @@ syncache_socket(sc, lso, m)
* copied, since it stores previously received options and is
* used to detect if each new option is different than the
* previous one and hence should be passed to a user.
- * If we copied in6p_inputopts, a user would not be able to
+ * If we copied in6p_inputopts, a user would not be able to
* receive options just after calling the accept system call.
*/
inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
@@ -722,7 +722,7 @@ syncache_socket(sc, lso, m)
* If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
*/
if (sc->sc_rxtslot != 0)
- tp->snd_cwnd = tp->t_maxseg;
+ tp->snd_cwnd = tp->t_maxseg;
callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);
INP_UNLOCK(inp);
@@ -761,11 +761,11 @@ syncache_expand(inc, th, sop, m)
sc = syncache_lookup(inc, &sch);
if (sc == NULL) {
/*
- * There is no syncache entry, so see if this ACK is
+ * There is no syncache entry, so see if this ACK is
* a returning syncookie. To do this, first:
* A. See if this socket has had a syncache entry dropped in
* the past. We don't want to accept a bogus syncookie
- * if we've never received a SYN.
+ * if we've never received a SYN.
* B. check that the syncookie is valid. If it is, then
* cobble up a fake syncache entry, and return.
*/
@@ -888,7 +888,7 @@ syncache_add(inc, to, th, sop, m)
TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
sc, sc_timerq);
SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
- tcpstat.tcps_sndacks++;
+ tcpstat.tcps_sndacks++;
tcpstat.tcps_sndtotal++;
}
*sop = NULL;
@@ -899,7 +899,7 @@ syncache_add(inc, to, th, sop, m)
if (sc == NULL) {
/*
* The zone allocator couldn't provide more entries.
- * Treat this as if the cache was full; drop the oldest
+ * Treat this as if the cache was full; drop the oldest
* entry and insert the new one.
*/
/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
@@ -945,7 +945,7 @@ syncache_add(inc, to, th, sop, m)
sc->sc_flowlabel = 0;
if (tcp_syncookies) {
sc->sc_iss = syncookie_generate(sc, &flowtmp);
-#ifdef INET6
+#ifdef INET6
if (inc->inc_isipv6 &&
(sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK;
@@ -953,7 +953,7 @@ syncache_add(inc, to, th, sop, m)
#endif
} else {
sc->sc_iss = arc4random();
-#ifdef INET6
+#ifdef INET6
if (inc->inc_isipv6 &&
(sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
sc->sc_flowlabel =
@@ -1014,7 +1014,7 @@ syncache_add(inc, to, th, sop, m)
sc->sc_flags = SCF_SIGNATURE;
#endif
- if (to->to_flags & TOF_SACK)
+ if (to->to_flags & TOF_SACK)
sc->sc_flags |= SCF_SACK;
/*
@@ -1112,7 +1112,7 @@ syncache_respond(sc, m)
hlen =
#ifdef INET6
- (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
+ (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
sizeof(struct ip);
@@ -1288,7 +1288,7 @@ syncache_respond(sc, m)
} else
#endif
{
- th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
htons(tlen - hlen + IPPROTO_TCP));
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
@@ -1330,7 +1330,7 @@ syncache_respond(sc, m)
#define SYNCOOKIE_NSECRETS (1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
(hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
-#define SYNCOOKIE_DATAMASK ((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
+#define SYNCOOKIE_DATAMASK ((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
static struct {
u_int32_t ts_secbits[4];
@@ -1355,9 +1355,9 @@ CTASSERT(sizeof(struct md5_add) == 28);
/*
* Consider the problem of a recreated (and retransmitted) cookie. If the
- * original SYN was accepted, the connection is established. The second
- * SYN is inflight, and if it arrives with an ISN that falls within the
- * receive window, the connection is killed.
+ * original SYN was accepted, the connection is established. The second
+ * SYN is inflight, and if it arrives with an ISN that falls within the
+ * receive window, the connection is killed.
*
* However, since cookies have other problems, this may not be worth
* worrying about.
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index 44664ad..65d9d65 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -80,7 +80,7 @@ sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
return (EINVAL);
*(int *)oidp->oid_arg1 = tt;
- return (0);
+ return (0);
}
int tcp_keepinit;
@@ -99,7 +99,7 @@ int tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
CTLTYPE_INT|CTLFLAG_RW, &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
"Time before a delayed ACK is sent");
-
+
int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
&tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
@@ -113,7 +113,7 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
&tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I", "Retransmission Timer Slop");
static int always_keepalive = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
&always_keepalive , 0, "Assume SO_KEEPALIVE on all TCP connections");
static int tcp_keepcnt = TCPTV_KEEPCNT;
@@ -299,7 +299,7 @@ tcp_timer_2msl_tw(int reuse)
struct tcptw *tw, *tw_tail;
struct twlist *twl;
int i;
-
+
for (i = 0; i < 2; i++) {
twl = tw_2msl_list[i];
tw_tail = &twl->tw_tail;
@@ -517,10 +517,10 @@ tcp_timer_rexmt(xtp)
if (tp->t_rxtshift == 1) {
/*
* first retransmit; record ssthresh and cwnd so they can
- * be recovered if this turns out to be a "bad" retransmit.
- * A retransmit is considered "bad" if an ACK for this
+ * be recovered if this turns out to be a "bad" retransmit.
+ * A retransmit is considered "bad" if an ACK for this
* segment is received within RTT/2 interval; the assumption
- * here is that the ACK was already in flight. See
+ * here is that the ACK was already in flight. See
* "On Estimating End-to-End Network Path Properties" by
* Allman and Paxson for more details.
*/
@@ -542,9 +542,9 @@ tcp_timer_rexmt(xtp)
tp->t_rttmin, TCPTV_REXMTMAX);
/*
* Disable rfc1323 and rfc1644 if we havn't got any response to
- * our third SYN to work-around some broken terminal servers
- * (most of which have hopefully been retired) that have bad VJ
- * header compression code which trashes TCP segments containing
+ * our third SYN to work-around some broken terminal servers
+ * (most of which have hopefully been retired) that have bad VJ
+ * header compression code which trashes TCP segments containing
* unknown-to-them TCP options.
*/
if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
diff --git a/sys/netinet/tcp_timer.h b/sys/netinet/tcp_timer.h
index 2475f2a..22d938e 100644
--- a/sys/netinet/tcp_timer.h
+++ b/sys/netinet/tcp_timer.h
@@ -97,7 +97,7 @@
* acks (typically 100ms) could create issues so we set the slop
* to 200ms to try to cover it. Note that, properly speaking,
* delayed-acks should not create a major issue for interactive
- * environments which 'P'ush the last segment, at least as
+ * environments which 'P'ush the last segment, at least as
* long as implementations do the required 'at least one ack
* for every two packets' for the non-interactive streaming case.
* (maybe the RTO calculation should use 2*RTT instead of RTT
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index 2d29844..2eebce4 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -110,8 +110,8 @@
#include <machine/in_cksum.h>
#include <sys/md5.h>
-int tcp_mssdflt = TCP_MSS;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
+int tcp_mssdflt = TCP_MSS;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
&tcp_mssdflt , 0, "Default TCP Maximum Segment Size");
#ifdef INET6
@@ -146,17 +146,17 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
"be under the MINMSS Size");
#if 0
-static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
+static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
&tcp_rttdflt , 0, "Default maximum TCP Round Trip Time");
#endif
int tcp_do_rfc1323 = 1;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
&tcp_do_rfc1323 , 0, "Enable rfc1323 (high performance TCP) extensions");
int tcp_do_rfc1644 = 0;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
&tcp_do_rfc1644 , 0, "Enable rfc1644 (TTCP) extensions");
static int tcp_tcbhashsize = 0;
@@ -167,11 +167,11 @@ static int do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
"Enable tcp_drain routine for extra help when low on mbufs");
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
&tcbinfo.ipi_count, 0, "Number of active PCBs");
static int icmp_may_rst = 1;
-SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
"Certain ICMP unreachable messages may abort connections in SYN_SENT");
static int tcp_isn_reseed_interval = 0;
@@ -179,8 +179,8 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
&tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");
/*
- * TCP bandwidth limiting sysctls. Note that the default lower bound of
- * 1024 exists only for debugging. A good production default would be
+ * TCP bandwidth limiting sysctls. Note that the default lower bound of
+ * 1024 exists only for debugging. A good production default would be
* something like 6100.
*/
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
@@ -250,7 +250,7 @@ void
tcp_init()
{
int hashsize = TCBHASHSIZE;
-
+
tcp_ccgen = 1;
tcp_delacktime = TCPTV_DELACK;
@@ -274,7 +274,7 @@ tcp_init()
tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
&tcbinfo.porthashmask);
- tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
+ tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
@@ -290,10 +290,10 @@ tcp_init()
/*
* These have to be type stable for the benefit of the timers.
*/
- tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
+ tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcpcb_zone, maxsockets);
- tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
+ tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(tcptw_zone, maxsockets / 5);
tcp_timer_init();
@@ -304,7 +304,7 @@ tcp_init()
tcp_isn_tick(NULL);
EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
SHUTDOWN_PRI_DEFAULT);
- sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
+ sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
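For readers following the tcp_init() hunks: the reindented calls create type-stable UMA zones for the TCP control blocks. A minimal kernel-side sketch of the same allocation pattern, with hypothetical names, and with the ctor/dtor/init/fini hooks left NULL exactly as in the code above:

	#include <sys/param.h>
	#include <sys/malloc.h>		/* M_WAITOK */
	#include <vm/uma.h>

	struct example { int field; };

	static uma_zone_t example_zone;

	static void
	example_zone_init(void)
	{
		/* UMA_ZONE_NOFREE keeps items type-stable, as timers need. */
		example_zone = uma_zcreate("example", sizeof(struct example),
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
		uma_zone_set_max(example_zone, 1024);	/* arbitrary cap */
	}

	static void
	example_zone_use(void)
	{
		struct example *e;

		e = uma_zalloc(example_zone, M_WAITOK);	/* may sleep */
		e->field = 42;
		uma_zfree(example_zone, e);
	}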
@@ -459,7 +459,7 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
m->m_data += max_linkhdr;
#ifdef INET6
if (isipv6) {
- bcopy((caddr_t)ip6, mtod(m, caddr_t),
+ bcopy((caddr_t)ip6, mtod(m, caddr_t),
sizeof(struct ip6_hdr));
ip6 = mtod(m, struct ip6_hdr *);
nth = (struct tcphdr *)(ip6 + 1);
@@ -511,13 +511,13 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
} else
#endif
- {
- tlen += sizeof (struct tcpiphdr);
- ip->ip_len = tlen;
- ip->ip_ttl = ip_defttl;
- if (path_mtu_discovery)
- ip->ip_off |= IP_DF;
- }
+ {
+ tlen += sizeof (struct tcpiphdr);
+ ip->ip_len = tlen;
+ ip->ip_ttl = ip_defttl;
+ if (path_mtu_discovery)
+ ip->ip_off |= IP_DF;
+ }
m->m_len = tlen;
m->m_pkthdr.len = tlen;
m->m_pkthdr.rcvif = NULL;
@@ -557,12 +557,12 @@ tcp_respond(tp, ipgen, th, m, ack, seq, flags)
NULL, NULL);
} else
#endif /* INET6 */
- {
- nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
- m->m_pkthdr.csum_flags = CSUM_TCP;
- m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
- }
+ {
+ nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ }
#ifdef TCPDEBUG
if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
@@ -636,7 +636,7 @@ tcp_newtcpcb(inp)
tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->t_rcvtime = ticks;
tp->t_bw_rtttime = ticks;
- /*
+ /*
* IPv4 TTL initialization is necessary for an IPv6 socket as well,
* because the socket may be bound to an IPv6 wildcard address,
* which may match an IPv4-mapped IPv6 address.
@@ -740,7 +740,7 @@ tcp_discardcb(tp)
/* XXX: This wraps if the pipe is more than 4 Gbit per second */
metrics.rmx_bandwidth = tp->snd_bandwidth;
metrics.rmx_cwnd = tp->snd_cwnd;
- metrics.rmx_sendpipe = 0;
+ metrics.rmx_sendpipe = 0;
metrics.rmx_recvpipe = 0;
tcp_hc_update(&inp->inp_inc, &metrics);
@@ -801,8 +801,8 @@ tcp_drain()
* if there is one...
* XXX: The "Net/3" implementation doesn't imply that the TCP
* reassembly queue should be flushed, but in a situation
- * where we're really low on mbufs, this is potentially
- * usefull.
+ * where we're really low on mbufs, this is potentially
+ * usefull.
*/
INP_INFO_RLOCK(&tcbinfo);
LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
@@ -914,7 +914,7 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
if (inp_list == NULL)
return ENOMEM;
-
+
s = splnet();
INP_INFO_RLOCK(&tcbinfo);
for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
@@ -1143,7 +1143,7 @@ tcp_ctlinput(cmd, sa, vip)
return;
if (ip != NULL) {
s = splnet();
- th = (struct tcphdr *)((caddr_t)ip
+ th = (struct tcphdr *)((caddr_t)ip
+ (ip->ip_hl << 2));
INP_INFO_WLOCK(&tcbinfo);
inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
@@ -1154,7 +1154,7 @@ tcp_ctlinput(cmd, sa, vip)
icmp_seq = htonl(th->th_seq);
tp = intotcpcb(inp);
if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
- SEQ_LT(icmp_seq, tp->snd_max))
+ SEQ_LT(icmp_seq, tp->snd_max))
inp = (*notify)(inp, inetctlerrmap[cmd]);
}
if (inp != NULL)
@@ -1269,7 +1269,7 @@ tcp6_ctlinput(cmd, sa, d)
* depends on this property. In addition, these ISNs should be
* unguessable so as to prevent connection hijacking. To satisfy
* the requirements of this situation, the algorithm outlined in
- * RFC 1948 is used, with only small modifications.
+ * RFC 1948 is used, with only small modifications.
*
* Implementation details:
*
@@ -1319,7 +1319,7 @@ tcp_new_isn(tp)
read_random(&isn_secret, sizeof(isn_secret));
isn_last_reseed = ticks;
}
-
+
/* Compute the md5 hash and return the ISN. */
MD5Init(&isn_ctx);
MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
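This hunk is whitespace-only, but it sits inside the RFC 1948 ISN computation described above: hash the connection 4-tuple together with a periodically reseeded secret, then add a monotonic offset. A condensed sketch of the IPv4 case (simplified; the reseeding and offset bookkeeping shown elsewhere in the routine are omitted):

	#include <sys/types.h>
	#include <sys/systm.h>	/* bcopy (kernel) */
	#include <sys/md5.h>

	/* Sketch: ISN = low 32 bits of MD5(ports, addrs, secret) + offset. */
	static u_int32_t
	isn_sketch(u_int16_t fport, u_int16_t lport, u_int32_t faddr,
	    u_int32_t laddr, const u_char secret[32], u_int32_t offset)
	{
		MD5_CTX ctx;
		u_char digest[16];
		u_int32_t isn;

		MD5Init(&ctx);
		MD5Update(&ctx, (const u_char *)&fport, sizeof(fport));
		MD5Update(&ctx, (const u_char *)&lport, sizeof(lport));
		MD5Update(&ctx, (const u_char *)&faddr, sizeof(faddr));
		MD5Update(&ctx, (const u_char *)&laddr, sizeof(laddr));
		MD5Update(&ctx, secret, 32);
		MD5Final(digest, &ctx);
		bcopy(digest, &isn, sizeof(isn));	/* take 32 hash bits */
		return (isn + offset);
	}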
@@ -1357,7 +1357,7 @@ tcp_isn_tick(xtp)
void *xtp;
{
u_int32_t projected_offset;
-
+
projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz;
if (projected_offset > isn_offset)
@@ -1512,7 +1512,7 @@ tcp_mtudisc(inp, errno)
* is called by TCP routines that access the rmx structure and by tcp_mss
* to get the interface MTU.
*/
-u_long
+u_long
tcp_maxmtu(inc)
struct in_conninfo *inc;
{
@@ -1605,13 +1605,13 @@ ipsec_hdrsiz_tcp(tp)
hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
} else
#endif /* INET6 */
- {
- ip = mtod(m, struct ip *);
- th = (struct tcphdr *)(ip + 1);
- m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
- tcpip_fillheaders(inp, ip, th);
- hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
- }
+ {
+ ip = mtod(m, struct ip *);
+ th = (struct tcphdr *)(ip + 1);
+ m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
+ tcpip_fillheaders(inp, ip, th);
+ hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
+ }
m_free(m);
return hdrsiz;
@@ -1651,8 +1651,8 @@ tcp_twstart(tp)
/*
* Set t_recent if timestamps are used on the connection.
*/
- if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
- (TF_REQ_TSTMP|TF_RCVD_TSTMP))
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
+ (TF_REQ_TSTMP|TF_RCVD_TSTMP))
tw->t_recent = tp->ts_recent;
else
tw->t_recent = 0;
@@ -1719,7 +1719,7 @@ tcp_twrecycleable(struct tcptw *tw)
new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
-
+
if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
return 1;
else
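tcp_twrecycleable() in the hunk above leans on SEQ_GT(), which compares 32-bit sequence numbers by signed difference so the ordering survives wraparound; the classic definitions from netinet/tcp_seq.h:

	#define	SEQ_LT(a,b)	((int)((a)-(b)) < 0)
	#define	SEQ_LEQ(a,b)	((int)((a)-(b)) <= 0)
	#define	SEQ_GT(a,b)	((int)((a)-(b)) > 0)
	#define	SEQ_GEQ(a,b)	((int)((a)-(b)) >= 0)

For example, with snd_nxt at 0xfffffff0 and new_iss wrapped around to 0x00000010, (int)(0x00000010 - 0xfffffff0) is 32 > 0, so SEQ_GT(new_iss, snd_nxt) still treats new_iss as later even though it is numerically smaller.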
@@ -1789,31 +1789,31 @@ tcp_twrespond(struct tcptw *tw, int flags)
tcpip_fillheaders(inp, ip, th);
}
optp = (u_int8_t *)(th + 1);
-
- /*
+
+ /*
* Send a timestamp and echo-reply if both our side and our peer
* have sent timestamps in our SYN's and this is not a RST.
- */
+ */
if (tw->t_recent && flags == TH_ACK) {
u_int32_t *lp = (u_int32_t *)optp;
- /* Form timestamp option as shown in appendix A of RFC 1323. */
- *lp++ = htonl(TCPOPT_TSTAMP_HDR);
- *lp++ = htonl(ticks);
- *lp = htonl(tw->t_recent);
- optp += TCPOLEN_TSTAMP_APPA;
- }
+ /* Form timestamp option as shown in appendix A of RFC 1323. */
+ *lp++ = htonl(TCPOPT_TSTAMP_HDR);
+ *lp++ = htonl(ticks);
+ *lp = htonl(tw->t_recent);
+ optp += TCPOLEN_TSTAMP_APPA;
+ }
- /*
+ /*
* Send `CC-family' options if needed, and it's not a RST.
- */
+ */
if (tw->cc_recv != 0 && flags == TH_ACK) {
u_int32_t *lp = (u_int32_t *)optp;
*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
*lp = htonl(tw->cc_send);
optp += TCPOLEN_CC_APPA;
- }
+ }
optlen = optp - (u_int8_t *)(th + 1);
m->m_len = hdrlen + optlen;
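The timestamp block reindented above emits the layout from Appendix A of RFC 1323: two NOPs for 32-bit alignment, then kind 8, length 10, TSval, TSecr. A small sketch restating what those three stores produce, with the constants as defined in netinet/tcp.h:

	#include <sys/types.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	/* Write a 12-byte NOP,NOP,TS option; returns the advanced pointer. */
	static u_int8_t *
	tsopt_sketch(u_int8_t *optp, u_int32_t tsval, u_int32_t tsecr)
	{
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_TSTAMP_HDR);	/* 0x0101080a */
		*lp++ = htonl(tsval);			/* our clock */
		*lp = htonl(tsecr);			/* echo of peer's */
		return (optp + TCPOLEN_TSTAMP_APPA);	/* 12 bytes */
	}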
@@ -1838,7 +1838,7 @@ tcp_twrespond(struct tcptw *tw, int flags)
#endif
{
th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
- htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
+ htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
ip->ip_len = m->m_pkthdr.len;
@@ -1868,13 +1868,13 @@ tcp_twrespond(struct tcptw *tw, int flags)
* side of the connection.
*
* BACKGROUND: TCP makes no provision for the management of buffer space
- * at the end points or at the intermediate routers and switches. A TCP
+ * at the end points or at the intermediate routers and switches. A TCP
* stream, whether using NewReno or not, will eventually buffer as
* many packets as it is able and the only reason this typically works is
* due to the fairly small default buffers made available for a connection
* (typicaly 16K or 32K). As machines use larger windows and/or window
* scaling it is now fairly easy for even a single TCP connection to blow-out
- * all available buffer space not only on the local interface, but on
+ * all available buffer space not only on the local interface, but on
* intermediate routers and switches as well. NewReno makes a misguided
* attempt to 'solve' this problem by waiting for an actual failure to occur,
* then backing off, then steadily increasing the window again until another
@@ -1896,7 +1896,7 @@ tcp_twrespond(struct tcptw *tw, int flags)
*
* The second method is to limit the window to the bandwidth delay product
* of the link. This is the method we implement. RTT variances and our
- * own manipulation of the congestion window, bwnd, can potentially
+ * own manipulation of the congestion window, bwnd, can potentially
* destabilize the algorithm. For this reason we have to stabilize the
* elements used to calculate the window. We do this by using the minimum
* observed RTT, the long term average of the observed bandwidth, and
@@ -1936,7 +1936,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
return;
- bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
+ bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
(save_ticks - tp->t_bw_rtttime);
tp->t_bw_rtttime = save_ticks;
tp->t_bw_rtseq = ack_seq;
@@ -1956,7 +1956,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
* Situations Handled:
* (1) Prevents over-queueing of packets on LANs, especially on
* high speed LANs, allowing larger TCP buffers to be
- * specified, and also does a good job preventing
+ * specified, and also does a good job preventing
* over-queueing of packets over choke points like modems
* (at least for the transmit side).
*
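The block comment above (only reflowed by this patch) argues for clamping the send window to the bandwidth-delay product of the path. Stripped of the smoothing the real code applies to bandwidth and RTT, the arithmetic is just bandwidth times round-trip time; a toy standalone illustration:

	#include <stdio.h>

	/*
	 * Toy BDP calculation, not the kernel algorithm: a 100 Mbit/s
	 * path with a 20 ms best-observed RTT can only "hold" about
	 * 250 KB, so larger windows merely queue packets in the path.
	 */
	int
	main(void)
	{
		long long bw = 100000000LL / 8;	/* bytes per second */
		long long rtt_ms = 20;		/* minimum observed RTT */
		long long bwnd = bw * rtt_ms / 1000;

		printf("bandwidth-delay product: %lld bytes\n", bwnd);
		return (0);
	}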
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index df6fa74..525b48a 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -467,7 +467,7 @@ tcp_usr_accept(struct socket *so, struct sockaddr **nam)
tp = intotcpcb(inp);
TCPDEBUG1();
- /*
+ /*
* We inline in_setpeeraddr and COMMON_END here, so that we can
* copy the data of interest and defer the malloc until after we
* release the lock.
@@ -511,7 +511,7 @@ tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
INP_INFO_RUNLOCK(&tcbinfo);
tp = intotcpcb(inp);
TCPDEBUG1();
- /*
+ /*
* We inline in6_mapped_peeraddr and COMMON_END here, so that we can
* copy the data of interest and defer the malloc until after we
* release the lock.
@@ -539,8 +539,8 @@ out: TCPDEBUG2(PRU_ACCEPT);
#endif /* INET6 */
/*
- * This is the wrapper function for in_setsockaddr. We just pass down
- * the pcbinfo for in_setsockaddr to lock. We don't want to do the locking
+ * This is the wrapper function for in_setsockaddr. We just pass down
+ * the pcbinfo for in_setsockaddr to lock. We don't want to do the locking
* here because in_setsockaddr will call malloc and can block.
*/
static int
@@ -602,7 +602,7 @@ tcp_usr_rcvd(struct socket *so, int flags)
* generally are caller-frees.
*/
static int
-tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
+tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
struct sockaddr *nam, struct mbuf *control, struct thread *td)
{
int error = 0;
@@ -728,7 +728,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
error = tcp_output(tp);
tp->t_force = 0;
}
- COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
+ COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
}
@@ -945,7 +945,7 @@ tcp6_connect(tp, nam, td)
/* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
if (inp->in6p_flags & IN6P_AUTOFLOWLABEL)
- inp->in6p_flowinfo |=
+ inp->in6p_flowinfo |=
(htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
in_pcbrehash(inp);
@@ -1133,10 +1133,10 @@ tcp_ctloutput(so, sopt)
* be set by the route).
*/
u_long tcp_sendspace = 1024*32;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
&tcp_sendspace , 0, "Maximum outgoing TCP datagram size");
u_long tcp_recvspace = 1024*64;
-SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
&tcp_recvspace , 0, "Maximum incoming TCP datagram size");
/*
diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
index c8a612d..4ba7a91 100644
--- a/sys/netinet/tcp_var.h
+++ b/sys/netinet/tcp_var.h
@@ -243,27 +243,27 @@ struct tcpopt {
tcp_cc to_cc; /* holds CC or CCnew */
tcp_cc to_ccecho;
u_int16_t to_mss;
- u_int8_t to_requested_s_scale;
- u_int8_t to_pad;
+ u_int8_t to_requested_s_scale;
+ u_int8_t to_pad;
};
#ifdef _NETINET_IN_PCB_H_
struct syncache {
inp_gen_t sc_inp_gencnt; /* pointer check */
- struct tcpcb *sc_tp; /* tcb for listening socket */
+ struct tcpcb *sc_tp; /* tcb for listening socket */
struct mbuf *sc_ipopts; /* source route */
- struct in_conninfo sc_inc; /* addresses */
+ struct in_conninfo sc_inc; /* addresses */
u_int32_t sc_tsrecent;
u_int32_t sc_flowlabel; /* IPv6 flowlabel */
tcp_cc sc_cc_send; /* holds CC or CCnew */
tcp_cc sc_cc_recv;
- tcp_seq sc_irs; /* seq from peer */
- tcp_seq sc_iss; /* our ISS */
+ tcp_seq sc_irs; /* seq from peer */
+ tcp_seq sc_iss; /* our ISS */
u_long sc_rxttime; /* retransmit time */
- u_int16_t sc_rxtslot; /* retransmit counter */
+ u_int16_t sc_rxtslot; /* retransmit counter */
u_int16_t sc_peer_mss; /* peer's MSS */
u_int16_t sc_wnd; /* advertised window */
- u_int8_t sc_requested_s_scale:4,
+ u_int8_t sc_requested_s_scale:4,
sc_request_r_scale:4;
u_int8_t sc_flags;
#define SCF_NOOPT 0x01 /* no TCP options */
@@ -307,12 +307,12 @@ struct tcptw {
u_short last_win; /* cached window value */
u_short tw_so_options; /* copy of so_options */
struct ucred *tw_cred; /* user credentials */
- u_long t_recent;
+ u_long t_recent;
u_long t_starttime;
int tw_time;
LIST_ENTRY(tcptw) tw_2msl;
};
-
+
/*
* The TAO cache entry which is stored in the tcp hostcache.
*/
@@ -462,7 +462,7 @@ struct tcpstat {
/* SACK related stats */
u_long tcps_sack_recovery_episode; /* SACK recovery episodes */
u_long tcps_sack_rexmits; /* SACK rexmit segments */
- u_long tcps_sack_rexmit_bytes; /* SACK rexmit bytes */
+ u_long tcps_sack_rexmit_bytes; /* SACK rexmit bytes */
u_long tcps_sack_rcv_blocks; /* SACK blocks (options) received */
u_long tcps_sack_send_blocks; /* SACK blocks (options) sent */
};
@@ -560,7 +560,7 @@ u_long tcp_maxmtu(struct in_conninfo *);
u_long tcp_maxmtu6(struct in_conninfo *);
void tcp_mss(struct tcpcb *, int);
int tcp_mssopt(struct in_conninfo *);
-struct inpcb *
+struct inpcb *
tcp_drop_syn_sent(struct inpcb *, int);
struct inpcb *
tcp_mtudisc(struct inpcb *, int);
@@ -623,7 +623,7 @@ void tcp_free_sackholes(struct tcpcb *tp);
int tcp_newreno(struct tcpcb *, struct tcphdr *);
u_long tcp_seq_subtract(u_long, u_long );
#ifdef TCP_SACK_DEBUG
-void tcp_print_holes(struct tcpcb *tp);
+void tcp_print_holes(struct tcpcb *tp);
#endif /* TCP_SACK_DEBUG */
#endif /* _KERNEL */
diff --git a/sys/netinet/tcpip.h b/sys/netinet/tcpip.h
index 49a4be3..c410da7 100644
--- a/sys/netinet/tcpip.h
+++ b/sys/netinet/tcpip.h
@@ -37,7 +37,7 @@
* Tcp+ip header, after ip options removed.
*/
struct tcpiphdr {
- struct ipovly ti_i; /* overlaid ip structure */
+ struct ipovly ti_i; /* overlaid ip structure */
struct tcphdr ti_t; /* tcp header */
};
#define ti_x1 ti_i.ih_x1
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index ef084a3..fec2b1c 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -97,7 +97,7 @@ SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
&udpcksum, 0, "");
int log_in_vain = 0;
-SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
&log_in_vain, 0, "Log all incoming UDP packets");
static int blackhole = 0;
@@ -240,7 +240,7 @@ udp_input(m, off)
if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
uh->uh_sum = m->m_pkthdr.csum_data;
else
- uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
+ uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htonl((u_short)len +
m->m_pkthdr.csum_data + IPPROTO_UDP));
uh->uh_sum ^= 0xffff;
@@ -533,13 +533,13 @@ udp_ctlinput(cmd, sa, vip)
struct ip *ip = vip;
struct udphdr *uh;
struct inpcb *(*notify)(struct inpcb *, int) = udp_notify;
- struct in_addr faddr;
+ struct in_addr faddr;
struct inpcb *inp;
int s;
faddr = ((struct sockaddr_in *)sa)->sin_addr;
if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
- return;
+ return;
/*
* Redirects don't need to be handled up here.
@@ -560,7 +560,7 @@ udp_ctlinput(cmd, sa, vip)
uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
INP_INFO_RLOCK(&udbinfo);
inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
- ip->ip_src, uh->uh_sport, 0, NULL);
+ ip->ip_src, uh->uh_sport, 0, NULL);
if (inp != NULL) {
INP_LOCK(inp);
if (inp->inp_socket != NULL) {
@@ -622,7 +622,7 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
if (inp_list == 0)
return ENOMEM;
-
+
s = splnet();
INP_INFO_RLOCK(&udbinfo);
for (inp = LIST_FIRST(udbinfo.listhead), i = 0; inp && i < n;
@@ -1080,7 +1080,7 @@ udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
ret = udp_output(inp, m, addr, control, td);
INP_UNLOCK(inp);
INP_INFO_WUNLOCK(&udbinfo);
- return ret;
+ return ret;
}
int
@@ -1102,8 +1102,8 @@ udp_shutdown(struct socket *so)
}
/*
- * This is the wrapper function for in_setsockaddr. We just pass down
- * the pcbinfo for in_setsockaddr to lock. We don't want to do the locking
+ * This is the wrapper function for in_setsockaddr. We just pass down
+ * the pcbinfo for in_setsockaddr to lock. We don't want to do the locking
* here because in_setsockaddr will call malloc and might block.
*/
static int
@@ -1123,9 +1123,9 @@ udp_peeraddr(struct socket *so, struct sockaddr **nam)
}
struct pr_usrreqs udp_usrreqs = {
- udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
- pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
- pru_listen_notsupp, udp_peeraddr, pru_rcvd_notsupp,
+ udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect,
+ pru_connect2_notsupp, in_control, udp_detach, udp_disconnect,
+ pru_listen_notsupp, udp_peeraddr, pru_rcvd_notsupp,
pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown,
udp_sockaddr, sosend, soreceive, sopoll, in_pcbsosetlabel
};
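udp_usrreqs above is a positional initializer for struct pr_usrreqs, the per-protocol dispatch table consulted by the socket layer; operations UDP cannot support point at pru_*_notsupp stubs. A reduced, hypothetical sketch of that pattern (the field set here is invented for illustration; the real struct has many more slots):

	#include <sys/errno.h>

	struct mini_usrreqs {
		int (*pru_accept)(void *so);
		int (*pru_send)(void *so, const void *buf, int len);
	};

	static int
	mini_accept_notsupp(void *so)
	{
		return (EOPNOTSUPP);	/* datagram sockets cannot accept() */
	}

	static int
	mini_udp_send(void *so, const void *buf, int len)
	{
		/* The real entry wraps udp_output() under the pcbinfo lock. */
		return (0);
	}

	static struct mini_usrreqs mini_udp_usrreqs = {
		mini_accept_notsupp,
		mini_udp_send,
	};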
diff --git a/sys/netinet/udp_var.h b/sys/netinet/udp_var.h
index 216ae4f..4fc9a1d 100644
--- a/sys/netinet/udp_var.h
+++ b/sys/netinet/udp_var.h
@@ -37,7 +37,7 @@
* UDP kernel structures and variables.
*/
struct udpiphdr {
- struct ipovly ui_i; /* overlaid ip structure */
+ struct ipovly ui_i; /* overlaid ip structure */
struct udphdr ui_u; /* udp header */
};
#define ui_x1 ui_i.ih_x1