author     julian <julian@FreeBSD.org>    2008-08-20 01:05:56 +0000
committer  julian <julian@FreeBSD.org>    2008-08-20 01:05:56 +0000
commit     0592958505e144fa8a1cdff63ecc2e605ac5e407 (patch)
tree       1a19e6226789a77aca65db762eda0fb078daa7e5 /sys/netinet
parent     e808f0ec7052dba8316366ce3f9ae8f001c34252 (diff)
A bunch of formatting fixes brought to light by, or created by, the Vimage commit
a few days ago.
Diffstat (limited to 'sys/netinet')
-rw-r--r--  sys/netinet/ip_fw2.c         10
-rw-r--r--  sys/netinet/ip_input.c        3
-rw-r--r--  sys/netinet/ip_mroute.c       2
-rw-r--r--  sys/netinet/raw_ip.c          8
-rw-r--r--  sys/netinet/tcp_hostcache.c  17
-rw-r--r--  sys/netinet/tcp_input.c       6
-rw-r--r--  sys/netinet/tcp_subr.c       17
-rw-r--r--  sys/netinet/tcp_syncache.c    6
8 files changed, 40 insertions(+), 29 deletions(-)
diff --git a/sys/netinet/ip_fw2.c b/sys/netinet/ip_fw2.c
index 7cfe653..f58899a 100644
--- a/sys/netinet/ip_fw2.c
+++ b/sys/netinet/ip_fw2.c
@@ -4090,11 +4090,12 @@ ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
if (bp + i <= ep) {
bcopy(rule, bp, i);
/*
- * XXX HACK. Store the disable mask in the "next" pointer
- * in a wild attempt to keep the ABI the same.
+ * XXX HACK. Store the disable mask in the "next"
+ * pointer in a wild attempt to keep the ABI the same.
* Why do we do this on EVERY rule?
*/
- bcopy(&V_set_disable, &(((struct ip_fw *)bp)->next_rule),
+ bcopy(&V_set_disable,
+ &(((struct ip_fw *)bp)->next_rule),
sizeof(V_set_disable));
if (((struct ip_fw *)bp)->timestamp)
((struct ip_fw *)bp)->timestamp += boot_seconds;
@@ -4483,7 +4484,8 @@ ipfw_tick(void * __unused unused)
ip_output(m, NULL, NULL, 0, NULL, NULL);
}
done:
- callout_reset(&V_ipfw_timeout, V_dyn_keepalive_period*hz, ipfw_tick, NULL);
+ callout_reset(&V_ipfw_timeout, V_dyn_keepalive_period * hz,
+ ipfw_tick, NULL);
}
int
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 4ab975e6..7230408 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -798,7 +798,8 @@ ip_reass(struct mbuf *m)
for (i = 0; i < IPREASS_NHASH; i++) {
struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
if (r) {
- V_ipstat.ips_fragtimeout += r->ipq_nfrags;
+ V_ipstat.ips_fragtimeout +=
+ r->ipq_nfrags;
ip_freef(&V_ipq[i], r);
break;
}
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index 0476a36..730f0af 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -1891,7 +1891,7 @@ X_rsvp_input(struct mbuf *m, int off)
struct ifnet *ifp;
if (rsvpdebug)
- printf("rsvp_input: rsvp_on %d\n",V_rsvp_on);
+ printf("rsvp_input: rsvp_on %d\n", V_rsvp_on);
/* Can still get packets with rsvp_on = 0 if there is a local member
* of the group to which the RSVP packet is addressed. But in this
diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c
index 7ea0113..9e70ceb 100644
--- a/sys/netinet/raw_ip.c
+++ b/sys/netinet/raw_ip.c
@@ -178,10 +178,10 @@ rip_init(void)
INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
LIST_INIT(&V_ripcb);
V_ripcbinfo.ipi_listhead = &V_ripcb;
- V_ripcbinfo.ipi_hashbase = hashinit(INP_PCBHASH_RAW_SIZE, M_PCB,
- &V_ripcbinfo.ipi_hashmask);
- V_ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
- &V_ripcbinfo.ipi_porthashmask);
+ V_ripcbinfo.ipi_hashbase =
+ hashinit(INP_PCBHASH_RAW_SIZE, M_PCB, &V_ripcbinfo.ipi_hashmask);
+ V_ripcbinfo.ipi_porthashbase =
+ hashinit(1, M_PCB, &V_ripcbinfo.ipi_porthashmask);
V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index bf8f616..3a7e9e1 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -248,15 +248,17 @@ tcp_hc_init(void)
/*
* Allocate the hostcache entries.
*/
- V_tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
- NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ V_tcp_hostcache.zone =
+ uma_zcreate("hostcache", sizeof(struct hc_metrics),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
/*
* Set up periodic cache cleanup.
*/
callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
- callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
+ tcp_hc_purge, 0);
}
/*
@@ -667,8 +669,9 @@ tcp_hc_purge(void *arg)
for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
- TAILQ_FOREACH_SAFE(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
- rmx_q, hc_next) {
+ TAILQ_FOREACH_SAFE(hc_entry,
+ &V_tcp_hostcache.hashbase[i].hch_bucket,
+ rmx_q, hc_next) {
if (all || hc_entry->rmx_expire <= 0) {
TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
hc_entry, rmx_q);
@@ -680,5 +683,7 @@ tcp_hc_purge(void *arg)
}
THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
}
- callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
+ tcp_hc_purge, arg);
}
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index fae2ba6..f8ae94e 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -212,7 +212,6 @@ do { \
(tp->t_flags & TF_RXWIN0SENT) == 0) && \
(V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
-
/*
* TCP input handling is split into multiple parts:
* tcp6_input is a thin wrapper around tcp_input for the extended
@@ -654,8 +653,9 @@ findpcb:
log(LOG_DEBUG, "%s; %s: Listen socket: "
"Socket allocation failed due to "
"limits or memory shortage, %s\n",
- s, __func__, (V_tcp_sc_rst_sock_fail ?
- "sending RST" : "try again"));
+ s, __func__,
+ V_tcp_sc_rst_sock_fail ?
+ "sending RST" : "try again");
if (V_tcp_sc_rst_sock_fail) {
rstreason = BANDLIM_UNLIMITED;
goto dropwithreset;
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 659626c..5f98078 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -941,8 +941,8 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
INP_INFO_RLOCK(&V_tcbinfo);
- for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0; inp != NULL && i
- < n; inp = LIST_NEXT(inp, inp_list)) {
+ for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
+ inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
INP_RLOCK(inp);
if (inp->inp_gencnt <= gencnt) {
/*
@@ -1037,8 +1037,8 @@ tcp_getcred(SYSCTL_HANDLER_ARGS)
if (error)
return (error);
INP_INFO_RLOCK(&V_tcbinfo);
- inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
- addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
+ inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr,
+ addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
if (inp != NULL) {
INP_RLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
@@ -1209,7 +1209,7 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
if (!mtu)
mtu = ip_next_mtu(ip->ip_len,
1);
- if (mtu < max(296, (V_tcp_minmss)
+ if (mtu < max(296, V_tcp_minmss
+ sizeof(struct tcpiphdr)))
mtu = 0;
if (!mtu)
@@ -1757,7 +1757,8 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
* If inflight_enable is disabled in the middle of a tcp connection,
* make sure snd_bwnd is effectively disabled.
*/
- if (V_tcp_inflight_enable == 0 || tp->t_rttlow < V_tcp_inflight_rttthresh) {
+ if (V_tcp_inflight_enable == 0 ||
+ tp->t_rttlow < V_tcp_inflight_rttthresh) {
tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_bandwidth = 0;
return;
@@ -2039,8 +2040,8 @@ sysctl_drop(SYSCTL_HANDLER_ARGS)
break;
#endif
case AF_INET:
- inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr, fin->sin_port,
- lin->sin_addr, lin->sin_port, 0, NULL);
+ inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr,
+ fin->sin_port, lin->sin_addr, lin->sin_port, 0, NULL);
break;
}
if (inp != NULL) {
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index fd580d2..df2a682 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -286,7 +286,8 @@ syncache_init(void)
&V_tcp_syncache.hashsize);
TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
&V_tcp_syncache.bucket_limit);
- if (!powerof2(V_tcp_syncache.hashsize) || V_tcp_syncache.hashsize == 0) {
+ if (!powerof2(V_tcp_syncache.hashsize) ||
+ V_tcp_syncache.hashsize == 0) {
printf("WARNING: syncache hash size is not a power of 2.\n");
V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
}
@@ -1569,7 +1570,8 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc,
off = sc->sc_iss & 0x7; /* iss was randomized before */
/* Maximum segment size calculation. */
- pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), V_tcp_minmss);
+ pmss =
+ max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), V_tcp_minmss);
for (mss = sizeof(tcp_sc_msstab) / sizeof(int) - 1; mss > 0; mss--)
if (tcp_sc_msstab[mss] <= pmss)
break;