author      bz <bz@FreeBSD.org>    2008-08-17 23:27:27 +0000
committer   bz <bz@FreeBSD.org>    2008-08-17 23:27:27 +0000
commit      1021d43b569bfc8d2c5544bde2f540fa432b011f (patch)
tree        1496da534aec03cf2f9d2d0735d80e4c1e3b5715 /sys/netinet
parent      7fc341305a3e341fca7f202fc1219358f8d9dbbd (diff)
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).

This is the first in a series of commits over the course of the
next few weeks.

Mark all uses of global variables to be virtualized with a V_ prefix.
Use macros to map them back to their global names for now, so this is
a NOP change only. We hope to have caught at least 85-90% of what is
needed so we do not invalidate a lot of outstanding patches again.

Obtained from:			//depot/projects/vimage-commit2/...
Reviewed by:			brooks, des, ed, mav, julian, jamie, kris, rwatson,
				zec, ... (various people I forgot, different versions)
				md5 (with a bit of help)
Sponsored by:			NLnet Foundation, The FreeBSD Foundation
X-MFC after:			never
V_Commit_Message_Reviewed_By:	more people than the patch
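
For readers unfamiliar with the approach, the following is a minimal sketch of
the V_ mapping described above. It is illustrative only, assuming macro names
that mirror the globals converted in the diff below; the exact contents of
<sys/vimage.h> may differ. While only a single network stack exists, each V_
name expands straight back to the existing global, so every converted call
site compiles to the same object code and the commit stays a functional NOP.

/*
 * Illustrative sketch only -- not the literal <sys/vimage.h> contents.
 * Each virtualized global gets a V_ alias that, for now, maps straight
 * back to the global symbol.
 */
#define	V_ipforwarding	ipforwarding
#define	V_in_ifaddrhead	in_ifaddrhead
#define	V_tcbinfo	tcbinfo
#define	V_ipstat	ipstat

/*
 * In a later step the same macros can be redefined to select a
 * per-instance copy, e.g. (hypothetical layout):
 *
 *	#define	V_ipforwarding	(curvnet->vnet_inet.ipforwarding)
 *
 * without touching any of the call sites converted in this diff.
 */

This is why the diff below is mechanical: each access to a to-be-virtualized
global is renamed to its V_ form while behaviour remains identical.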
Diffstat (limited to 'sys/netinet')
-rw-r--r--   sys/netinet/if_ether.c         21
-rw-r--r--   sys/netinet/igmp.c             35
-rw-r--r--   sys/netinet/in.c               23
-rw-r--r--   sys/netinet/in_gif.c           11
-rw-r--r--   sys/netinet/in_mcast.c         17
-rw-r--r--   sys/netinet/in_pcb.c           67
-rw-r--r--   sys/netinet/in_rmx.c           39
-rw-r--r--   sys/netinet/in_var.h            6
-rw-r--r--   sys/netinet/ip6.h               8
-rw-r--r--   sys/netinet/ip_carp.c           5
-rw-r--r--   sys/netinet/ip_divert.c        91
-rw-r--r--   sys/netinet/ip_fastfwd.c       39
-rw-r--r--   sys/netinet/ip_fw2.c          279
-rw-r--r--   sys/netinet/ip_fw_nat.c        71
-rw-r--r--   sys/netinet/ip_fw_pfil.c        5
-rw-r--r--   sys/netinet/ip_icmp.c          39
-rw-r--r--   sys/netinet/ip_input.c        197
-rw-r--r--   sys/netinet/ip_ipsec.c          3
-rw-r--r--   sys/netinet/ip_mroute.c        35
-rw-r--r--   sys/netinet/ip_options.c       17
-rw-r--r--   sys/netinet/ip_output.c        35
-rw-r--r--   sys/netinet/ip_var.h            5
-rw-r--r--   sys/netinet/raw_ip.c          109
-rw-r--r--   sys/netinet/sctp_os_bsd.h      13
-rw-r--r--   sys/netinet/sctp_pcb.c          4
-rw-r--r--   sys/netinet/tcp_hostcache.c   133
-rw-r--r--   sys/netinet/tcp_input.c       231
-rw-r--r--   sys/netinet/tcp_offload.c      13
-rw-r--r--   sys/netinet/tcp_output.c       55
-rw-r--r--   sys/netinet/tcp_reass.c        37
-rw-r--r--   sys/netinet/tcp_sack.c         13
-rw-r--r--   sys/netinet/tcp_subr.c        197
-rw-r--r--   sys/netinet/tcp_syncache.c    157
-rw-r--r--   sys/netinet/tcp_timer.c        65
-rw-r--r--   sys/netinet/tcp_timewait.c     43
-rw-r--r--   sys/netinet/tcp_usrreq.c       91
-rw-r--r--   sys/netinet/udp_usrreq.c      161
37 files changed, 1204 insertions, 1166 deletions
diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c
index 206cf37..6f489a2 100644
--- a/sys/netinet/if_ether.c
+++ b/sys/netinet/if_ether.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/syslog.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -249,7 +250,7 @@ arp_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
}
#endif
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
if (ia->ia_ifp == rt->rt_ifp &&
SIN(rt_key(rt))->sin_addr.s_addr ==
(IA_SIN(ia))->sin_addr.s_addr)
@@ -269,9 +270,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
rt->rt_expire = 0;
bcopy(IF_LLADDR(rt->rt_ifp), LLADDR(SDL(gate)),
SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
- if (useloopback) {
- rt->rt_ifp = loif;
- rt->rt_rmx.rmx_mtu = loif->if_mtu;
+ if (V_useloopback) {
+ rt->rt_ifp = V_loif;
+ rt->rt_rmx.rmx_mtu = V_loif->if_mtu;
}
/*
@@ -478,7 +479,7 @@ arpresolve(struct ifnet *ifp, struct rtentry *rt0, struct mbuf *m,
* if we have already sent arp_maxtries ARP requests. Retransmit the
* ARP request, but not faster than one request per second.
*/
- if (la->la_asked < arp_maxtries)
+ if (la->la_asked < V_arp_maxtries)
error = EWOULDBLOCK; /* First request. */
else
error = (rt == rt0) ? EHOSTDOWN : EHOSTUNREACH;
@@ -657,7 +658,7 @@ in_arpinput(struct mbuf *m)
/*
* If bridging, fall back to using any inet address.
*/
- if (!bridged || (ia = TAILQ_FIRST(&in_ifaddrhead)) == NULL)
+ if (!bridged || (ia = TAILQ_FIRST(&V_in_ifaddrhead)) == NULL)
goto drop;
match:
if (!enaddr)
@@ -826,12 +827,12 @@ match:
}
if (rt->rt_expire) {
- rt->rt_expire = time_uptime + arpt_keep;
- callout_reset(&la->la_timer, hz * arpt_keep,
+ rt->rt_expire = time_uptime + V_arpt_keep;
+ callout_reset(&la->la_timer, hz * V_arpt_keep,
arptimer, rt);
}
la->la_asked = 0;
- la->la_preempt = arp_maxtries;
+ la->la_preempt = V_arp_maxtries;
hold = la->la_hold;
la->la_hold = NULL;
RT_UNLOCK(rt);
@@ -864,7 +865,7 @@ reply:
/* Nope, only intersted now if proxying everything. */
struct sockaddr_in sin;
- if (!arp_proxyall)
+ if (!V_arp_proxyall)
goto drop;
bzero(&sin, sizeof sin);
diff --git a/sys/netinet/igmp.c b/sys/netinet/igmp.c
index 811307c..54dd41d 100644
--- a/sys/netinet/igmp.c
+++ b/sys/netinet/igmp.c
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -138,7 +139,7 @@ igmp_init(void)
router_alert->m_len = sizeof(ra->ipopt_dst) + ra->ipopt_list[1];
mtx_init(&igmp_mtx, "igmp_mtx", NULL, MTX_DEF);
- SLIST_INIT(&router_info_head);
+ SLIST_INIT(&V_router_info_head);
}
static struct router_info *
@@ -148,7 +149,7 @@ find_rti(struct ifnet *ifp)
mtx_assert(&igmp_mtx, MA_OWNED);
IGMP_PRINTF("[igmp.c, _find_rti] --> entering \n");
- SLIST_FOREACH(rti, &router_info_head, rti_list) {
+ SLIST_FOREACH(rti, &V_router_info_head, rti_list) {
if (rti->rti_ifp == ifp) {
IGMP_PRINTF(
"[igmp.c, _find_rti] --> found old entry \n");
@@ -163,7 +164,7 @@ find_rti(struct ifnet *ifp)
rti->rti_ifp = ifp;
rti->rti_type = IGMP_V2_ROUTER;
rti->rti_time = 0;
- SLIST_INSERT_HEAD(&router_info_head, rti, rti_list);
+ SLIST_INSERT_HEAD(&V_router_info_head, rti, rti_list);
IGMP_PRINTF("[igmp.c, _find_rti] --> created an entry \n");
return (rti);
}
@@ -183,7 +184,7 @@ igmp_input(register struct mbuf *m, int off)
struct router_info *rti;
int timer; /** timer value in the igmp query header **/
- ++igmpstat.igps_rcv_total;
+ ++V_igmpstat.igps_rcv_total;
ip = mtod(m, struct ip *);
igmplen = ip->ip_len;
@@ -192,14 +193,14 @@ igmp_input(register struct mbuf *m, int off)
* Validate lengths.
*/
if (igmplen < IGMP_MINLEN) {
- ++igmpstat.igps_rcv_tooshort;
+ ++V_igmpstat.igps_rcv_tooshort;
m_freem(m);
return;
}
minlen = iphlen + IGMP_MINLEN;
if ((m->m_flags & M_EXT || m->m_len < minlen) &&
(m = m_pullup(m, minlen)) == 0) {
- ++igmpstat.igps_rcv_tooshort;
+ ++V_igmpstat.igps_rcv_tooshort;
return;
}
@@ -210,7 +211,7 @@ igmp_input(register struct mbuf *m, int off)
m->m_len -= iphlen;
igmp = mtod(m, struct igmp *);
if (in_cksum(m, igmplen)) {
- ++igmpstat.igps_rcv_badsum;
+ ++V_igmpstat.igps_rcv_badsum;
m_freem(m);
return;
}
@@ -235,7 +236,7 @@ igmp_input(register struct mbuf *m, int off)
*/
switch (igmp->igmp_type) {
case IGMP_MEMBERSHIP_QUERY:
- ++igmpstat.igps_rcv_queries;
+ ++V_igmpstat.igps_rcv_queries;
if (ifp->if_flags & IFF_LOOPBACK)
break;
@@ -262,7 +263,7 @@ igmp_input(register struct mbuf *m, int off)
if (ip->ip_dst.s_addr != igmp_all_hosts_group ||
igmp->igmp_group.s_addr != 0) {
- ++igmpstat.igps_rcv_badqueries;
+ ++V_igmpstat.igps_rcv_badqueries;
m_freem(m);
return;
}
@@ -273,7 +274,7 @@ igmp_input(register struct mbuf *m, int off)
if (igmp->igmp_group.s_addr != 0 &&
!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) {
- ++igmpstat.igps_rcv_badqueries;
+ ++V_igmpstat.igps_rcv_badqueries;
m_freem(m);
return;
}
@@ -321,13 +322,13 @@ igmp_input(register struct mbuf *m, int off)
ip->ip_src.s_addr == IA_SIN(ia)->sin_addr.s_addr)
break;
- ++igmpstat.igps_rcv_reports;
+ ++V_igmpstat.igps_rcv_reports;
if (ifp->if_flags & IFF_LOOPBACK)
break;
if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) {
- ++igmpstat.igps_rcv_badreports;
+ ++V_igmpstat.igps_rcv_badreports;
m_freem(m);
return;
}
@@ -354,7 +355,7 @@ igmp_input(register struct mbuf *m, int off)
IN_LOOKUP_MULTI(igmp->igmp_group, ifp, inm);
if (inm != NULL) {
inm->inm_timer = 0;
- ++igmpstat.igps_rcv_ourreports;
+ ++V_igmpstat.igps_rcv_ourreports;
inm->inm_state = IGMP_OTHERMEMBER;
}
IN_MULTI_UNLOCK();
@@ -444,7 +445,7 @@ igmp_slowtimo(void)
IGMP_PRINTF("[igmp.c,_slowtimo] -- > entering \n");
mtx_lock(&igmp_mtx);
- SLIST_FOREACH(rti, &router_info_head, rti_list) {
+ SLIST_FOREACH(rti, &V_router_info_head, rti_list) {
if (rti->rti_type == IGMP_V1_ROUTER) {
rti->rti_time++;
if (rti->rti_time >= IGMP_AGE_THRESHOLD)
@@ -469,7 +470,7 @@ igmp_sendpkt(struct in_multi *inm, int type, unsigned long addr)
if (m == NULL)
return;
- m->m_pkthdr.rcvif = loif;
+ m->m_pkthdr.rcvif = V_loif;
#ifdef MAC
mac_netinet_igmp_send(inm->inm_ifp, m);
#endif
@@ -501,12 +502,12 @@ igmp_sendpkt(struct in_multi *inm, int type, unsigned long addr)
* Request loopback of the report if we are acting as a multicast
* router, so that the process-level routing daemon can hear it.
*/
- imo.imo_multicast_loop = (ip_mrouter != NULL);
+ imo.imo_multicast_loop = (V_ip_mrouter != NULL);
/*
* XXX: Do we have to worry about reentrancy here? Don't think so.
*/
ip_output(m, router_alert, &igmprt, 0, &imo, NULL);
- ++igmpstat.igps_snd_reports;
+ ++V_igmpstat.igps_snd_reports;
}
diff --git a/sys/netinet/in.c b/sys/netinet/in.c
index 4c74b07..1952fff 100644
--- a/sys/netinet/in.c
+++ b/sys/netinet/in.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_types.h>
@@ -88,12 +89,12 @@ in_localaddr(struct in_addr in)
register u_long i = ntohl(in.s_addr);
register struct in_ifaddr *ia;
- if (subnetsarelocal) {
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link)
+ if (V_subnetsarelocal) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link)
if ((i & ia->ia_netmask) == ia->ia_net)
return (1);
} else {
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link)
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link)
if ((i & ia->ia_subnetmask) == ia->ia_subnet)
return (1);
}
@@ -328,7 +329,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
}
ia->ia_ifp = ifp;
- TAILQ_INSERT_TAIL(&in_ifaddrhead, ia, ia_link);
+ TAILQ_INSERT_TAIL(&V_in_ifaddrhead, ia, ia_link);
splx(s);
iaIsNew = 1;
}
@@ -492,7 +493,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
*/
s = splnet();
TAILQ_REMOVE(&ifp->if_addrhead, &ia->ia_ifa, ifa_link);
- TAILQ_REMOVE(&in_ifaddrhead, ia, ia_link);
+ TAILQ_REMOVE(&V_in_ifaddrhead, ia, ia_link);
if (ia->ia_addr.sin_family == AF_INET) {
LIST_REMOVE(ia, ia_hash);
/*
@@ -822,7 +823,7 @@ in_addprefix(struct in_ifaddr *target, int flags)
prefix.s_addr &= mask.s_addr;
}
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
if (rtinitflags(ia)) {
p = ia->ia_addr.sin_addr;
@@ -843,7 +844,7 @@ in_addprefix(struct in_ifaddr *target, int flags)
* interface address, we are done here.
*/
if (ia->ia_flags & IFA_ROUTE) {
- if (sameprefixcarponly &&
+ if (V_sameprefixcarponly &&
target->ia_ifp->if_type != IFT_CARP &&
ia->ia_ifp->if_type != IFT_CARP)
return (EEXIST);
@@ -884,7 +885,7 @@ in_scrubprefix(struct in_ifaddr *target)
prefix.s_addr &= mask.s_addr;
}
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
if (rtinitflags(ia))
p = ia->ia_dstaddr.sin_addr;
else {
@@ -983,7 +984,7 @@ in_purgemaddrs(struct ifnet *ifp)
#endif
IFF_LOCKGIANT(ifp);
IN_MULTI_LOCK();
- LIST_FOREACH_SAFE(inm, &in_multihead, inm_link, oinm) {
+ LIST_FOREACH_SAFE(inm, &V_in_multihead, inm_link, oinm) {
if (inm->inm_ifp == ifp)
in_delmulti_locked(inm);
}
@@ -998,7 +999,7 @@ void
in_ifdetach(struct ifnet *ifp)
{
- in_pcbpurgeif0(&ripcbinfo, ifp);
- in_pcbpurgeif0(&udbinfo, ifp);
+ in_pcbpurgeif0(&V_ripcbinfo, ifp);
+ in_pcbpurgeif0(&V_udbinfo, ifp);
in_purgemaddrs(ifp);
}
diff --git a/sys/netinet/in_gif.c b/sys/netinet/in_gif.c
index 55b4ec7..cbbde54 100644
--- a/sys/netinet/in_gif.c
+++ b/sys/netinet/in_gif.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/malloc.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -176,7 +177,7 @@ in_gif_output(struct ifnet *ifp, int family, struct mbuf *m)
}
iphdr.ip_p = proto;
/* version will be set in ip_output() */
- iphdr.ip_ttl = ip_gif_ttl;
+ iphdr.ip_ttl = V_ip_gif_ttl;
iphdr.ip_len = m->m_pkthdr.len + sizeof(struct ip);
ip_ecn_ingress((ifp->if_flags & IFF_LINK1) ? ECN_ALLOWED : ECN_NOCARE,
&iphdr.ip_tos, &tos);
@@ -254,14 +255,14 @@ in_gif_input(struct mbuf *m, int off)
sc = (struct gif_softc *)encap_getarg(m);
if (sc == NULL) {
m_freem(m);
- ipstat.ips_nogif++;
+ V_ipstat.ips_nogif++;
return;
}
gifp = GIF2IFP(sc);
if (gifp == NULL || (gifp->if_flags & IFF_UP) == 0) {
m_freem(m);
- ipstat.ips_nogif++;
+ V_ipstat.ips_nogif++;
return;
}
@@ -321,7 +322,7 @@ in_gif_input(struct mbuf *m, int off)
break;
default:
- ipstat.ips_nogif++;
+ V_ipstat.ips_nogif++;
m_freem(m);
return;
}
@@ -354,7 +355,7 @@ gif_validate4(const struct ip *ip, struct gif_softc *sc, struct ifnet *ifp)
return 0;
}
/* reject packets with broadcast on source */
- TAILQ_FOREACH(ia4, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia4, &V_in_ifaddrhead, ia_link) {
if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0)
continue;
if (ip->ip_src.s_addr == ia4->ia_broadaddr.sin_addr.s_addr)
diff --git a/sys/netinet/in_mcast.c b/sys/netinet/in_mcast.c
index 9f37f33..0aa19a3 100644
--- a/sys/netinet/in_mcast.c
+++ b/sys/netinet/in_mcast.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -371,7 +372,7 @@ in_addmulti(struct in_addr *ap, struct ifnet *ifp)
ninm->inm_ifma = ifma;
ninm->inm_refcount = 1;
ifma->ifma_protospec = ninm;
- LIST_INSERT_HEAD(&in_multihead, ninm, inm_link);
+ LIST_INSERT_HEAD(&V_in_multihead, ninm, inm_link);
igmp_joingroup(ninm);
@@ -530,7 +531,7 @@ inp_change_source_filter(struct inpcb *inp, struct sockopt *sopt)
ssa->sin.sin_len != sizeof(struct sockaddr_in))
return (EINVAL);
- if (gsr.gsr_interface == 0 || if_index < gsr.gsr_interface)
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
return (EADDRNOTAVAIL);
ifp = ifnet_byindex(gsr.gsr_interface);
@@ -774,7 +775,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
if (error)
return (error);
- if (msfr.msfr_ifindex == 0 || if_index < msfr.msfr_ifindex)
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
return (EINVAL);
ifp = ifnet_byindex(msfr.msfr_ifindex);
@@ -1035,7 +1036,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
} else {
struct in_ifaddr *ia;
struct ifnet *mfp = NULL;
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
mfp = ia->ia_ifp;
if (!(mfp->if_flags & IFF_LOOPBACK) &&
(mfp->if_flags & IFF_MULTICAST)) {
@@ -1088,7 +1089,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt)
/*
* Obtain the ifp.
*/
- if (gsr.gsr_interface == 0 || if_index < gsr.gsr_interface)
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
return (EADDRNOTAVAIL);
ifp = ifnet_byindex(gsr.gsr_interface);
@@ -1297,7 +1298,7 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
return (EINVAL);
}
- if (gsr.gsr_interface == 0 || if_index < gsr.gsr_interface)
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
return (EADDRNOTAVAIL);
ifp = ifnet_byindex(gsr.gsr_interface);
@@ -1414,7 +1415,7 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
if (error)
return (error);
- if (mreqn.imr_ifindex < 0 || if_index < mreqn.imr_ifindex)
+ if (mreqn.imr_ifindex < 0 || V_if_index < mreqn.imr_ifindex)
return (EINVAL);
if (mreqn.imr_ifindex == 0) {
@@ -1495,7 +1496,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
gsa->sin.sin_port = 0; /* ignore port */
- if (msfr.msfr_ifindex == 0 || if_index < msfr.msfr_ifindex)
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
return (EADDRNOTAVAIL);
ifp = ifnet_byindex(msfr.msfr_ifindex);
diff --git a/sys/netinet/in_pcb.c b/sys/netinet/in_pcb.c
index 88f86fb..135182c 100644
--- a/sys/netinet/in_pcb.c
+++ b/sys/netinet/in_pcb.c
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#ifdef DDB
#include <ddb/ddb.h>
@@ -121,12 +122,12 @@ sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
if (error == 0) {
- RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
- RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
- RANGECHK(ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
- RANGECHK(ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
- RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
- RANGECHK(ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
+ RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
+ RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
}
return (error);
}
@@ -208,7 +209,7 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
#ifdef INET6
if (INP_SOCKAF(so) == AF_INET6) {
inp->inp_vflag |= INP_IPV6PROTO;
- if (ip6_v6only)
+ if (V_ip6_v6only)
inp->inp_flags |= IN6P_IPV6_V6ONLY;
}
#endif
@@ -216,7 +217,7 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
pcbinfo->ipi_count++;
so->so_pcb = (caddr_t)inp;
#ifdef INET6
- if (ip6_auto_flowlabel)
+ if (V_ip6_auto_flowlabel)
inp->inp_flags |= IN6P_AUTOFLOWLABEL;
#endif
INP_WLOCK(inp);
@@ -286,7 +287,7 @@ in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
INP_INFO_LOCK_ASSERT(pcbinfo);
INP_LOCK_ASSERT(inp);
- if (TAILQ_EMPTY(&in_ifaddrhead)) /* XXX broken! */
+ if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! */
return (EADDRNOTAVAIL);
laddr.s_addr = *laddrp;
if (nam != NULL && laddr.s_addr != INADDR_ANY)
@@ -337,8 +338,8 @@ in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
struct tcptw *tw;
/* GROSS */
- if (ntohs(lport) <= ipport_reservedhigh &&
- ntohs(lport) >= ipport_reservedlow &&
+ if (ntohs(lport) <= V_ipport_reservedhigh &&
+ ntohs(lport) >= V_ipport_reservedlow &&
priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
0))
return (EACCES);
@@ -406,20 +407,20 @@ in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
return (EINVAL);
if (inp->inp_flags & INP_HIGHPORT) {
- first = ipport_hifirstauto; /* sysctl */
- last = ipport_hilastauto;
+ first = V_ipport_hifirstauto; /* sysctl */
+ last = V_ipport_hilastauto;
lastport = &pcbinfo->ipi_lasthi;
} else if (inp->inp_flags & INP_LOWPORT) {
error = priv_check_cred(cred,
PRIV_NETINET_RESERVEDPORT, 0);
if (error)
return error;
- first = ipport_lowfirstauto; /* 1023 */
- last = ipport_lowlastauto; /* 600 */
+ first = V_ipport_lowfirstauto; /* 1023 */
+ last = V_ipport_lowlastauto; /* 600 */
lastport = &pcbinfo->ipi_lastlow;
} else {
- first = ipport_firstauto; /* sysctl */
- last = ipport_lastauto;
+ first = V_ipport_firstauto; /* sysctl */
+ last = V_ipport_lastauto;
lastport = &pcbinfo->ipi_lastport;
}
/*
@@ -428,8 +429,8 @@ in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
* use random port allocation only if the user allows it AND
* ipport_tick() allows it.
*/
- if (ipport_randomized &&
- (!ipport_stoprandom || pcbinfo == &udbinfo))
+ if (V_ipport_randomized &&
+ (!V_ipport_stoprandom || pcbinfo == &V_udbinfo))
dorandom = 1;
else
dorandom = 0;
@@ -440,8 +441,8 @@ in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
if (first == last)
dorandom = 0;
/* Make sure to not include UDP packets in the count. */
- if (pcbinfo != &udbinfo)
- ipport_tcpallocs++;
+ if (pcbinfo != &V_udbinfo)
+ V_ipport_tcpallocs++;
/*
* Simple check to ensure all ports are not used up causing
* a deadlock here.
@@ -581,7 +582,7 @@ in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
if (error)
return (error);
}
- if (!TAILQ_EMPTY(&in_ifaddrhead)) {
+ if (!TAILQ_EMPTY(&V_in_ifaddrhead)) {
/*
* If the destination address is INADDR_ANY,
* use the primary local address.
@@ -590,12 +591,12 @@ in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
* choose the broadcast address for that interface.
*/
if (faddr.s_addr == INADDR_ANY)
- faddr = IA_SIN(TAILQ_FIRST(&in_ifaddrhead))->sin_addr;
+ faddr = IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr;
else if (faddr.s_addr == (u_long)INADDR_BROADCAST &&
- (TAILQ_FIRST(&in_ifaddrhead)->ia_ifp->if_flags &
+ (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags &
IFF_BROADCAST))
faddr = satosin(&TAILQ_FIRST(
- &in_ifaddrhead)->ia_broadaddr)->sin_addr;
+ &V_in_ifaddrhead)->ia_broadaddr)->sin_addr;
}
if (laddr.s_addr == INADDR_ANY) {
ia = NULL;
@@ -640,7 +641,7 @@ in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
imo = inp->inp_moptions;
if (imo->imo_multicast_ifp != NULL) {
ifp = imo->imo_multicast_ifp;
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link)
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link)
if (ia->ia_ifp == ifp)
break;
if (ia == NULL)
@@ -1201,12 +1202,12 @@ void
ipport_tick(void *xtp)
{
- if (ipport_tcpallocs <= ipport_tcplastcount + ipport_randomcps) {
- if (ipport_stoprandom > 0)
- ipport_stoprandom--;
+ if (V_ipport_tcpallocs <= V_ipport_tcplastcount + V_ipport_randomcps) {
+ if (V_ipport_stoprandom > 0)
+ V_ipport_stoprandom--;
} else
- ipport_stoprandom = ipport_randomtime;
- ipport_tcplastcount = ipport_tcpallocs;
+ V_ipport_stoprandom = V_ipport_randomtime;
+ V_ipport_tcplastcount = V_ipport_tcpallocs;
callout_reset(&ipport_tick_callout, hz, ipport_tick, NULL);
}
@@ -1259,13 +1260,13 @@ inp_apply_all(void (*func)(struct inpcb *, void *), void *arg)
{
struct inpcb *inp;
- INP_INFO_RLOCK(&tcbinfo);
+ INP_INFO_RLOCK(&V_tcbinfo);
LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
INP_WLOCK(inp);
func(inp, arg);
INP_WUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
}
struct socket *
diff --git a/sys/netinet/in_rmx.c b/sys/netinet/in_rmx.c
index aabf57e..d8a90d4 100644
--- a/sys/netinet/in_rmx.c
+++ b/sys/netinet/in_rmx.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/callout.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -190,9 +191,9 @@ in_clsroute(struct radix_node *rn, struct radix_node_head *head)
* If rtq_reallyold is 0, just delete the route without
* waiting for a timeout cycle to kill it.
*/
- if (rtq_reallyold != 0) {
+ if (V_rtq_reallyold != 0) {
rt->rt_flags |= RTPRF_OURS;
- rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
+ rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
} else {
rtexpunge(rt);
}
@@ -238,9 +239,9 @@ in_rtqkill(struct radix_node *rn, void *rock)
} else {
if (ap->updating &&
(rt->rt_rmx.rmx_expire - time_uptime >
- rtq_reallyold)) {
+ V_rtq_reallyold)) {
rt->rt_rmx.rmx_expire =
- time_uptime + rtq_reallyold;
+ time_uptime + V_rtq_reallyold;
}
ap->nextstop = lmin(ap->nextstop,
rt->rt_rmx.rmx_expire);
@@ -263,15 +264,15 @@ in_rtqtimo(void *rock)
void *newrock;
struct timeval atv;
- KASSERT((rock == (void *)rt_tables[0][AF_INET]),
+ KASSERT((rock == (void *)V_rt_tables[0][AF_INET]),
("in_rtqtimo: unexpected arg"));
for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
- if ((newrock = rt_tables[fibnum][AF_INET]) != NULL)
+ if ((newrock = V_rt_tables[fibnum][AF_INET]) != NULL)
in_rtqtimo_one(newrock);
}
atv.tv_usec = 0;
- atv.tv_sec = rtq_timeout;
- callout_reset(&rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
+ atv.tv_sec = V_rtq_timeout;
+ callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
}
static void
@@ -283,7 +284,7 @@ in_rtqtimo_one(void *rock)
arg.found = arg.killed = 0;
arg.rnh = rnh;
- arg.nextstop = time_uptime + rtq_timeout;
+ arg.nextstop = time_uptime + V_rtq_timeout;
arg.draining = arg.updating = 0;
RADIX_NODE_HEAD_LOCK(rnh);
rnh->rnh_walktree(rnh, in_rtqkill, &arg);
@@ -297,18 +298,18 @@ in_rtqtimo_one(void *rock)
* than once in rtq_timeout seconds, to keep from cranking down too
* hard.
*/
- if ((arg.found - arg.killed > rtq_toomany) &&
- (time_uptime - last_adjusted_timeout >= rtq_timeout) &&
- rtq_reallyold > rtq_minreallyold) {
- rtq_reallyold = 2 * rtq_reallyold / 3;
- if (rtq_reallyold < rtq_minreallyold) {
- rtq_reallyold = rtq_minreallyold;
+ if ((arg.found - arg.killed > V_rtq_toomany) &&
+ (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
+ V_rtq_reallyold > V_rtq_minreallyold) {
+ V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
+ if (V_rtq_reallyold < V_rtq_minreallyold) {
+ V_rtq_reallyold = V_rtq_minreallyold;
}
last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
- rtq_reallyold);
+ V_rtq_reallyold);
#endif
arg.found = arg.killed = 0;
arg.updating = 1;
@@ -327,7 +328,7 @@ in_rtqdrain(void)
int fibnum;
for ( fibnum = 0; fibnum < rt_numfibs; fibnum++) {
- rnh = rt_tables[fibnum][AF_INET];
+ rnh = V_rt_tables[fibnum][AF_INET];
arg.found = arg.killed = 0;
arg.rnh = rnh;
arg.nextstop = 0;
@@ -367,7 +368,7 @@ in_inithead(void **head, int off)
rnh->rnh_matchaddr = in_matroute;
rnh->rnh_close = in_clsroute;
if (_in_rt_was_here == 0 ) {
- callout_init(&rtq_timer, CALLOUT_MPSAFE);
+ callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
in_rtqtimo(rnh); /* kick off timeout first time */
_in_rt_was_here = 1;
}
@@ -423,7 +424,7 @@ in_ifadown(struct ifaddr *ifa, int delete)
return 1;
for ( fibnum = 0; fibnum < rt_numfibs; fibnum++) {
- rnh = rt_tables[fibnum][AF_INET];
+ rnh = V_rt_tables[fibnum][AF_INET];
arg.ifa = ifa;
arg.del = delete;
RADIX_NODE_HEAD_LOCK(rnh);
diff --git a/sys/netinet/in_var.h b/sys/netinet/in_var.h
index d7f1e28..01ff7b3 100644
--- a/sys/netinet/in_var.h
+++ b/sys/netinet/in_var.h
@@ -92,7 +92,7 @@ extern u_long in_ifaddrhmask; /* mask for hash table */
#define INADDR_NHASH (1 << INADDR_NHASH_LOG2)
#define INADDR_HASHVAL(x) fnv_32_buf((&(x)), sizeof(x), FNV1_32_INIT)
#define INADDR_HASH(x) \
- (&in_ifaddrhashtbl[INADDR_HASHVAL(x) & in_ifaddrhmask])
+ (&V_in_ifaddrhashtbl[INADDR_HASHVAL(x) & V_in_ifaddrhmask])
/*
* Macro for finding the internet address structure (in_ifaddr)
@@ -130,7 +130,7 @@ do { \
/* struct ifnet *ifp; */ \
/* struct in_ifaddr *ia; */ \
{ \
- for ((ia) = TAILQ_FIRST(&in_ifaddrhead); \
+ for ((ia) = TAILQ_FIRST(&V_in_ifaddrhead); \
(ia) != NULL && (ia)->ia_ifp != (ifp); \
(ia) = TAILQ_NEXT((ia), ia_link)) \
continue; \
@@ -283,7 +283,7 @@ do { \
/* struct in_multi *inm; */ \
do { \
IN_MULTI_LOCK_ASSERT(); \
- (step).i_inm = LIST_FIRST(&in_multihead); \
+ (step).i_inm = LIST_FIRST(&V_in_multihead); \
IN_NEXT_MULTI((step), (inm)); \
} while(0)
diff --git a/sys/netinet/ip6.h b/sys/netinet/ip6.h
index 883b65a..09692d9 100644
--- a/sys/netinet/ip6.h
+++ b/sys/netinet/ip6.h
@@ -275,24 +275,24 @@ do { \
if (((m)->m_flags & M_LOOP) && \
((m)->m_len < (off) + (hlen)) && \
(((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \
- ip6stat.ip6s_exthdrtoolong++; \
+ V_ip6stat.ip6s_exthdrtoolong++; \
return ret; \
} else if ((m)->m_flags & M_EXT) { \
if ((m)->m_len < (off) + (hlen)) { \
- ip6stat.ip6s_exthdrtoolong++; \
+ V_ip6stat.ip6s_exthdrtoolong++; \
m_freem(m); \
return ret; \
} \
} else { \
if ((m)->m_len < (off) + (hlen)) { \
- ip6stat.ip6s_exthdrtoolong++; \
+ V_ip6stat.ip6s_exthdrtoolong++; \
m_freem(m); \
return ret; \
} \
} \
} else { \
if ((m)->m_len < (off) + (hlen)) { \
- ip6stat.ip6s_tooshort++; \
+ V_ip6stat.ip6s_tooshort++; \
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \
m_freem(m); \
return ret; \
diff --git a/sys/netinet/ip_carp.c b/sys/netinet/ip_carp.c
index 69fea3f..1a07a97 100644
--- a/sys/netinet/ip_carp.c
+++ b/sys/netinet/ip_carp.c
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/vnode.h>
+#include <sys/vimage.h>
#include <machine/stdarg.h>
@@ -1474,7 +1475,7 @@ carp_set_addr(struct carp_softc *sc, struct sockaddr_in *sin)
/* we have to do it by hands to check we won't match on us */
ia_if = NULL; own = 0;
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
/* and, yeah, we need a multicast-capable iface too */
if (ia->ia_ifp != SC2IFP(sc) &&
(ia->ia_ifp->if_flags & IFF_MULTICAST) &&
@@ -1639,7 +1640,7 @@ carp_set_addr6(struct carp_softc *sc, struct sockaddr_in6 *sin6)
/* we have to do it by hands to check we won't match on us */
ia_if = NULL; own = 0;
- for (ia = in6_ifaddr; ia; ia = ia->ia_next) {
+ for (ia = V_in6_ifaddr; ia; ia = ia->ia_next) {
int i;
for (i = 0; i < 4; i++) {
diff --git a/sys/netinet/ip_divert.c b/sys/netinet/ip_divert.c
index a509151..1bba15a 100644
--- a/sys/netinet/ip_divert.c
+++ b/sys/netinet/ip_divert.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -124,7 +125,7 @@ static void
div_zone_change(void *tag)
{
- uma_zone_set_max(divcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_divcbinfo.ipi_zone, maxsockets);
}
static int
@@ -148,18 +149,18 @@ void
div_init(void)
{
- INP_INFO_LOCK_INIT(&divcbinfo, "div");
- LIST_INIT(&divcb);
- divcbinfo.ipi_listhead = &divcb;
+ INP_INFO_LOCK_INIT(&V_divcbinfo, "div");
+ LIST_INIT(&V_divcb);
+ V_divcbinfo.ipi_listhead = &V_divcb;
/*
* XXX We don't use the hash list for divert IP, but it's easier
* to allocate a one entry hash list than it is to check all
* over the place for hashbase == NULL.
*/
- divcbinfo.ipi_hashbase = hashinit(1, M_PCB, &divcbinfo.ipi_hashmask);
- divcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
- &divcbinfo.ipi_porthashmask);
- divcbinfo.ipi_zone = uma_zcreate("divcb", sizeof(struct inpcb),
+ V_divcbinfo.ipi_hashbase = hashinit(1, M_PCB, &V_divcbinfo.ipi_hashmask);
+ V_divcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
+ &V_divcbinfo.ipi_porthashmask);
+ V_divcbinfo.ipi_zone = uma_zcreate("divcb", sizeof(struct inpcb),
NULL, NULL, div_inpcb_init, div_inpcb_fini, UMA_ALIGN_PTR,
UMA_ZONE_NOFREE);
uma_zone_set_max(divcbinfo.ipi_zone, maxsockets);
@@ -174,7 +175,7 @@ div_init(void)
void
div_input(struct mbuf *m, int off)
{
- ipstat.ips_noproto++;
+ V_ipstat.ips_noproto++;
m_freem(m);
}
@@ -266,8 +267,8 @@ divert_packet(struct mbuf *m, int incoming)
/* Put packet on socket queue, if any */
sa = NULL;
nport = htons((u_int16_t)divert_info(mtag));
- INP_INFO_RLOCK(&divcbinfo);
- LIST_FOREACH(inp, &divcb, inp_list) {
+ INP_INFO_RLOCK(&V_divcbinfo);
+ LIST_FOREACH(inp, &V_divcb, inp_list) {
/* XXX why does only one socket match? */
if (inp->inp_lport == nport) {
INP_RLOCK(inp);
@@ -284,11 +285,11 @@ divert_packet(struct mbuf *m, int incoming)
break;
}
}
- INP_INFO_RUNLOCK(&divcbinfo);
+ INP_INFO_RUNLOCK(&V_divcbinfo);
if (sa == NULL) {
m_freem(m);
- ipstat.ips_noproto++;
- ipstat.ips_delivered--;
+ V_ipstat.ips_noproto++;
+ V_ipstat.ips_delivered--;
}
}
@@ -353,7 +354,7 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
struct inpcb *inp;
dt->info |= IP_FW_DIVERT_OUTPUT_FLAG;
- INP_INFO_WLOCK(&divcbinfo);
+ INP_INFO_WLOCK(&V_divcbinfo);
inp = sotoinpcb(so);
INP_RLOCK(inp);
/*
@@ -364,7 +365,7 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) {
error = EINVAL;
INP_RUNLOCK(inp);
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
m_freem(m);
} else {
/* Convert fields to host order for ip_output() */
@@ -372,7 +373,7 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
ip->ip_off = ntohs(ip->ip_off);
/* Send packet to output processing */
- ipstat.ips_rawout++; /* XXX */
+ V_ipstat.ips_rawout++; /* XXX */
#ifdef MAC
mac_inpcb_create_mbuf(inp, m);
@@ -405,7 +406,7 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
error = ENOBUFS;
}
INP_RUNLOCK(inp);
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
if (error == ENOBUFS) {
m_freem(m);
return (error);
@@ -468,14 +469,14 @@ div_attach(struct socket *so, int proto, struct thread *td)
error = soreserve(so, div_sendspace, div_recvspace);
if (error)
return error;
- INP_INFO_WLOCK(&divcbinfo);
- error = in_pcballoc(so, &divcbinfo);
+ INP_INFO_WLOCK(&V_divcbinfo);
+ error = in_pcballoc(so, &V_divcbinfo);
if (error) {
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
return error;
}
inp = (struct inpcb *)so->so_pcb;
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
inp->inp_ip_p = proto;
inp->inp_vflag |= INP_IPV4;
inp->inp_flags |= INP_HDRINCL;
@@ -490,11 +491,11 @@ div_detach(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("div_detach: inp == NULL"));
- INP_INFO_WLOCK(&divcbinfo);
+ INP_INFO_WLOCK(&V_divcbinfo);
INP_WLOCK(inp);
in_pcbdetach(inp);
in_pcbfree(inp);
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
}
static int
@@ -515,11 +516,11 @@ div_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
if (nam->sa_family != AF_INET)
return EAFNOSUPPORT;
((struct sockaddr_in *)nam)->sin_addr.s_addr = INADDR_ANY;
- INP_INFO_WLOCK(&divcbinfo);
+ INP_INFO_WLOCK(&V_divcbinfo);
INP_WLOCK(inp);
error = in_pcbbind(inp, nam, td->td_ucred);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
return error;
}
@@ -543,7 +544,7 @@ div_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
/* Packet must have a header (but that's about it) */
if (m->m_len < sizeof (struct ip) &&
(m = m_pullup(m, sizeof (struct ip))) == 0) {
- ipstat.ips_toosmall++;
+ V_ipstat.ips_toosmall++;
m_freem(m);
return EINVAL;
}
@@ -577,7 +578,7 @@ div_pcblist(SYSCTL_HANDLER_ARGS)
* resource-intensive to repeat twice on every request.
*/
if (req->oldptr == 0) {
- n = divcbinfo.ipi_count;
+ n = V_divcbinfo.ipi_count;
req->oldidx = 2 * (sizeof xig)
+ (n + n/8) * sizeof(struct xinpcb);
return 0;
@@ -589,10 +590,10 @@ div_pcblist(SYSCTL_HANDLER_ARGS)
/*
* OK, now we're committed to doing something.
*/
- INP_INFO_RLOCK(&divcbinfo);
- gencnt = divcbinfo.ipi_gencnt;
- n = divcbinfo.ipi_count;
- INP_INFO_RUNLOCK(&divcbinfo);
+ INP_INFO_RLOCK(&V_divcbinfo);
+ gencnt = V_divcbinfo.ipi_gencnt;
+ n = V_divcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_divcbinfo);
error = sysctl_wire_old_buffer(req,
2 * sizeof(xig) + n*sizeof(struct xinpcb));
@@ -611,8 +612,8 @@ div_pcblist(SYSCTL_HANDLER_ARGS)
if (inp_list == 0)
return ENOMEM;
- INP_INFO_RLOCK(&divcbinfo);
- for (inp = LIST_FIRST(divcbinfo.ipi_listhead), i = 0; inp && i < n;
+ INP_INFO_RLOCK(&V_divcbinfo);
+ for (inp = LIST_FIRST(V_divcbinfo.ipi_listhead), i = 0; inp && i < n;
inp = LIST_NEXT(inp, inp_list)) {
INP_RLOCK(inp);
if (inp->inp_gencnt <= gencnt &&
@@ -620,7 +621,7 @@ div_pcblist(SYSCTL_HANDLER_ARGS)
inp_list[i++] = inp;
INP_RUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&divcbinfo);
+ INP_INFO_RUNLOCK(&V_divcbinfo);
n = i;
error = 0;
@@ -648,11 +649,11 @@ div_pcblist(SYSCTL_HANDLER_ARGS)
* while we were processing this request, and it
* might be necessary to retry.
*/
- INP_INFO_RLOCK(&divcbinfo);
- xig.xig_gen = divcbinfo.ipi_gencnt;
+ INP_INFO_RLOCK(&V_divcbinfo);
+ xig.xig_gen = V_divcbinfo.ipi_gencnt;
xig.xig_sogen = so_gencnt;
- xig.xig_count = divcbinfo.ipi_count;
- INP_INFO_RUNLOCK(&divcbinfo);
+ xig.xig_count = V_divcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_divcbinfo);
error = SYSCTL_OUT(req, &xig, sizeof xig);
}
free(inp_list, M_TEMP);
@@ -724,18 +725,18 @@ div_modevent(module_t mod, int type, void *unused)
* socket open request could be spinning on the lock and then
* we destroy the lock.
*/
- INP_INFO_WLOCK(&divcbinfo);
- n = divcbinfo.ipi_count;
+ INP_INFO_WLOCK(&V_divcbinfo);
+ n = V_divcbinfo.ipi_count;
if (n != 0) {
err = EBUSY;
- INP_INFO_WUNLOCK(&divcbinfo);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
break;
}
ip_divert_ptr = NULL;
err = pf_proto_unregister(PF_INET, IPPROTO_DIVERT, SOCK_RAW);
- INP_INFO_WUNLOCK(&divcbinfo);
- INP_INFO_LOCK_DESTROY(&divcbinfo);
- uma_zdestroy(divcbinfo.ipi_zone);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ INP_INFO_LOCK_DESTROY(&V_divcbinfo);
+ uma_zdestroy(V_divcbinfo.ipi_zone);
break;
default:
err = EOPNOTSUPP;
diff --git a/sys/netinet/ip_fastfwd.c b/sys/netinet/ip_fastfwd.c
index bb8c74a..b38dfc0 100644
--- a/sys/netinet/ip_fastfwd.c
+++ b/sys/netinet/ip_fastfwd.c
@@ -87,6 +87,7 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/pfil.h>
#include <net/if.h>
@@ -135,8 +136,8 @@ ip_findroute(struct route *ro, struct in_addr dest, struct mbuf *m)
if (rt->rt_flags & RTF_GATEWAY)
dst = (struct sockaddr_in *)rt->rt_gateway;
} else {
- ipstat.ips_noroute++;
- ipstat.ips_cantforward++;
+ V_ipstat.ips_noroute++;
+ V_ipstat.ips_cantforward++;
if (rt)
RTFREE(rt);
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
@@ -171,7 +172,7 @@ ip_fastforward(struct mbuf *m)
/*
* Are we active and forwarding packets?
*/
- if (!ipfastforward_active || !ipforwarding)
+ if (!V_ipfastforward_active || !V_ipforwarding)
return m;
M_ASSERTVALID(m);
@@ -187,7 +188,7 @@ ip_fastforward(struct mbuf *m)
* Is entire packet big enough?
*/
if (m->m_pkthdr.len < sizeof(struct ip)) {
- ipstat.ips_tooshort++;
+ V_ipstat.ips_tooshort++;
goto drop;
}
@@ -196,7 +197,7 @@ ip_fastforward(struct mbuf *m)
*/
if (m->m_len < sizeof (struct ip) &&
(m = m_pullup(m, sizeof (struct ip))) == NULL) {
- ipstat.ips_toosmall++;
+ V_ipstat.ips_toosmall++;
return NULL; /* mbuf already free'd */
}
@@ -206,7 +207,7 @@ ip_fastforward(struct mbuf *m)
* Is it IPv4?
*/
if (ip->ip_v != IPVERSION) {
- ipstat.ips_badvers++;
+ V_ipstat.ips_badvers++;
goto drop;
}
@@ -215,12 +216,12 @@ ip_fastforward(struct mbuf *m)
*/
hlen = ip->ip_hl << 2;
if (hlen < sizeof(struct ip)) { /* minimum header length */
- ipstat.ips_badlen++;
+ V_ipstat.ips_badlen++;
goto drop;
}
if (hlen > m->m_len) {
if ((m = m_pullup(m, hlen)) == NULL) {
- ipstat.ips_badhlen++;
+ V_ipstat.ips_badhlen++;
return NULL; /* mbuf already free'd */
}
ip = mtod(m, struct ip *);
@@ -238,7 +239,7 @@ ip_fastforward(struct mbuf *m)
sum = in_cksum(m, hlen);
}
if (sum) {
- ipstat.ips_badsum++;
+ V_ipstat.ips_badsum++;
goto drop;
}
@@ -253,7 +254,7 @@ ip_fastforward(struct mbuf *m)
* Is IP length longer than packet we have got?
*/
if (m->m_pkthdr.len < ip_len) {
- ipstat.ips_tooshort++;
+ V_ipstat.ips_tooshort++;
goto drop;
}
@@ -273,7 +274,7 @@ ip_fastforward(struct mbuf *m)
*/
if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
(ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
- ipstat.ips_badaddr++;
+ V_ipstat.ips_badaddr++;
goto drop;
}
@@ -331,7 +332,7 @@ ip_fastforward(struct mbuf *m)
if (in_localip(ip->ip_dst))
return m;
- ipstat.ips_total++;
+ V_ipstat.ips_total++;
/*
* Step 3: incoming packet firewall processing
@@ -392,7 +393,7 @@ passin:
* Check TTL
*/
#ifdef IPSTEALTH
- if (!ipstealth) {
+ if (!V_ipstealth) {
#endif
if (ip->ip_ttl <= IPTTLDEC) {
icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, 0);
@@ -513,7 +514,7 @@ passout:
*/
if ((ifp->if_snd.ifq_len + ip->ip_len / ifp->if_mtu + 1) >=
ifp->if_snd.ifq_maxlen) {
- ipstat.ips_odropped++;
+ V_ipstat.ips_odropped++;
/* would send source quench here but that is depreciated */
goto drop;
}
@@ -552,7 +553,7 @@ passout:
* Handle EMSGSIZE with icmp reply needfrag for TCP MTU discovery
*/
if (ip->ip_off & IP_DF) {
- ipstat.ips_cantfrag++;
+ V_ipstat.ips_cantfrag++;
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
0, mtu);
goto consumed;
@@ -590,16 +591,16 @@ passout:
m_freem(m);
}
} else
- ipstat.ips_fragmented++;
+ V_ipstat.ips_fragmented++;
}
}
if (error != 0)
- ipstat.ips_odropped++;
+ V_ipstat.ips_odropped++;
else {
ro.ro_rt->rt_rmx.rmx_pksent++;
- ipstat.ips_forward++;
- ipstat.ips_fastforward++;
+ V_ipstat.ips_forward++;
+ V_ipstat.ips_fastforward++;
}
consumed:
RTFREE(ro.ro_rt);
diff --git a/sys/netinet/ip_fw2.c b/sys/netinet/ip_fw2.c
index 650ed1a..7cfe653 100644
--- a/sys/netinet/ip_fw2.c
+++ b/sys/netinet/ip_fw2.c
@@ -64,6 +64,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/radix.h>
#include <net/route.h>
@@ -567,7 +568,7 @@ search_ip6_addr_net (struct in6_addr * ip6_addr)
struct in6_ifaddr *fdm;
struct in6_addr copia;
- TAILQ_FOREACH(mdc, &ifnet, if_link)
+ TAILQ_FOREACH(mdc, &V_ifnet, if_link)
TAILQ_FOREACH(mdc2, &mdc->if_addrlist, ifa_list) {
if (mdc2->ifa_addr->sa_family == AF_INET6) {
fdm = (struct in6_ifaddr *)mdc2;
@@ -762,11 +763,11 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ip_fw_args *args,
proto[0] = '\0';
if (f == NULL) { /* bogus pkt */
- if (verbose_limit != 0 && norule_counter >= verbose_limit)
+ if (V_verbose_limit != 0 && V_norule_counter >= V_verbose_limit)
return;
- norule_counter++;
- if (norule_counter == verbose_limit)
- limit_reached = verbose_limit;
+ V_norule_counter++;
+ if (V_norule_counter == V_verbose_limit)
+ limit_reached = V_verbose_limit;
action = "Refuse";
} else { /* O_LOG is the first action, find the real one */
ipfw_insn *cmd = ACTION_PTR(f);
@@ -1031,7 +1032,7 @@ hash_packet(struct ipfw_flow_id *id)
else
#endif /* INET6 */
i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
- i &= (curr_dyn_buckets - 1);
+ i &= (V_curr_dyn_buckets - 1);
return i;
}
@@ -1049,12 +1050,12 @@ hash_packet(struct ipfw_flow_id *id)
q->parent->count--; \
DEB(printf("ipfw: unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",\
(q->id.src_ip), (q->id.src_port), \
- (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); ) \
+ (q->id.dst_ip), (q->id.dst_port), V_dyn_count-1 ); ) \
if (prev != NULL) \
prev->next = q = q->next; \
else \
head = q = q->next; \
- dyn_count--; \
+ V_dyn_count--; \
uma_zfree(ipfw_dyn_rule_zone, old_q); }
#define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0)
@@ -1083,7 +1084,7 @@ remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
IPFW_DYN_LOCK_ASSERT();
- if (ipfw_dyn_v == NULL || dyn_count == 0)
+ if (V_ipfw_dyn_v == NULL || V_dyn_count == 0)
return;
/* do not expire more than once per second, it is useless */
if (!FORCE && last_remove == time_uptime)
@@ -1096,8 +1097,8 @@ remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
* them in a second pass.
*/
next_pass:
- for (i = 0 ; i < curr_dyn_buckets ; i++) {
- for (prev=NULL, q = ipfw_dyn_v[i] ; q ; ) {
+ for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
+ for (prev=NULL, q = V_ipfw_dyn_v[i] ; q ; ) {
/*
* Logic can become complex here, so we split tests.
*/
@@ -1124,7 +1125,7 @@ next_pass:
goto next;
}
if (q->dyn_type != O_LIMIT_PARENT || !q->count) {
- UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
+ UNLINK_DYN_RULE(prev, V_ipfw_dyn_v[i], q);
continue;
}
next:
@@ -1157,14 +1158,14 @@ lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int *match_direction,
IPFW_DYN_LOCK_ASSERT();
- if (ipfw_dyn_v == NULL)
+ if (V_ipfw_dyn_v == NULL)
goto done; /* not found */
i = hash_packet( pkt );
- for (prev=NULL, q = ipfw_dyn_v[i] ; q != NULL ; ) {
+ for (prev=NULL, q = V_ipfw_dyn_v[i] ; q != NULL ; ) {
if (q->dyn_type == O_LIMIT_PARENT && q->count)
goto next;
if (TIME_LEQ( q->expire, time_uptime)) { /* expire entry */
- UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
+ UNLINK_DYN_RULE(prev, V_ipfw_dyn_v[i], q);
continue;
}
if (pkt->proto == q->id.proto &&
@@ -1214,8 +1215,8 @@ next:
if ( prev != NULL) { /* found and not in front */
prev->next = q->next;
- q->next = ipfw_dyn_v[i];
- ipfw_dyn_v[i] = q;
+ q->next = V_ipfw_dyn_v[i];
+ V_ipfw_dyn_v[i] = q;
}
if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
@@ -1225,7 +1226,7 @@ next:
q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
switch (q->state) {
case TH_SYN: /* opening */
- q->expire = time_uptime + dyn_syn_lifetime;
+ q->expire = time_uptime + V_dyn_syn_lifetime;
break;
case BOTH_SYN: /* move to established */
@@ -1248,13 +1249,13 @@ next:
}
}
}
- q->expire = time_uptime + dyn_ack_lifetime;
+ q->expire = time_uptime + V_dyn_ack_lifetime;
break;
case BOTH_SYN | BOTH_FIN: /* both sides closed */
- if (dyn_fin_lifetime >= dyn_keepalive_period)
- dyn_fin_lifetime = dyn_keepalive_period - 1;
- q->expire = time_uptime + dyn_fin_lifetime;
+ if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
+ V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
+ q->expire = time_uptime + V_dyn_fin_lifetime;
break;
default:
@@ -1266,16 +1267,16 @@ next:
if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
printf("invalid state: 0x%x\n", q->state);
#endif
- if (dyn_rst_lifetime >= dyn_keepalive_period)
- dyn_rst_lifetime = dyn_keepalive_period - 1;
- q->expire = time_uptime + dyn_rst_lifetime;
+ if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
+ V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
+ q->expire = time_uptime + V_dyn_rst_lifetime;
break;
}
} else if (pkt->proto == IPPROTO_UDP) {
- q->expire = time_uptime + dyn_udp_lifetime;
+ q->expire = time_uptime + V_dyn_udp_lifetime;
} else {
/* other protocols */
- q->expire = time_uptime + dyn_short_lifetime;
+ q->expire = time_uptime + V_dyn_short_lifetime;
}
done:
if (match_direction)
@@ -1308,21 +1309,21 @@ realloc_dynamic_table(void)
* default to 1024.
*/
- if (dyn_buckets > 65536)
- dyn_buckets = 1024;
- if ((dyn_buckets & (dyn_buckets-1)) != 0) { /* not a power of 2 */
- dyn_buckets = curr_dyn_buckets; /* reset */
+ if (V_dyn_buckets > 65536)
+ V_dyn_buckets = 1024;
+ if ((V_dyn_buckets & (V_dyn_buckets-1)) != 0) { /* not a power of 2 */
+ V_dyn_buckets = V_curr_dyn_buckets; /* reset */
return;
}
- curr_dyn_buckets = dyn_buckets;
- if (ipfw_dyn_v != NULL)
- free(ipfw_dyn_v, M_IPFW);
+ V_curr_dyn_buckets = V_dyn_buckets;
+ if (V_ipfw_dyn_v != NULL)
+ free(V_ipfw_dyn_v, M_IPFW);
for (;;) {
- ipfw_dyn_v = malloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
+ V_ipfw_dyn_v = malloc(V_curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
M_IPFW, M_NOWAIT | M_ZERO);
- if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
+ if (V_ipfw_dyn_v != NULL || V_curr_dyn_buckets <= 2)
break;
- curr_dyn_buckets /= 2;
+ V_curr_dyn_buckets /= 2;
}
}
@@ -1344,10 +1345,10 @@ add_dyn_rule(struct ipfw_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule)
IPFW_DYN_LOCK_ASSERT();
- if (ipfw_dyn_v == NULL ||
- (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
+ if (V_ipfw_dyn_v == NULL ||
+ (V_dyn_count == 0 && V_dyn_buckets != V_curr_dyn_buckets)) {
realloc_dynamic_table();
- if (ipfw_dyn_v == NULL)
+ if (V_ipfw_dyn_v == NULL)
return NULL; /* failed ! */
}
i = hash_packet(id);
@@ -1369,21 +1370,21 @@ add_dyn_rule(struct ipfw_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule)
}
r->id = *id;
- r->expire = time_uptime + dyn_syn_lifetime;
+ r->expire = time_uptime + V_dyn_syn_lifetime;
r->rule = rule;
r->dyn_type = dyn_type;
r->pcnt = r->bcnt = 0;
r->count = 0;
r->bucket = i;
- r->next = ipfw_dyn_v[i];
- ipfw_dyn_v[i] = r;
- dyn_count++;
+ r->next = V_ipfw_dyn_v[i];
+ V_ipfw_dyn_v[i] = r;
+ V_dyn_count++;
DEB(printf("ipfw: add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
dyn_type,
(r->id.src_ip), (r->id.src_port),
(r->id.dst_ip), (r->id.dst_port),
- dyn_count ); )
+ V_dyn_count ); )
return r;
}
@@ -1399,10 +1400,10 @@ lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
IPFW_DYN_LOCK_ASSERT();
- if (ipfw_dyn_v) {
+ if (V_ipfw_dyn_v) {
int is_v6 = IS_IP6_FLOW_ID(pkt);
i = hash_packet( pkt );
- for (q = ipfw_dyn_v[i] ; q != NULL ; q=q->next)
+ for (q = V_ipfw_dyn_v[i] ; q != NULL ; q=q->next)
if (q->dyn_type == O_LIMIT_PARENT &&
rule== q->rule &&
pkt->proto == q->id.proto &&
@@ -1419,7 +1420,7 @@ lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
pkt->dst_ip == q->id.dst_ip)
)
) {
- q->expire = time_uptime + dyn_short_lifetime;
+ q->expire = time_uptime + V_dyn_short_lifetime;
DEB(printf("ipfw: lookup_dyn_parent found 0x%p\n",q);)
return q;
}
@@ -1466,11 +1467,11 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
return (0);
}
- if (dyn_count >= dyn_max)
+ if (V_dyn_count >= V_dyn_max)
/* Run out of slots, try to remove any expired rule. */
remove_dyn_rule(NULL, (ipfw_dyn_rule *)1);
- if (dyn_count >= dyn_max) {
+ if (V_dyn_count >= V_dyn_max) {
if (last_log != time_uptime) {
last_log = time_uptime;
printf("ipfw: %s: Too many dynamic rules\n", __func__);
@@ -1532,7 +1533,7 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
/* See if we can remove some expired rule. */
remove_dyn_rule(rule, parent);
if (parent->count >= conn_limit) {
- if (fw_verbose && last_log != time_uptime) {
+ if (V_fw_verbose && last_log != time_uptime) {
last_log = time_uptime;
#ifdef INET6
/*
@@ -1675,7 +1676,7 @@ send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
/*
* now fill fields left out earlier
*/
- ip->ip_ttl = ip_defttl;
+ ip->ip_ttl = V_ip_defttl;
ip->ip_len = m->m_pkthdr.len;
m->m_flags |= M_SKIP_FIREWALL;
return (m);
@@ -1787,14 +1788,14 @@ add_table_entry(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
ent->addr.sin_len = ent->mask.sin_len = 8;
ent->mask.sin_addr.s_addr = htonl(mlen ? ~((1 << (32 - mlen)) - 1) : 0);
ent->addr.sin_addr.s_addr = addr & ent->mask.sin_addr.s_addr;
- IPFW_WLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
if (rnh->rnh_addaddr(&ent->addr, &ent->mask, rnh, (void *)ent) ==
NULL) {
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
free(ent, M_IPFW_TBL);
return (EEXIST);
}
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
return (0);
}
@@ -2005,10 +2006,10 @@ check_uidgid(ipfw_insn_u32 *insn, int proto, struct ifnet *oif,
return (0);
if (proto == IPPROTO_TCP) {
wildcard = 0;
- pi = &tcbinfo;
+ pi = &V_tcbinfo;
} else if (proto == IPPROTO_UDP) {
wildcard = INPLOOKUP_WILDCARD;
- pi = &udbinfo;
+ pi = &V_udbinfo;
} else
return 0;
match = 0;
@@ -2196,7 +2197,7 @@ ipfw_chk(struct ip_fw_args *args)
*/
int dyn_dir = MATCH_UNKNOWN;
ipfw_dyn_rule *q = NULL;
- struct ip_fw_chain *chain = &layer3_chain;
+ struct ip_fw_chain *chain = &V_layer3_chain;
struct m_tag *mtag;
/*
@@ -2300,7 +2301,7 @@ do { \
printf("IPFW2: IPV6 - Unknown Routing "
"Header type(%d)\n",
((struct ip6_rthdr *)ulp)->ip6r_type);
- if (fw_deny_unknown_exthdrs)
+ if (V_fw_deny_unknown_exthdrs)
return (IP_FW_DENY);
break;
}
@@ -2324,7 +2325,7 @@ do { \
if (offset == 0) {
printf("IPFW2: IPV6 - Invalid Fragment "
"Header\n");
- if (fw_deny_unknown_exthdrs)
+ if (V_fw_deny_unknown_exthdrs)
return (IP_FW_DENY);
break;
}
@@ -2396,7 +2397,7 @@ do { \
default:
printf("IPFW2: IPV6 - Unknown Extension "
"Header(%d), ext_hd=%x\n", proto, ext_hd);
- if (fw_deny_unknown_exthdrs)
+ if (V_fw_deny_unknown_exthdrs)
return (IP_FW_DENY);
PULLUP_TO(hlen, ulp, struct ip6_ext);
break;
@@ -2477,7 +2478,7 @@ do { \
* XXX should not happen here, but optimized out in
* the caller.
*/
- if (fw_one_pass) {
+ if (V_fw_one_pass) {
IPFW_RUNLOCK(chain);
return (IP_FW_PASS);
}
@@ -2522,7 +2523,7 @@ do { \
int l, cmdlen, skip_or; /* skip rest of OR block */
again:
- if (set_disable & (1 << f->set) )
+ if (V_set_disable & (1 << f->set) )
continue;
skip_or = 0;
@@ -2908,7 +2909,7 @@ check_body:
}
case O_LOG:
- if (fw_verbose)
+ if (V_fw_verbose)
ipfw_log(f, hlen, args, m,
oif, offset, tablearg, ip);
match = 1;
@@ -3328,7 +3329,7 @@ check_body:
if (t == NULL) {
nat_id = (cmd->arg1 == IP_FW_TABLEARG) ?
tablearg : cmd->arg1;
- LOOKUP_NAT(layer3_chain, nat_id, t);
+ LOOKUP_NAT(V_layer3_chain, nat_id, t);
if (t == NULL) {
retval = IP_FW_DENY;
goto done;
@@ -3375,7 +3376,7 @@ done:
return (retval);
pullup_failed:
- if (fw_verbose)
+ if (V_fw_verbose)
printf("ipfw: pullup failed\n");
return (IP_FW_DENY);
}
@@ -3433,10 +3434,10 @@ add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule)
* If rulenum is 0, find highest numbered rule before the
* default rule, and add autoinc_step
*/
- if (autoinc_step < 1)
- autoinc_step = 1;
- else if (autoinc_step > 1000)
- autoinc_step = 1000;
+ if (V_autoinc_step < 1)
+ V_autoinc_step = 1;
+ else if (V_autoinc_step > 1000)
+ V_autoinc_step = 1000;
if (rule->rulenum == 0) {
/*
* locate the highest numbered rule before default
@@ -3446,8 +3447,8 @@ add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule)
break;
rule->rulenum = f->rulenum;
}
- if (rule->rulenum < IPFW_DEFAULT_RULE - autoinc_step)
- rule->rulenum += autoinc_step;
+ if (rule->rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
+ rule->rulenum += V_autoinc_step;
input_rule->rulenum = rule->rulenum;
}
@@ -3468,11 +3469,11 @@ add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule)
}
flush_rule_ptrs(chain);
done:
- static_count++;
+ V_static_count++;
static_len += l;
IPFW_WUNLOCK(chain);
DEB(printf("ipfw: installed rule %d, static count now %d\n",
- rule->rulenum, static_count);)
+ rule->rulenum, V_static_count);)
return (0);
}
@@ -3501,7 +3502,7 @@ remove_rule(struct ip_fw_chain *chain, struct ip_fw *rule,
chain->rules = n;
else
prev->next = n;
- static_count--;
+ V_static_count--;
static_len -= l;
rule->next = chain->reap;
@@ -3716,7 +3717,7 @@ zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
IPFW_WLOCK(chain);
if (rulenum == 0) {
- norule_counter = 0;
+ V_norule_counter = 0;
for (rule = chain->rules; rule; rule = rule->next) {
/* Skip rules from another set. */
if (cmd == 1 && rule->set != set)
@@ -3750,7 +3751,7 @@ zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
}
IPFW_WUNLOCK(chain);
- if (fw_verbose)
+ if (V_fw_verbose)
log(LOG_SECURITY | LOG_NOTICE, msg, rulenum);
return (0);
}
@@ -4093,20 +4094,20 @@ ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
* in a wild attempt to keep the ABI the same.
* Why do we do this on EVERY rule?
*/
- bcopy(&set_disable, &(((struct ip_fw *)bp)->next_rule),
- sizeof(set_disable));
+ bcopy(&V_set_disable, &(((struct ip_fw *)bp)->next_rule),
+ sizeof(V_set_disable));
if (((struct ip_fw *)bp)->timestamp)
((struct ip_fw *)bp)->timestamp += boot_seconds;
bp += i;
}
}
IPFW_RUNLOCK(chain);
- if (ipfw_dyn_v) {
+ if (V_ipfw_dyn_v) {
ipfw_dyn_rule *p, *last = NULL;
IPFW_DYN_LOCK();
- for (i = 0 ; i < curr_dyn_buckets; i++)
- for (p = ipfw_dyn_v[i] ; p != NULL; p = p->next) {
+ for (i = 0 ; i < V_curr_dyn_buckets; i++)
+ for (p = V_ipfw_dyn_v[i] ; p != NULL; p = p->next) {
if (bp + sizeof *p <= ep) {
ipfw_dyn_rule *dst =
(ipfw_dyn_rule *)bp;
@@ -4186,8 +4187,8 @@ ipfw_ctl(struct sockopt *sopt)
* data in which case we'll just return what fits.
*/
size = static_len; /* size of static rules */
- if (ipfw_dyn_v) /* add size of dyn.rules */
- size += (dyn_count * sizeof(ipfw_dyn_rule));
+ if (V_ipfw_dyn_v) /* add size of dyn.rules */
+ size += (V_dyn_count * sizeof(ipfw_dyn_rule));
/*
* XXX todo: if the user passes a short length just to know
@@ -4196,7 +4197,7 @@ ipfw_ctl(struct sockopt *sopt)
*/
buf = malloc(size, M_TEMP, M_WAITOK);
error = sooptcopyout(sopt, buf,
- ipfw_getrules(&layer3_chain, buf, size));
+ ipfw_getrules(&V_layer3_chain, buf, size));
free(buf, M_TEMP);
break;
@@ -4214,12 +4215,12 @@ ipfw_ctl(struct sockopt *sopt)
* the old list without the need for a lock.
*/
- IPFW_WLOCK(&layer3_chain);
- layer3_chain.reap = NULL;
- free_chain(&layer3_chain, 0 /* keep default rule */);
- rule = layer3_chain.reap;
- layer3_chain.reap = NULL;
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
+ V_layer3_chain.reap = NULL;
+ free_chain(&V_layer3_chain, 0 /* keep default rule */);
+ rule = V_layer3_chain.reap;
+ V_layer3_chain.reap = NULL;
+ IPFW_WUNLOCK(&V_layer3_chain);
if (rule != NULL)
reap_rules(rule);
break;
@@ -4231,7 +4232,7 @@ ipfw_ctl(struct sockopt *sopt)
if (error == 0)
error = check_ipfw_struct(rule, sopt->sopt_valsize);
if (error == 0) {
- error = add_rule(&layer3_chain, rule);
+ error = add_rule(&V_layer3_chain, rule);
size = RULESIZE(rule);
if (!error && sopt->sopt_dir == SOPT_GET)
error = sooptcopyout(sopt, rule, size);
@@ -4258,10 +4259,10 @@ ipfw_ctl(struct sockopt *sopt)
break;
size = sopt->sopt_valsize;
if (size == sizeof(u_int32_t)) /* delete or reassign */
- error = del_entry(&layer3_chain, rulenum[0]);
+ error = del_entry(&V_layer3_chain, rulenum[0]);
else if (size == 2*sizeof(u_int32_t)) /* set enable/disable */
- set_disable =
- (set_disable | rulenum[0]) & ~rulenum[1] &
+ V_set_disable =
+ (V_set_disable | rulenum[0]) & ~rulenum[1] &
~(1<<RESVD_SET); /* set RESVD_SET always enabled */
else
error = EINVAL;
@@ -4276,7 +4277,7 @@ ipfw_ctl(struct sockopt *sopt)
if (error)
break;
}
- error = zero_entry(&layer3_chain, rulenum[0],
+ error = zero_entry(&V_layer3_chain, rulenum[0],
sopt->sopt_name == IP_FW_RESETLOG);
break;
@@ -4288,7 +4289,7 @@ ipfw_ctl(struct sockopt *sopt)
sizeof(ent), sizeof(ent));
if (error)
break;
- error = add_table_entry(&layer3_chain, ent.tbl,
+ error = add_table_entry(&V_layer3_chain, ent.tbl,
ent.addr, ent.masklen, ent.value);
}
break;
@@ -4301,7 +4302,7 @@ ipfw_ctl(struct sockopt *sopt)
sizeof(ent), sizeof(ent));
if (error)
break;
- error = del_table_entry(&layer3_chain, ent.tbl,
+ error = del_table_entry(&V_layer3_chain, ent.tbl,
ent.addr, ent.masklen);
}
break;
@@ -4314,9 +4315,9 @@ ipfw_ctl(struct sockopt *sopt)
sizeof(tbl), sizeof(tbl));
if (error)
break;
- IPFW_WLOCK(&layer3_chain);
- error = flush_table(&layer3_chain, tbl);
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
+ error = flush_table(&V_layer3_chain, tbl);
+ IPFW_WUNLOCK(&V_layer3_chain);
}
break;
@@ -4327,9 +4328,9 @@ ipfw_ctl(struct sockopt *sopt)
if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
sizeof(tbl))))
break;
- IPFW_RLOCK(&layer3_chain);
- error = count_table(&layer3_chain, tbl, &cnt);
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RLOCK(&V_layer3_chain);
+ error = count_table(&V_layer3_chain, tbl, &cnt);
+ IPFW_RUNLOCK(&V_layer3_chain);
if (error)
break;
error = sooptcopyout(sopt, &cnt, sizeof(cnt));
@@ -4353,9 +4354,9 @@ ipfw_ctl(struct sockopt *sopt)
}
tbl->size = (size - sizeof(*tbl)) /
sizeof(ipfw_table_entry);
- IPFW_RLOCK(&layer3_chain);
- error = dump_table(&layer3_chain, tbl);
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RLOCK(&V_layer3_chain);
+ error = dump_table(&V_layer3_chain, tbl);
+ IPFW_RUNLOCK(&V_layer3_chain);
if (error) {
free(tbl, M_TEMP);
break;
@@ -4439,7 +4440,7 @@ ipfw_tick(void * __unused unused)
int i;
ipfw_dyn_rule *q;
- if (dyn_keepalive == 0 || ipfw_dyn_v == NULL || dyn_count == 0)
+ if (V_dyn_keepalive == 0 || V_ipfw_dyn_v == NULL || V_dyn_count == 0)
goto done;
/*
@@ -4451,15 +4452,15 @@ ipfw_tick(void * __unused unused)
m0 = NULL;
mtailp = &m0;
IPFW_DYN_LOCK();
- for (i = 0 ; i < curr_dyn_buckets ; i++) {
- for (q = ipfw_dyn_v[i] ; q ; q = q->next ) {
+ for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
+ for (q = V_ipfw_dyn_v[i] ; q ; q = q->next ) {
if (q->dyn_type == O_LIMIT_PARENT)
continue;
if (q->id.proto != IPPROTO_TCP)
continue;
if ( (q->state & BOTH_SYN) != BOTH_SYN)
continue;
- if (TIME_LEQ( time_uptime+dyn_keepalive_interval,
+ if (TIME_LEQ( time_uptime+V_dyn_keepalive_interval,
q->expire))
continue; /* too early */
if (TIME_LEQ(q->expire, time_uptime))
@@ -4482,7 +4483,7 @@ ipfw_tick(void * __unused unused)
ip_output(m, NULL, NULL, 0, NULL, NULL);
}
done:
- callout_reset(&ipfw_timeout, dyn_keepalive_period*hz, ipfw_tick, NULL);
+ callout_reset(&V_ipfw_timeout, V_dyn_keepalive_period*hz, ipfw_tick, NULL);
}
int
@@ -4499,20 +4500,20 @@ ipfw_init(void)
CTLFLAG_RW | CTLFLAG_SECURE, 0, "Firewall");
SYSCTL_ADD_PROC(&ip6_fw_sysctl_ctx, SYSCTL_CHILDREN(ip6_fw_sysctl_tree),
OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3,
- &fw6_enable, 0, ipfw_chg_hook, "I", "Enable ipfw+6");
+ &V_fw6_enable, 0, ipfw_chg_hook, "I", "Enable ipfw+6");
SYSCTL_ADD_INT(&ip6_fw_sysctl_ctx, SYSCTL_CHILDREN(ip6_fw_sysctl_tree),
OID_AUTO, "deny_unknown_exthdrs", CTLFLAG_RW | CTLFLAG_SECURE,
- &fw_deny_unknown_exthdrs, 0,
+ &V_fw_deny_unknown_exthdrs, 0,
"Deny packets with unknown IPv6 Extension Headers");
#endif
- layer3_chain.rules = NULL;
- IPFW_LOCK_INIT(&layer3_chain);
+ V_layer3_chain.rules = NULL;
+ IPFW_LOCK_INIT(&V_layer3_chain);
ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, 0);
IPFW_DYN_LOCK_INIT();
- callout_init(&ipfw_timeout, CALLOUT_MPSAFE);
+ callout_init(&V_ipfw_timeout, CALLOUT_MPSAFE);
bzero(&default_rule, sizeof default_rule);
@@ -4528,17 +4529,17 @@ ipfw_init(void)
#endif
O_DENY;
- error = add_rule(&layer3_chain, &default_rule);
+ error = add_rule(&V_layer3_chain, &default_rule);
if (error != 0) {
printf("ipfw2: error %u initializing default rule "
"(support disabled)\n", error);
IPFW_DYN_LOCK_DESTROY();
- IPFW_LOCK_DESTROY(&layer3_chain);
+ IPFW_LOCK_DESTROY(&V_layer3_chain);
uma_zdestroy(ipfw_dyn_rule_zone);
return (error);
}
- ip_fw_default_rule = layer3_chain.rules;
+ ip_fw_default_rule = V_layer3_chain.rules;
printf("ipfw2 "
#ifdef INET6
"(+ipv6) "
@@ -4565,30 +4566,30 @@ ipfw_init(void)
default_rule.cmd[0].opcode == O_ACCEPT ? "accept" : "deny");
#ifdef IPFIREWALL_VERBOSE
- fw_verbose = 1;
+ V_fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
- verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
+ V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
- if (fw_verbose == 0)
+ if (V_fw_verbose == 0)
printf("disabled\n");
- else if (verbose_limit == 0)
+ else if (V_verbose_limit == 0)
printf("unlimited\n");
else
printf("limited to %d packets/entry by default\n",
- verbose_limit);
+ V_verbose_limit);
- error = init_tables(&layer3_chain);
+ error = init_tables(&V_layer3_chain);
if (error) {
IPFW_DYN_LOCK_DESTROY();
- IPFW_LOCK_DESTROY(&layer3_chain);
+ IPFW_LOCK_DESTROY(&V_layer3_chain);
uma_zdestroy(ipfw_dyn_rule_zone);
return (error);
}
ip_fw_ctl_ptr = ipfw_ctl;
ip_fw_chk_ptr = ipfw_chk;
- callout_reset(&ipfw_timeout, hz, ipfw_tick, NULL);
- LIST_INIT(&layer3_chain.nat);
+ callout_reset(&V_ipfw_timeout, hz, ipfw_tick, NULL);
+ LIST_INIT(&V_layer3_chain.nat);
return (0);
}
@@ -4599,20 +4600,20 @@ ipfw_destroy(void)
ip_fw_chk_ptr = NULL;
ip_fw_ctl_ptr = NULL;
- callout_drain(&ipfw_timeout);
- IPFW_WLOCK(&layer3_chain);
- flush_tables(&layer3_chain);
- layer3_chain.reap = NULL;
- free_chain(&layer3_chain, 1 /* kill default rule */);
- reap = layer3_chain.reap, layer3_chain.reap = NULL;
- IPFW_WUNLOCK(&layer3_chain);
+ callout_drain(&V_ipfw_timeout);
+ IPFW_WLOCK(&V_layer3_chain);
+ flush_tables(&V_layer3_chain);
+ V_layer3_chain.reap = NULL;
+ free_chain(&V_layer3_chain, 1 /* kill default rule */);
+ reap = V_layer3_chain.reap, V_layer3_chain.reap = NULL;
+ IPFW_WUNLOCK(&V_layer3_chain);
if (reap != NULL)
reap_rules(reap);
IPFW_DYN_LOCK_DESTROY();
uma_zdestroy(ipfw_dyn_rule_zone);
- if (ipfw_dyn_v != NULL)
- free(ipfw_dyn_v, M_IPFW);
- IPFW_LOCK_DESTROY(&layer3_chain);
+ if (V_ipfw_dyn_v != NULL)
+ free(V_ipfw_dyn_v, M_IPFW);
+ IPFW_LOCK_DESTROY(&V_layer3_chain);
#ifdef INET6
/* Free IPv6 fw sysctl tree. */
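
Both the IP_FW_FLUSH handler and ipfw_destroy() above move rules onto the chain's reap list while IPFW_WLOCK is held and only call reap_rules() after IPFW_WUNLOCK, so the actual frees happen outside the lock. A minimal stand-alone sketch of that deferred-free pattern follows; the list and lock names are illustrative, not ipfw's own.

#include <pthread.h>
#include <stdlib.h>

struct rule {
	struct rule *next;
};

static struct rule *chain;		/* protected by chain_lock */
static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

static void
flush_chain(void)
{
	struct rule *reap, *r;

	/* Steal the whole list while the lock is held... */
	pthread_mutex_lock(&chain_lock);
	reap = chain;
	chain = NULL;
	pthread_mutex_unlock(&chain_lock);

	/* ...and free it after the lock has been dropped. */
	while ((r = reap) != NULL) {
		reap = r->next;
		free(r);
	}
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct rule *r = calloc(1, sizeof(*r));

		if (r == NULL)
			break;
		r->next = chain;
		chain = r;
	}
	flush_chain();
	return (0);
}
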
diff --git a/sys/netinet/ip_fw_nat.c b/sys/netinet/ip_fw_nat.c
index 3a4f4a1..688a181 100644
--- a/sys/netinet/ip_fw_nat.c
+++ b/sys/netinet/ip_fw_nat.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/ucred.h>
+#include <sys/vimage.h>
#include <netinet/libalias/alias.h>
#include <netinet/libalias/alias_local.h>
@@ -84,9 +85,9 @@ ifaddr_change(void *arg __unused, struct ifnet *ifp)
struct cfg_nat *ptr;
struct ifaddr *ifa;
- IPFW_WLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
/* Check every nat entry... */
- LIST_FOREACH(ptr, &layer3_chain.nat, _next) {
+ LIST_FOREACH(ptr, &V_layer3_chain.nat, _next) {
/* ...using nic 'ifp->if_xname' as dynamic alias address. */
if (strncmp(ptr->if_name, ifp->if_xname, IF_NAMESIZE) == 0) {
mtx_lock(&ifp->if_addr_mtx);
@@ -102,7 +103,7 @@ ifaddr_change(void *arg __unused, struct ifnet *ifp)
mtx_unlock(&ifp->if_addr_mtx);
}
}
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
}
static void
@@ -110,8 +111,8 @@ flush_nat_ptrs(const int i)
{
struct ip_fw *rule;
- IPFW_WLOCK_ASSERT(&layer3_chain);
- for (rule = layer3_chain.rules; rule; rule = rule->next) {
+ IPFW_WLOCK_ASSERT(&V_layer3_chain);
+ for (rule = V_layer3_chain.rules; rule; rule = rule->next) {
ipfw_insn_nat *cmd = (ipfw_insn_nat *)ACTION_PTR(rule);
if (cmd->o.opcode != O_NAT)
continue;
@@ -121,12 +122,12 @@ flush_nat_ptrs(const int i)
}
#define HOOK_NAT(b, p) do { \
- IPFW_WLOCK_ASSERT(&layer3_chain); \
+ IPFW_WLOCK_ASSERT(&V_layer3_chain); \
LIST_INSERT_HEAD(b, p, _next); \
} while (0)
#define UNHOOK_NAT(p) do { \
- IPFW_WLOCK_ASSERT(&layer3_chain); \
+ IPFW_WLOCK_ASSERT(&V_layer3_chain); \
LIST_REMOVE(p, _next); \
} while (0)
@@ -414,20 +415,20 @@ ipfw_nat_cfg(struct sockopt *sopt)
/*
* Find/create nat rule.
*/
- IPFW_WLOCK(&layer3_chain);
- LOOKUP_NAT(layer3_chain, ser_n->id, ptr);
+ IPFW_WLOCK(&V_layer3_chain);
+ LOOKUP_NAT(V_layer3_chain, ser_n->id, ptr);
if (ptr == NULL) {
/* New rule: allocate and init new instance. */
ptr = malloc(sizeof(struct cfg_nat),
M_IPFW, M_NOWAIT | M_ZERO);
if (ptr == NULL) {
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
free(buf, M_IPFW);
return (ENOSPC);
}
ptr->lib = LibAliasInit(NULL);
if (ptr->lib == NULL) {
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
free(ptr, M_IPFW);
free(buf, M_IPFW);
return (EINVAL);
@@ -438,7 +439,7 @@ ipfw_nat_cfg(struct sockopt *sopt)
UNHOOK_NAT(ptr);
flush_nat_ptrs(ser_n->id);
}
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
/*
* Basic nat configuration.
@@ -464,9 +465,9 @@ ipfw_nat_cfg(struct sockopt *sopt)
/* Add new entries. */
add_redir_spool_cfg(&buf[(sizeof(struct cfg_nat))], ptr);
free(buf, M_IPFW);
- IPFW_WLOCK(&layer3_chain);
- HOOK_NAT(&layer3_chain.nat, ptr);
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
+ HOOK_NAT(&V_layer3_chain.nat, ptr);
+ IPFW_WUNLOCK(&V_layer3_chain);
return (0);
}
@@ -477,15 +478,15 @@ ipfw_nat_del(struct sockopt *sopt)
int i;
sooptcopyin(sopt, &i, sizeof i, sizeof i);
- IPFW_WLOCK(&layer3_chain);
- LOOKUP_NAT(layer3_chain, i, ptr);
+ IPFW_WLOCK(&V_layer3_chain);
+ LOOKUP_NAT(V_layer3_chain, i, ptr);
if (ptr == NULL) {
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
return (EINVAL);
}
UNHOOK_NAT(ptr);
flush_nat_ptrs(i);
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
del_redir_spool_cfg(ptr, &ptr->redir_chain);
LibAliasUninit(ptr->lib);
free(ptr, M_IPFW);
@@ -505,9 +506,9 @@ ipfw_nat_get_cfg(struct sockopt *sopt)
off = sizeof(nat_cnt);
data = malloc(NAT_BUF_LEN, M_IPFW, M_WAITOK | M_ZERO);
- IPFW_RLOCK(&layer3_chain);
+ IPFW_RLOCK(&V_layer3_chain);
/* Serialize all the data. */
- LIST_FOREACH(n, &layer3_chain.nat, _next) {
+ LIST_FOREACH(n, &V_layer3_chain.nat, _next) {
nat_cnt++;
if (off + SOF_NAT < NAT_BUF_LEN) {
bcopy(n, &data[off], SOF_NAT);
@@ -534,12 +535,12 @@ ipfw_nat_get_cfg(struct sockopt *sopt)
goto nospace;
}
bcopy(&nat_cnt, data, sizeof(nat_cnt));
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RUNLOCK(&V_layer3_chain);
sooptcopyout(sopt, data, NAT_BUF_LEN);
free(data, M_IPFW);
return (0);
nospace:
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RUNLOCK(&V_layer3_chain);
printf("serialized data buffer not big enough:"
"please increase NAT_BUF_LEN\n");
free(data, M_IPFW);
@@ -557,16 +558,16 @@ ipfw_nat_get_log(struct sockopt *sopt)
sof = LIBALIAS_BUF_SIZE;
cnt = 0;
- IPFW_RLOCK(&layer3_chain);
+ IPFW_RLOCK(&V_layer3_chain);
size = i = 0;
- LIST_FOREACH(ptr, &layer3_chain.nat, _next) {
+ LIST_FOREACH(ptr, &V_layer3_chain.nat, _next) {
if (ptr->lib->logDesc == NULL)
continue;
cnt++;
size = cnt * (sof + sizeof(int));
data = realloc(data, size, M_IPFW, M_NOWAIT | M_ZERO);
if (data == NULL) {
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RUNLOCK(&V_layer3_chain);
return (ENOSPC);
}
bcopy(&ptr->id, &data[i], sizeof(int));
@@ -574,7 +575,7 @@ ipfw_nat_get_log(struct sockopt *sopt)
bcopy(ptr->lib->logDesc, &data[i], sof);
i += sof;
}
- IPFW_RUNLOCK(&layer3_chain);
+ IPFW_RUNLOCK(&V_layer3_chain);
sooptcopyout(sopt, data, size);
free(data, M_IPFW);
return(0);
@@ -584,15 +585,15 @@ static void
ipfw_nat_init(void)
{
- IPFW_WLOCK(&layer3_chain);
+ IPFW_WLOCK(&V_layer3_chain);
/* init ipfw hooks */
ipfw_nat_ptr = ipfw_nat;
ipfw_nat_cfg_ptr = ipfw_nat_cfg;
ipfw_nat_del_ptr = ipfw_nat_del;
ipfw_nat_get_cfg_ptr = ipfw_nat_get_cfg;
ipfw_nat_get_log_ptr = ipfw_nat_get_log;
- IPFW_WUNLOCK(&layer3_chain);
- ifaddr_event_tag = EVENTHANDLER_REGISTER(ifaddr_event, ifaddr_change,
+ IPFW_WUNLOCK(&V_layer3_chain);
+ V_ifaddr_event_tag = EVENTHANDLER_REGISTER(ifaddr_event, ifaddr_change,
NULL, EVENTHANDLER_PRI_ANY);
}
@@ -602,23 +603,23 @@ ipfw_nat_destroy(void)
struct ip_fw *rule;
struct cfg_nat *ptr, *ptr_temp;
- IPFW_WLOCK(&layer3_chain);
- LIST_FOREACH_SAFE(ptr, &layer3_chain.nat, _next, ptr_temp) {
+ IPFW_WLOCK(&V_layer3_chain);
+ LIST_FOREACH_SAFE(ptr, &V_layer3_chain.nat, _next, ptr_temp) {
LIST_REMOVE(ptr, _next);
del_redir_spool_cfg(ptr, &ptr->redir_chain);
LibAliasUninit(ptr->lib);
free(ptr, M_IPFW);
}
- EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_event_tag);
+ EVENTHANDLER_DEREGISTER(ifaddr_event, V_ifaddr_event_tag);
/* flush all nat ptrs */
- for (rule = layer3_chain.rules; rule; rule = rule->next) {
+ for (rule = V_layer3_chain.rules; rule; rule = rule->next) {
ipfw_insn_nat *cmd = (ipfw_insn_nat *)ACTION_PTR(rule);
if (cmd->o.opcode == O_NAT)
cmd->nat = NULL;
}
/* deregister ipfw_nat */
ipfw_nat_ptr = NULL;
- IPFW_WUNLOCK(&layer3_chain);
+ IPFW_WUNLOCK(&V_layer3_chain);
}
static int
diff --git a/sys/netinet/ip_fw_pfil.c b/sys/netinet/ip_fw_pfil.c
index 896194c..64a1752 100644
--- a/sys/netinet/ip_fw_pfil.c
+++ b/sys/netinet/ip_fw_pfil.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -498,14 +499,14 @@ ipfw_chg_hook(SYSCTL_HANDLER_ARGS)
if (enable == *(int *)arg1)
return (0);
- if (arg1 == &fw_enable) {
+ if (arg1 == &V_fw_enable) {
if (enable)
error = ipfw_hook();
else
error = ipfw_unhook();
}
#ifdef INET6
- if (arg1 == &fw6_enable) {
+ if (arg1 == &V_fw6_enable) {
if (enable)
error = ipfw6_hook();
else
diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c
index 153a815..bd8ac70 100644
--- a/sys/netinet/ip_icmp.c
+++ b/sys/netinet/ip_icmp.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_types.h>
@@ -155,7 +156,7 @@ icmp_error(struct mbuf *n, int type, int code, n_long dest, int mtu)
printf("icmp_error(%p, %x, %d)\n", oip, type, code);
#endif
if (type != ICMP_REDIRECT)
- icmpstat.icps_error++;
+ V_icmpstat.icps_error++;
/*
* Don't send error:
* if the original packet was encrypted.
@@ -172,7 +173,7 @@ icmp_error(struct mbuf *n, int type, int code, n_long dest, int mtu)
if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT &&
n->m_len >= oiphlen + ICMP_MINLEN &&
!ICMP_INFOTYPE(((struct icmp *)((caddr_t)oip + oiphlen))->icmp_type)) {
- icmpstat.icps_oldicmp++;
+ V_icmpstat.icps_oldicmp++;
goto freeit;
}
/* Drop if IP header plus 8 bytes is not contiguous in first mbuf. */
@@ -232,7 +233,7 @@ stdreply: icmpelen = max(8, min(icmp_quotelen, oip->ip_len - oiphlen));
*/
M_SETFIB(m, M_GETFIB(n));
icp = mtod(m, struct icmp *);
- icmpstat.icps_outhist[type]++;
+ V_icmpstat.icps_outhist[type]++;
icp->icmp_type = type;
if (type == ICMP_REDIRECT)
icp->icmp_gwaddr.s_addr = dest;
@@ -314,12 +315,12 @@ icmp_input(struct mbuf *m, int off)
}
#endif
if (icmplen < ICMP_MINLEN) {
- icmpstat.icps_tooshort++;
+ V_icmpstat.icps_tooshort++;
goto freeit;
}
i = hlen + min(icmplen, ICMP_ADVLENMIN);
if (m->m_len < i && (m = m_pullup(m, i)) == 0) {
- icmpstat.icps_tooshort++;
+ V_icmpstat.icps_tooshort++;
return;
}
ip = mtod(m, struct ip *);
@@ -327,7 +328,7 @@ icmp_input(struct mbuf *m, int off)
m->m_data += hlen;
icp = mtod(m, struct icmp *);
if (in_cksum(m, icmplen)) {
- icmpstat.icps_checksum++;
+ V_icmpstat.icps_checksum++;
goto freeit;
}
m->m_len += hlen;
@@ -369,7 +370,7 @@ icmp_input(struct mbuf *m, int off)
icmpgw.sin_len = sizeof(struct sockaddr_in);
icmpgw.sin_family = AF_INET;
- icmpstat.icps_inhist[icp->icmp_type]++;
+ V_icmpstat.icps_inhist[icp->icmp_type]++;
code = icp->icmp_code;
switch (icp->icmp_type) {
@@ -434,7 +435,7 @@ icmp_input(struct mbuf *m, int off)
*/
if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
- icmpstat.icps_badlen++;
+ V_icmpstat.icps_badlen++;
goto freeit;
}
icp->icmp_ip.ip_len = ntohs(icp->icmp_ip.ip_len);
@@ -457,13 +458,13 @@ icmp_input(struct mbuf *m, int off)
break;
badcode:
- icmpstat.icps_badcode++;
+ V_icmpstat.icps_badcode++;
break;
case ICMP_ECHO:
if (!icmpbmcastecho
&& (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
- icmpstat.icps_bmcastecho++;
+ V_icmpstat.icps_bmcastecho++;
break;
}
icp->icmp_type = ICMP_ECHOREPLY;
@@ -475,11 +476,11 @@ icmp_input(struct mbuf *m, int off)
case ICMP_TSTAMP:
if (!icmpbmcastecho
&& (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
- icmpstat.icps_bmcasttstamp++;
+ V_icmpstat.icps_bmcasttstamp++;
break;
}
if (icmplen < ICMP_TSLEN) {
- icmpstat.icps_badlen++;
+ V_icmpstat.icps_badlen++;
break;
}
icp->icmp_type = ICMP_TSTAMPREPLY;
@@ -528,8 +529,8 @@ icmp_input(struct mbuf *m, int off)
}
reflect:
ip->ip_len += hlen; /* since ip_input deducts this */
- icmpstat.icps_reflect++;
- icmpstat.icps_outhist[icp->icmp_type]++;
+ V_icmpstat.icps_reflect++;
+ V_icmpstat.icps_outhist[icp->icmp_type]++;
icmp_reflect(m);
return;
@@ -553,13 +554,13 @@ reflect:
* RFC1812 says we must ignore ICMP redirects if we
* are acting as router.
*/
- if (drop_redirect || ipforwarding)
+ if (drop_redirect || V_ipforwarding)
break;
if (code > 3)
goto badcode;
if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
- icmpstat.icps_badlen++;
+ V_icmpstat.icps_badlen++;
break;
}
/*
@@ -633,7 +634,7 @@ icmp_reflect(struct mbuf *m)
IN_EXPERIMENTAL(ntohl(ip->ip_src.s_addr)) ||
IN_ZERONET(ntohl(ip->ip_src.s_addr)) ) {
m_freem(m); /* Bad return address */
- icmpstat.icps_badaddr++;
+ V_icmpstat.icps_badaddr++;
goto done; /* Ip_output() will check for broadcast */
}
@@ -702,7 +703,7 @@ icmp_reflect(struct mbuf *m)
ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
if (ia == NULL) {
m_freem(m);
- icmpstat.icps_noroute++;
+ V_icmpstat.icps_noroute++;
goto done;
}
match:
@@ -711,7 +712,7 @@ match:
#endif
t = IA_SIN(ia)->sin_addr;
ip->ip_src = t;
- ip->ip_ttl = ip_defttl;
+ ip->ip_ttl = V_ip_defttl;
if (optlen > 0) {
register u_char *cp;
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 7abb4e4..4ab975e6 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/pfil.h>
#include <net/if.h>
@@ -216,8 +217,8 @@ ip_init(void)
struct protosw *pr;
int i;
- TAILQ_INIT(&in_ifaddrhead);
- in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask);
+ TAILQ_INIT(&V_in_ifaddrhead);
+ V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);
pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
panic("ip_init: PF_INET not found");
@@ -248,10 +249,10 @@ ip_init(void)
/* Initialize IP reassembly queue. */
IPQ_LOCK_INIT();
for (i = 0; i < IPREASS_NHASH; i++)
- TAILQ_INIT(&ipq[i]);
- maxnipq = nmbclusters / 32;
- maxfragsperpacket = 16;
- ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
+ TAILQ_INIT(&V_ipq[i]);
+ V_maxnipq = nmbclusters / 32;
+ V_maxfragsperpacket = 16;
+ V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
NULL, UMA_ALIGN_PTR, 0);
maxnipq_update();
@@ -306,31 +307,31 @@ ip_input(struct mbuf *m)
goto ours;
}
- ipstat.ips_total++;
+ V_ipstat.ips_total++;
if (m->m_pkthdr.len < sizeof(struct ip))
goto tooshort;
if (m->m_len < sizeof (struct ip) &&
(m = m_pullup(m, sizeof (struct ip))) == NULL) {
- ipstat.ips_toosmall++;
+ V_ipstat.ips_toosmall++;
return;
}
ip = mtod(m, struct ip *);
if (ip->ip_v != IPVERSION) {
- ipstat.ips_badvers++;
+ V_ipstat.ips_badvers++;
goto bad;
}
hlen = ip->ip_hl << 2;
if (hlen < sizeof(struct ip)) { /* minimum header length */
- ipstat.ips_badhlen++;
+ V_ipstat.ips_badhlen++;
goto bad;
}
if (hlen > m->m_len) {
if ((m = m_pullup(m, hlen)) == NULL) {
- ipstat.ips_badhlen++;
+ V_ipstat.ips_badhlen++;
return;
}
ip = mtod(m, struct ip *);
@@ -340,7 +341,7 @@ ip_input(struct mbuf *m)
if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
(ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
- ipstat.ips_badaddr++;
+ V_ipstat.ips_badaddr++;
goto bad;
}
}
@@ -355,7 +356,7 @@ ip_input(struct mbuf *m)
}
}
if (sum) {
- ipstat.ips_badsum++;
+ V_ipstat.ips_badsum++;
goto bad;
}
@@ -370,7 +371,7 @@ ip_input(struct mbuf *m)
*/
ip->ip_len = ntohs(ip->ip_len);
if (ip->ip_len < hlen) {
- ipstat.ips_badlen++;
+ V_ipstat.ips_badlen++;
goto bad;
}
ip->ip_off = ntohs(ip->ip_off);
@@ -383,7 +384,7 @@ ip_input(struct mbuf *m)
*/
if (m->m_pkthdr.len < ip->ip_len) {
tooshort:
- ipstat.ips_tooshort++;
+ V_ipstat.ips_tooshort++;
goto bad;
}
if (m->m_pkthdr.len > ip->ip_len) {
@@ -455,7 +456,7 @@ passin:
* anywhere else. Also checks if the rsvp daemon is running before
* grabbing the packet.
*/
- if (rsvp_on && ip->ip_p==IPPROTO_RSVP)
+ if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP)
goto ours;
/*
@@ -464,7 +465,7 @@ passin:
* we receive might be for us (and let the upper layers deal
* with it).
*/
- if (TAILQ_EMPTY(&in_ifaddrhead) &&
+ if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
(m->m_flags & (M_MCAST|M_BCAST)) == 0)
goto ours;
@@ -486,7 +487,7 @@ passin:
* insert a workaround. If the packet got here, we already
* checked with carp_iamatch() and carp_forus().
*/
- checkif = ip_checkinterface && (ipforwarding == 0) &&
+ checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
m->m_pkthdr.rcvif != NULL &&
((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
#ifdef DEV_CARP
@@ -534,13 +535,13 @@ passin:
}
/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
m_freem(m);
return;
}
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
struct in_multi *inm;
- if (ip_mrouter) {
+ if (V_ip_mrouter) {
/*
* If we are acting as a multicast router, all
* incoming multicast packets are passed to the
@@ -551,7 +552,7 @@ passin:
*/
if (ip_mforward &&
ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
m_freem(m);
return;
}
@@ -563,7 +564,7 @@ passin:
*/
if (ip->ip_p == IPPROTO_IGMP)
goto ours;
- ipstat.ips_forward++;
+ V_ipstat.ips_forward++;
}
/*
* See if we belong to the destination multicast group on the
@@ -573,7 +574,7 @@ passin:
IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
IN_MULTI_UNLOCK();
if (inm == NULL) {
- ipstat.ips_notmember++;
+ V_ipstat.ips_notmember++;
m_freem(m);
return;
}
@@ -588,7 +589,7 @@ passin:
* FAITH(Firewall Aided Internet Translator)
*/
if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
- if (ip_keepfaith) {
+ if (V_ip_keepfaith) {
if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
goto ours;
}
@@ -599,8 +600,8 @@ passin:
/*
* Not for us; forward if possible and desirable.
*/
- if (ipforwarding == 0) {
- ipstat.ips_cantforward++;
+ if (V_ipforwarding == 0) {
+ V_ipstat.ips_cantforward++;
m_freem(m);
} else {
#ifdef IPSEC
@@ -617,7 +618,7 @@ ours:
* IPSTEALTH: Process non-routing options only
* if the packet is destined for us.
*/
- if (ipstealth && hlen > sizeof (struct ip) &&
+ if (V_ipstealth && hlen > sizeof (struct ip) &&
ip_dooptions(m, 1))
return;
#endif /* IPSTEALTH */
@@ -660,7 +661,7 @@ ours:
/*
* Switch out to protocol's input routine.
*/
- ipstat.ips_delivered++;
+ V_ipstat.ips_delivered++;
(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
return;
@@ -680,28 +681,28 @@ maxnipq_update(void)
/*
* -1 for unlimited allocation.
*/
- if (maxnipq < 0)
- uma_zone_set_max(ipq_zone, 0);
+ if (V_maxnipq < 0)
+ uma_zone_set_max(V_ipq_zone, 0);
/*
* Positive number for specific bound.
*/
- if (maxnipq > 0)
- uma_zone_set_max(ipq_zone, maxnipq);
+ if (V_maxnipq > 0)
+ uma_zone_set_max(V_ipq_zone, V_maxnipq);
/*
* Zero specifies no further fragment queue allocation -- set the
* bound very low, but rely on implementation elsewhere to actually
* prevent allocation and reclaim current queues.
*/
- if (maxnipq == 0)
- uma_zone_set_max(ipq_zone, 1);
+ if (V_maxnipq == 0)
+ uma_zone_set_max(V_ipq_zone, 1);
}
static void
ipq_zone_change(void *tag)
{
- if (maxnipq > 0 && maxnipq < (nmbclusters / 32)) {
- maxnipq = nmbclusters / 32;
+ if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
+ V_maxnipq = nmbclusters / 32;
maxnipq_update();
}
}
@@ -711,7 +712,7 @@ sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
int error, i;
- i = maxnipq;
+ i = V_maxnipq;
error = sysctl_handle_int(oidp, &i, 0, req);
if (error || !req->newptr)
return (error);
@@ -722,7 +723,7 @@ sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
*/
if (i < -1)
return (EINVAL);
- maxnipq = i;
+ V_maxnipq = i;
maxnipq_update();
return (0);
}
@@ -753,9 +754,9 @@ ip_reass(struct mbuf *m)
u_short hash;
/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
- if (maxnipq == 0 || maxfragsperpacket == 0) {
- ipstat.ips_fragments++;
- ipstat.ips_fragdropped++;
+ if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
+ V_ipstat.ips_fragments++;
+ V_ipstat.ips_fragdropped++;
m_freem(m);
return (NULL);
}
@@ -764,7 +765,7 @@ ip_reass(struct mbuf *m)
hlen = ip->ip_hl << 2;
hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
- head = &ipq[hash];
+ head = &V_ipq[hash];
IPQ_LOCK();
/*
@@ -787,7 +788,7 @@ ip_reass(struct mbuf *m)
* Attempt to trim the number of allocated fragment queues if it
* exceeds the administrative limit.
*/
- if ((nipq > maxnipq) && (maxnipq > 0)) {
+ if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
/*
* drop something from the tail of the current queue
* before proceeding further
@@ -795,15 +796,15 @@ ip_reass(struct mbuf *m)
struct ipq *q = TAILQ_LAST(head, ipqhead);
if (q == NULL) { /* gak */
for (i = 0; i < IPREASS_NHASH; i++) {
- struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
+ struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
if (r) {
- ipstat.ips_fragtimeout += r->ipq_nfrags;
- ip_freef(&ipq[i], r);
+ V_ipstat.ips_fragtimeout += r->ipq_nfrags;
+ ip_freef(&V_ipq[i], r);
break;
}
}
} else {
- ipstat.ips_fragtimeout += q->ipq_nfrags;
+ V_ipstat.ips_fragtimeout += q->ipq_nfrags;
ip_freef(head, q);
}
}
@@ -820,7 +821,7 @@ found:
* that's a non-zero multiple of 8 bytes.
*/
if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
- ipstat.ips_toosmall++; /* XXX */
+ V_ipstat.ips_toosmall++; /* XXX */
goto dropfrag;
}
m->m_flags |= M_FRAG;
@@ -833,7 +834,7 @@ found:
* Attempt reassembly; if it succeeds, proceed.
* ip_reass() will return a different mbuf.
*/
- ipstat.ips_fragments++;
+ V_ipstat.ips_fragments++;
m->m_pkthdr.header = ip;
/* Previous ip_reass() started here. */
@@ -848,19 +849,19 @@ found:
* If first fragment to arrive, create a reassembly queue.
*/
if (fp == NULL) {
- fp = uma_zalloc(ipq_zone, M_NOWAIT);
+ fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
if (fp == NULL)
goto dropfrag;
#ifdef MAC
if (mac_ipq_init(fp, M_NOWAIT) != 0) {
- uma_zfree(ipq_zone, fp);
+ uma_zfree(V_ipq_zone, fp);
fp = NULL;
goto dropfrag;
}
mac_ipq_create(m, fp);
#endif
TAILQ_INSERT_HEAD(head, fp, ipq_list);
- nipq++;
+ V_nipq++;
fp->ipq_nfrags = 1;
fp->ipq_ttl = IPFRAGTTL;
fp->ipq_p = ip->ip_p;
@@ -944,7 +945,7 @@ found:
}
nq = q->m_nextpkt;
m->m_nextpkt = nq;
- ipstat.ips_fragdropped++;
+ V_ipstat.ips_fragdropped++;
fp->ipq_nfrags--;
m_freem(q);
}
@@ -962,8 +963,8 @@ found:
next = 0;
for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
if (GETIP(q)->ip_off != next) {
- if (fp->ipq_nfrags > maxfragsperpacket) {
- ipstat.ips_fragdropped += fp->ipq_nfrags;
+ if (fp->ipq_nfrags > V_maxfragsperpacket) {
+ V_ipstat.ips_fragdropped += fp->ipq_nfrags;
ip_freef(head, fp);
}
goto done;
@@ -972,8 +973,8 @@ found:
}
/* Make sure the last packet didn't have the IP_MF flag */
if (p->m_flags & M_FRAG) {
- if (fp->ipq_nfrags > maxfragsperpacket) {
- ipstat.ips_fragdropped += fp->ipq_nfrags;
+ if (fp->ipq_nfrags > V_maxfragsperpacket) {
+ V_ipstat.ips_fragdropped += fp->ipq_nfrags;
ip_freef(head, fp);
}
goto done;
@@ -985,8 +986,8 @@ found:
q = fp->ipq_frags;
ip = GETIP(q);
if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
- ipstat.ips_toolong++;
- ipstat.ips_fragdropped += fp->ipq_nfrags;
+ V_ipstat.ips_toolong++;
+ V_ipstat.ips_fragdropped += fp->ipq_nfrags;
ip_freef(head, fp);
goto done;
}
@@ -1028,19 +1029,19 @@ found:
ip->ip_src = fp->ipq_src;
ip->ip_dst = fp->ipq_dst;
TAILQ_REMOVE(head, fp, ipq_list);
- nipq--;
- uma_zfree(ipq_zone, fp);
+ V_nipq--;
+ uma_zfree(V_ipq_zone, fp);
m->m_len += (ip->ip_hl << 2);
m->m_data -= (ip->ip_hl << 2);
/* some debugging cruft by sklower, below, will go away soon */
if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
m_fixhdr(m);
- ipstat.ips_reassembled++;
+ V_ipstat.ips_reassembled++;
IPQ_UNLOCK();
return (m);
dropfrag:
- ipstat.ips_fragdropped++;
+ V_ipstat.ips_fragdropped++;
if (fp != NULL)
fp->ipq_nfrags--;
m_freem(m);
@@ -1068,8 +1069,8 @@ ip_freef(struct ipqhead *fhp, struct ipq *fp)
m_freem(q);
}
TAILQ_REMOVE(fhp, fp, ipq_list);
- uma_zfree(ipq_zone, fp);
- nipq--;
+ uma_zfree(V_ipq_zone, fp);
+ V_nipq--;
}
/*
@@ -1085,14 +1086,14 @@ ip_slowtimo(void)
IPQ_LOCK();
for (i = 0; i < IPREASS_NHASH; i++) {
- for(fp = TAILQ_FIRST(&ipq[i]); fp;) {
+ for(fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
struct ipq *fpp;
fpp = fp;
fp = TAILQ_NEXT(fp, ipq_list);
if(--fpp->ipq_ttl == 0) {
- ipstat.ips_fragtimeout += fpp->ipq_nfrags;
- ip_freef(&ipq[i], fpp);
+ V_ipstat.ips_fragtimeout += fpp->ipq_nfrags;
+ ip_freef(&V_ipq[i], fpp);
}
}
}
@@ -1101,12 +1102,12 @@ ip_slowtimo(void)
* (due to the limit being lowered), drain off
* enough to get down to the new limit.
*/
- if (maxnipq >= 0 && nipq > maxnipq) {
+ if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
for (i = 0; i < IPREASS_NHASH; i++) {
- while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i])) {
- ipstat.ips_fragdropped +=
- TAILQ_FIRST(&ipq[i])->ipq_nfrags;
- ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
+ while (V_nipq > V_maxnipq && !TAILQ_EMPTY(&V_ipq[i])) {
+ V_ipstat.ips_fragdropped +=
+ TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
+ ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
}
}
}
@@ -1123,10 +1124,10 @@ ip_drain(void)
IPQ_LOCK();
for (i = 0; i < IPREASS_NHASH; i++) {
- while(!TAILQ_EMPTY(&ipq[i])) {
- ipstat.ips_fragdropped +=
- TAILQ_FIRST(&ipq[i])->ipq_nfrags;
- ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
+ while(!TAILQ_EMPTY(&V_ipq[i])) {
+ V_ipstat.ips_fragdropped +=
+ TAILQ_FIRST(&V_ipq[i])->ipq_nfrags;
+ ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
}
}
IPQ_UNLOCK();
@@ -1253,12 +1254,12 @@ ip_forward(struct mbuf *m, int srcrt)
int error, type = 0, code = 0, mtu = 0;
if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
m_freem(m);
return;
}
#ifdef IPSTEALTH
- if (!ipstealth) {
+ if (!V_ipstealth) {
#endif
if (ip->ip_ttl <= IPTTLDEC) {
icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
@@ -1309,7 +1310,7 @@ ip_forward(struct mbuf *m, int srcrt)
}
#ifdef IPSTEALTH
- if (!ipstealth) {
+ if (!V_ipstealth) {
#endif
ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
@@ -1325,7 +1326,7 @@ ip_forward(struct mbuf *m, int srcrt)
* or a route modified by a redirect.
*/
dest.s_addr = 0;
- if (!srcrt && ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) {
+ if (!srcrt && V_ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) {
struct sockaddr_in *sin;
struct rtentry *rt;
@@ -1372,11 +1373,11 @@ ip_forward(struct mbuf *m, int srcrt)
RTFREE(ro.ro_rt);
if (error)
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
else {
- ipstat.ips_forward++;
+ V_ipstat.ips_forward++;
if (type)
- ipstat.ips_redirectsent++;
+ V_ipstat.ips_redirectsent++;
else {
if (mcopy)
m_freem(mcopy);
@@ -1428,7 +1429,7 @@ ip_forward(struct mbuf *m, int srcrt)
else
mtu = ip_next_mtu(ip->ip_len, 0);
}
- ipstat.ips_cantfrag++;
+ V_ipstat.ips_cantfrag++;
break;
case ENOBUFS:
@@ -1440,7 +1441,7 @@ ip_forward(struct mbuf *m, int srcrt)
* Those who need source quench packets may re-enable them
* via the net.inet.ip.sendsourcequench sysctl.
*/
- if (ip_sendsourcequench == 0) {
+ if (V_ip_sendsourcequench == 0) {
m_freem(mcopy);
return;
} else {
@@ -1522,7 +1523,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
if (((ifp = m->m_pkthdr.rcvif))
- && ( ifp->if_index && (ifp->if_index <= if_index))) {
+ && ( ifp->if_index && (ifp->if_index <= V_if_index))) {
sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
/*
* Change our mind and don't try copy.
@@ -1562,17 +1563,17 @@ ip_rsvp_init(struct socket *so)
so->so_proto->pr_protocol != IPPROTO_RSVP)
return EOPNOTSUPP;
- if (ip_rsvpd != NULL)
+ if (V_ip_rsvpd != NULL)
return EADDRINUSE;
- ip_rsvpd = so;
+ V_ip_rsvpd = so;
/*
* This may seem silly, but we need to be sure we don't over-increment
* the RSVP counter, in case something slips up.
*/
- if (!ip_rsvp_on) {
- ip_rsvp_on = 1;
- rsvp_on++;
+ if (!V_ip_rsvp_on) {
+ V_ip_rsvp_on = 1;
+ V_rsvp_on++;
}
return 0;
@@ -1581,14 +1582,14 @@ ip_rsvp_init(struct socket *so)
int
ip_rsvp_done(void)
{
- ip_rsvpd = NULL;
+ V_ip_rsvpd = NULL;
/*
* This may seem silly, but we need to be sure we don't over-decrement
* the RSVP counter, in case something slips up.
*/
- if (ip_rsvp_on) {
- ip_rsvp_on = 0;
- rsvp_on--;
+ if (V_ip_rsvp_on) {
+ V_ip_rsvp_on = 0;
+ V_rsvp_on--;
}
return 0;
}
@@ -1606,12 +1607,12 @@ rsvp_input(struct mbuf *m, int off) /* XXX must fixup manually */
* case we want to throw the packet away.
*/
- if (!rsvp_on) {
+ if (!V_rsvp_on) {
m_freem(m);
return;
}
- if (ip_rsvpd != NULL) {
+ if (V_ip_rsvpd != NULL) {
rip_input(m, off);
return;
}
diff --git a/sys/netinet/ip_ipsec.c b/sys/netinet/ip_ipsec.c
index fbe7430..3e8e826 100644
--- a/sys/netinet/ip_ipsec.c
+++ b/sys/netinet/ip_ipsec.c
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -121,7 +122,7 @@ ip_ipsec_fwd(struct mbuf *m)
KEY_FREESP(&sp);
splx(s);
if (error) {
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
return 1;
}
#endif /* IPSEC */
diff --git a/sys/netinet/ip_mroute.c b/sys/netinet/ip_mroute.c
index d60e8bd..0476a36 100644
--- a/sys/netinet/ip_mroute.c
+++ b/sys/netinet/ip_mroute.c
@@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/time.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/netisr.h>
#include <net/route.h>
@@ -428,7 +429,7 @@ X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
struct bw_upcall bw_upcall;
uint32_t i;
- if (so != ip_mrouter && sopt->sopt_name != MRT_INIT)
+ if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
return EPERM;
error = 0;
@@ -654,7 +655,7 @@ if_detached_event(void *arg __unused, struct ifnet *ifp)
struct rtdetq *npq;
MROUTER_LOCK();
- if (ip_mrouter == NULL) {
+ if (V_ip_mrouter == NULL) {
MROUTER_UNLOCK();
}
@@ -720,7 +721,7 @@ ip_mrouter_init(struct socket *so, int version)
MROUTER_LOCK();
- if (ip_mrouter != NULL) {
+ if (V_ip_mrouter != NULL) {
MROUTER_UNLOCK();
return EADDRINUSE;
}
@@ -738,7 +739,7 @@ ip_mrouter_init(struct socket *so, int version)
expire_bw_upcalls_send, NULL);
callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
- ip_mrouter = so;
+ V_ip_mrouter = so;
MROUTER_UNLOCK();
@@ -763,7 +764,7 @@ X_ip_mrouter_done(void)
MROUTER_LOCK();
- if (ip_mrouter == NULL) {
+ if (V_ip_mrouter == NULL) {
MROUTER_UNLOCK();
return EINVAL;
}
@@ -771,7 +772,7 @@ X_ip_mrouter_done(void)
/*
* Detach/disable hooks to the rest of the system.
*/
- ip_mrouter = NULL;
+ V_ip_mrouter = NULL;
mrt_api_config = 0;
VIF_LOCK();
@@ -1449,7 +1450,7 @@ X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
mrtstat.mrts_upcalls++;
k_igmpsrc.sin_addr = ip->ip_src;
- if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
+ if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n");
++mrtstat.mrts_upq_sockfull;
fail1:
@@ -1668,7 +1669,7 @@ ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
mrtstat.mrts_upcalls++;
k_igmpsrc.sin_addr = im->im_src;
- if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
+ if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
log(LOG_WARNING,
"ip_mforward: ip_mrouter socket queue full\n");
++mrtstat.mrts_upq_sockfull;
@@ -1829,7 +1830,7 @@ X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
*/
if (!viftable[vifi].v_rsvp_on) {
viftable[vifi].v_rsvp_on = 1;
- rsvp_on++;
+ V_rsvp_on++;
}
} else { /* must be VIF_OFF */
/*
@@ -1844,7 +1845,7 @@ X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
*/
if (viftable[vifi].v_rsvp_on) {
viftable[vifi].v_rsvp_on = 0;
- rsvp_on--;
+ V_rsvp_on--;
}
}
VIF_UNLOCK();
@@ -1873,7 +1874,7 @@ X_ip_rsvp_force_done(struct socket *so)
*/
if (viftable[vifi].v_rsvp_on) {
viftable[vifi].v_rsvp_on = 0;
- rsvp_on--;
+ V_rsvp_on--;
}
}
}
@@ -1890,13 +1891,13 @@ X_rsvp_input(struct mbuf *m, int off)
struct ifnet *ifp;
if (rsvpdebug)
- printf("rsvp_input: rsvp_on %d\n",rsvp_on);
+ printf("rsvp_input: rsvp_on %d\n",V_rsvp_on);
/* Can still get packets with rsvp_on = 0 if there is a local member
* of the group to which the RSVP packet is addressed. But in this
* case we want to throw the packet away.
*/
- if (!rsvp_on) {
+ if (!V_rsvp_on) {
m_freem(m);
return;
}
@@ -1928,7 +1929,7 @@ X_rsvp_input(struct mbuf *m, int off)
* then use it. Otherwise, drop packet since there
* is no specific socket for this vif.
*/
- if (ip_rsvpd != NULL) {
+ if (V_ip_rsvpd != NULL) {
if (rsvpdebug)
printf("rsvp_input: Sending packet up old-style socket\n");
rip_input(m, off); /* xxx */
@@ -2323,7 +2324,7 @@ bw_upcalls_send(void)
* XXX do we need to set the address in k_igmpsrc ?
*/
mrtstat.mrts_upcalls++;
- if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) {
+ if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) {
log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
++mrtstat.mrts_upq_sockfull;
}
@@ -2677,7 +2678,7 @@ pim_register_send_upcall(struct ip *ip, struct vif *vifp,
mrtstat.mrts_upcalls++;
- if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) {
+ if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) {
if (mrtdebug & DEBUG_PIM)
log(LOG_WARNING,
"mcast: pim_register_send_upcall: ip_mrouter socket queue full");
@@ -3094,7 +3095,7 @@ ip_mroute_modevent(module_t mod, int type, void *unused)
* just loaded and then unloaded w/o starting up a user
* process we still need to cleanup.
*/
- if (ip_mrouter
+ if (V_ip_mrouter
#ifdef INET6
|| ip6_mrouter
#endif
diff --git a/sys/netinet/ip_options.c b/sys/netinet/ip_options.c
index 0019f7a..b58dbee 100644
--- a/sys/netinet/ip_options.c
+++ b/sys/netinet/ip_options.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_types.h>
@@ -150,7 +151,7 @@ ip_dooptions(struct mbuf *m, int pass)
case IPOPT_LSRR:
case IPOPT_SSRR:
#ifdef IPSTEALTH
- if (ipstealth && pass > 0)
+ if (V_ipstealth && pass > 0)
break;
#endif
if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
@@ -189,11 +190,11 @@ ip_dooptions(struct mbuf *m, int pass)
break;
}
#ifdef IPSTEALTH
- if (ipstealth)
+ if (V_ipstealth)
goto dropit;
#endif
if (!ip_dosourceroute) {
- if (ipforwarding) {
+ if (V_ipforwarding) {
char buf[16]; /* aaa.bbb.ccc.ddd\0 */
/*
* Acting as a router, so generate
@@ -215,7 +216,7 @@ nosourcerouting:
#ifdef IPSTEALTH
dropit:
#endif
- ipstat.ips_cantforward++;
+ V_ipstat.ips_cantforward++;
m_freem(m);
return (1);
}
@@ -252,7 +253,7 @@ dropit:
case IPOPT_RR:
#ifdef IPSTEALTH
- if (ipstealth && pass == 0)
+ if (V_ipstealth && pass == 0)
break;
#endif
if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
@@ -289,7 +290,7 @@ dropit:
case IPOPT_TS:
#ifdef IPSTEALTH
- if (ipstealth && pass == 0)
+ if (V_ipstealth && pass == 0)
break;
#endif
code = cp - (u_char *)ip;
@@ -356,14 +357,14 @@ dropit:
cp[IPOPT_OFFSET] += sizeof(n_time);
}
}
- if (forward && ipforwarding) {
+ if (forward && V_ipforwarding) {
ip_forward(m, 1);
return (1);
}
return (0);
bad:
icmp_error(m, type, code, 0, 0);
- ipstat.ips_badoptions++;
+ V_ipstat.ips_badoptions++;
return (1);
}
diff --git a/sys/netinet/ip_output.c b/sys/netinet/ip_output.c
index 231510a..946626a 100644
--- a/sys/netinet/ip_output.c
+++ b/sys/netinet/ip_output.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/netisr.h>
@@ -155,7 +156,7 @@ ip_output(struct mbuf *m, struct mbuf *opt, struct route *ro, int flags,
ip->ip_v = IPVERSION;
ip->ip_hl = hlen >> 2;
ip->ip_id = ip_newid();
- ipstat.ips_localout++;
+ V_ipstat.ips_localout++;
} else {
hlen = ip->ip_hl << 2;
}
@@ -194,7 +195,7 @@ again:
if (flags & IP_SENDONES) {
if ((ia = ifatoia(ifa_ifwithbroadaddr(sintosa(dst)))) == NULL &&
(ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) {
- ipstat.ips_noroute++;
+ V_ipstat.ips_noroute++;
error = ENETUNREACH;
goto bad;
}
@@ -206,7 +207,7 @@ again:
} else if (flags & IP_ROUTETOIF) {
if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL &&
(ia = ifatoia(ifa_ifwithnet(sintosa(dst)))) == NULL) {
- ipstat.ips_noroute++;
+ V_ipstat.ips_noroute++;
error = ENETUNREACH;
goto bad;
}
@@ -238,7 +239,7 @@ again:
inp ? inp->inp_inc.inc_fibnum : M_GETFIB(m));
#endif
if (ro->ro_rt == NULL) {
- ipstat.ips_noroute++;
+ V_ipstat.ips_noroute++;
error = EHOSTUNREACH;
goto bad;
}
@@ -297,7 +298,7 @@ again:
*/
if ((imo == NULL) || (imo->imo_multicast_vif == -1)) {
if ((ifp->if_flags & IFF_MULTICAST) == 0) {
- ipstat.ips_noroute++;
+ V_ipstat.ips_noroute++;
error = ENETUNREACH;
goto bad;
}
@@ -338,14 +339,14 @@ again:
* above, will be forwarded by the ip_input() routine,
* if necessary.
*/
- if (ip_mrouter && (flags & IP_FORWARDING) == 0) {
+ if (V_ip_mrouter && (flags & IP_FORWARDING) == 0) {
/*
* If rsvp daemon is not running, do not
* set ip_moptions. This ensures that the packet
* is multicast and not just sent down one link
* as prescribed by rsvpd.
*/
- if (!rsvp_on)
+ if (!V_rsvp_on)
imo = NULL;
if (ip_mforward &&
ip_mforward(ip, ifp, m, imo) != 0) {
@@ -397,7 +398,7 @@ again:
#endif /* ALTQ */
{
error = ENOBUFS;
- ipstat.ips_odropped++;
+ V_ipstat.ips_odropped++;
ifp->if_snd.ifq_drops += (ip->ip_len / ifp->if_mtu + 1);
goto bad;
}
@@ -461,7 +462,7 @@ sendit:
if (in_localip(ip->ip_dst)) {
m->m_flags |= M_FASTFWD_OURS;
if (m->m_pkthdr.rcvif == NULL)
- m->m_pkthdr.rcvif = loif;
+ m->m_pkthdr.rcvif = V_loif;
if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
m->m_pkthdr.csum_flags |=
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
@@ -480,7 +481,7 @@ sendit:
/* See if local, if yes, send it to netisr with IP_FASTFWD_OURS. */
if (m->m_flags & M_FASTFWD_OURS) {
if (m->m_pkthdr.rcvif == NULL)
- m->m_pkthdr.rcvif = loif;
+ m->m_pkthdr.rcvif = V_loif;
if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
m->m_pkthdr.csum_flags |=
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
@@ -508,7 +509,7 @@ passout:
if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
(ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
- ipstat.ips_badaddr++;
+ V_ipstat.ips_badaddr++;
error = EADDRNOTAVAIL;
goto bad;
}
@@ -567,7 +568,7 @@ passout:
/* Balk when DF bit is set or the interface didn't support TSO. */
if ((ip->ip_off & IP_DF) || (m->m_pkthdr.csum_flags & CSUM_TSO)) {
error = EMSGSIZE;
- ipstat.ips_cantfrag++;
+ V_ipstat.ips_cantfrag++;
goto bad;
}
@@ -600,7 +601,7 @@ passout:
}
if (error == 0)
- ipstat.ips_fragmented++;
+ V_ipstat.ips_fragmented++;
done:
if (ro == &iproute && ro->ro_rt) {
@@ -635,7 +636,7 @@ ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu,
int nfrags;
if (ip->ip_off & IP_DF) { /* Fragmentation not allowed */
- ipstat.ips_cantfrag++;
+ V_ipstat.ips_cantfrag++;
return EMSGSIZE;
}
@@ -710,7 +711,7 @@ smart_frag_failure:
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
error = ENOBUFS;
- ipstat.ips_odropped++;
+ V_ipstat.ips_odropped++;
goto done;
}
m->m_flags |= (m0->m_flags & M_MCAST) | M_FRAG;
@@ -740,7 +741,7 @@ smart_frag_failure:
if (m->m_next == NULL) { /* copy failed */
m_free(m);
error = ENOBUFS; /* ??? */
- ipstat.ips_odropped++;
+ V_ipstat.ips_odropped++;
goto done;
}
m->m_pkthdr.len = mhlen + len;
@@ -756,7 +757,7 @@ smart_frag_failure:
*mnext = m;
mnext = &m->m_nextpkt;
}
- ipstat.ips_ofragments += nfrags;
+ V_ipstat.ips_ofragments += nfrags;
/* set first marker for fragment chain */
m0->m_flags |= M_FIRSTFRAG | M_FRAG;
diff --git a/sys/netinet/ip_var.h b/sys/netinet/ip_var.h
index 436a4a0..75d5f7f 100644
--- a/sys/netinet/ip_var.h
+++ b/sys/netinet/ip_var.h
@@ -34,6 +34,9 @@
#define _NETINET_IP_VAR_H_
#include <sys/queue.h>
+#ifdef _KERNEL
+#include <sys/vimage.h>
+#endif
/*
* Overlay for ip header used by other protocols (tcp, udp).
@@ -237,7 +240,7 @@ extern int ip_do_randomid;
static __inline uint16_t
ip_newid(void)
{
- if (ip_do_randomid)
+ if (V_ip_do_randomid)
return ip_randomid();
return htons(ip_id++);
diff --git a/sys/netinet/raw_ip.c b/sys/netinet/raw_ip.c
index 26718ef..7ea0113 100644
--- a/sys/netinet/raw_ip.c
+++ b/sys/netinet/raw_ip.c
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -158,7 +159,7 @@ static void
rip_zone_change(void *tag)
{
- uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}
static int
@@ -174,16 +175,16 @@ void
rip_init(void)
{
- INP_INFO_LOCK_INIT(&ripcbinfo, "rip");
- LIST_INIT(&ripcb);
- ripcbinfo.ipi_listhead = &ripcb;
- ripcbinfo.ipi_hashbase = hashinit(INP_PCBHASH_RAW_SIZE, M_PCB,
- &ripcbinfo.ipi_hashmask);
- ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
- &ripcbinfo.ipi_porthashmask);
- ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
+ INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
+ LIST_INIT(&V_ripcb);
+ V_ripcbinfo.ipi_listhead = &V_ripcb;
+ V_ripcbinfo.ipi_hashbase = hashinit(INP_PCBHASH_RAW_SIZE, M_PCB,
+ &V_ripcbinfo.ipi_hashmask);
+ V_ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
+ &V_ripcbinfo.ipi_porthashmask);
+ V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
- uma_zone_set_max(ripcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
EVENTHANDLER_PRI_ANY);
}
@@ -251,9 +252,9 @@ rip_input(struct mbuf *m, int off)
ripsrc.sin_addr = ip->ip_src;
last = NULL;
hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
- ip->ip_dst.s_addr, ripcbinfo.ipi_hashmask);
- INP_INFO_RLOCK(&ripcbinfo);
- LIST_FOREACH(inp, &ripcbinfo.ipi_hashbase[hash], inp_hash) {
+ ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
if (inp->inp_ip_p != proto)
continue;
#ifdef INET6
@@ -282,7 +283,7 @@ rip_input(struct mbuf *m, int off)
}
last = inp;
}
- LIST_FOREACH(inp, &ripcbinfo.ipi_hashbase[0], inp_hash) {
+ LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
if (inp->inp_ip_p && inp->inp_ip_p != proto)
continue;
#ifdef INET6
@@ -313,15 +314,15 @@ rip_input(struct mbuf *m, int off)
}
last = inp;
}
- INP_INFO_RUNLOCK(&ripcbinfo);
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
if (last != NULL) {
if (rip_append(last, ip, m, &ripsrc) != 0)
- ipstat.ips_delivered--;
+ V_ipstat.ips_delivered--;
INP_RUNLOCK(last);
} else {
m_freem(m);
- ipstat.ips_noproto++;
- ipstat.ips_delivered--;
+ V_ipstat.ips_noproto++;
+ V_ipstat.ips_delivered--;
}
}
@@ -401,7 +402,7 @@ rip_output(struct mbuf *m, struct socket *so, u_long dst)
* XXX prevent ip_output from overwriting header fields.
*/
flags |= IP_RAWOUTPUT;
- ipstat.ips_rawout++;
+ V_ipstat.ips_rawout++;
}
if (inp->inp_flags & INP_ONESBCAST)
@@ -604,7 +605,7 @@ rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
switch (cmd) {
case PRC_IFDOWN:
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
if (ia->ia_ifa.ifa_addr == sa
&& (ia->ia_flags & IFA_ROUTE)) {
/*
@@ -624,7 +625,7 @@ rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
break;
case PRC_IFUP:
- TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
if (ia->ia_ifa.ifa_addr == sa)
break;
}
@@ -669,18 +670,18 @@ rip_attach(struct socket *so, int proto, struct thread *td)
error = soreserve(so, rip_sendspace, rip_recvspace);
if (error)
return (error);
- INP_INFO_WLOCK(&ripcbinfo);
- error = in_pcballoc(so, &ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ error = in_pcballoc(so, &V_ripcbinfo);
if (error) {
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
return (error);
}
inp = (struct inpcb *)so->so_pcb;
inp->inp_vflag |= INP_IPV4;
inp->inp_ip_p = proto;
- inp->inp_ip_ttl = ip_defttl;
+ inp->inp_ip_ttl = V_ip_defttl;
rip_inshash(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
INP_WUNLOCK(inp);
return (0);
}
@@ -695,18 +696,18 @@ rip_detach(struct socket *so)
KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
("rip_detach: not closed"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_delhash(inp);
- if (so == ip_mrouter && ip_mrouter_done)
+ if (so == V_ip_mrouter && ip_mrouter_done)
ip_mrouter_done();
if (ip_rsvp_force_done)
ip_rsvp_force_done(so);
- if (so == ip_rsvpd)
+ if (so == V_ip_rsvpd)
ip_rsvp_done();
in_pcbdetach(inp);
in_pcbfree(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
}
static void
@@ -732,11 +733,11 @@ rip_abort(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_dodisconnect(so, inp);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
}
static void
@@ -747,11 +748,11 @@ rip_close(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_close: inp == NULL"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_dodisconnect(so, inp);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
}
static int
@@ -765,11 +766,11 @@ rip_disconnect(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_dodisconnect(so, inp);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
return (0);
}
@@ -790,7 +791,7 @@ rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
return (EADDRNOTAVAIL);
}
- if (TAILQ_EMPTY(&ifnet) ||
+ if (TAILQ_EMPTY(&V_ifnet) ||
(addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
(addr->sin_addr.s_addr &&
ifa_ifwithaddr((struct sockaddr *)addr) == 0))
@@ -799,13 +800,13 @@ rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_delhash(inp);
inp->inp_laddr = addr->sin_addr;
rip_inshash(inp);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
return (0);
}
@@ -817,7 +818,7 @@ rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
if (nam->sa_len != sizeof(*addr))
return (EINVAL);
- if (TAILQ_EMPTY(&ifnet))
+ if (TAILQ_EMPTY(&V_ifnet))
return (EADDRNOTAVAIL);
if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
return (EAFNOSUPPORT);
@@ -825,14 +826,14 @@ rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
- INP_INFO_WLOCK(&ripcbinfo);
+ INP_INFO_WLOCK(&V_ripcbinfo);
INP_WLOCK(inp);
rip_delhash(inp);
inp->inp_faddr = addr->sin_addr;
rip_inshash(inp);
soisconnected(so);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&ripcbinfo);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
return (0);
}
@@ -892,7 +893,7 @@ rip_pcblist(SYSCTL_HANDLER_ARGS)
* resource-intensive to repeat twice on every request.
*/
if (req->oldptr == 0) {
- n = ripcbinfo.ipi_count;
+ n = V_ripcbinfo.ipi_count;
req->oldidx = 2 * (sizeof xig)
+ (n + n/8) * sizeof(struct xinpcb);
return (0);
@@ -904,10 +905,10 @@ rip_pcblist(SYSCTL_HANDLER_ARGS)
/*
* OK, now we're committed to doing something.
*/
- INP_INFO_RLOCK(&ripcbinfo);
- gencnt = ripcbinfo.ipi_gencnt;
- n = ripcbinfo.ipi_count;
- INP_INFO_RUNLOCK(&ripcbinfo);
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ gencnt = V_ripcbinfo.ipi_gencnt;
+ n = V_ripcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
xig.xig_len = sizeof xig;
xig.xig_count = n;
@@ -921,8 +922,8 @@ rip_pcblist(SYSCTL_HANDLER_ARGS)
if (inp_list == 0)
return (ENOMEM);
- INP_INFO_RLOCK(&ripcbinfo);
- for (inp = LIST_FIRST(ripcbinfo.ipi_listhead), i = 0; inp && i < n;
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
inp = LIST_NEXT(inp, inp_list)) {
INP_RLOCK(inp);
if (inp->inp_gencnt <= gencnt &&
@@ -932,7 +933,7 @@ rip_pcblist(SYSCTL_HANDLER_ARGS)
}
INP_RUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&ripcbinfo);
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
n = i;
error = 0;
@@ -959,11 +960,11 @@ rip_pcblist(SYSCTL_HANDLER_ARGS)
* that something happened while we were processing this
* request, and it might be necessary to retry.
*/
- INP_INFO_RLOCK(&ripcbinfo);
- xig.xig_gen = ripcbinfo.ipi_gencnt;
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ xig.xig_gen = V_ripcbinfo.ipi_gencnt;
xig.xig_sogen = so_gencnt;
- xig.xig_count = ripcbinfo.ipi_count;
- INP_INFO_RUNLOCK(&ripcbinfo);
+ xig.xig_count = V_ripcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
error = SYSCTL_OUT(req, &xig, sizeof xig);
}
free(inp_list, M_TEMP);
diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h
index a1795d2..9514f7a 100644
--- a/sys/netinet/sctp_os_bsd.h
+++ b/sys/netinet/sctp_os_bsd.h
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_sctp.h"
+
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/systm.h>
@@ -60,7 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/random.h>
#include <sys/limits.h>
#include <sys/queue.h>
-#if defined(__FreeBSD__) && __FreeBSD_version > 800000 && defined(VIMAGE)
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800044
#include <sys/vimage.h>
#endif
#include <machine/cpu.h>
@@ -144,9 +145,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
/*
* Macros to expand out globals defined by various modules
* to either a real global or a virtualized instance of one,
- * depending on whether VIMAGE is defined in opt_vimage.h
- * XXX opt_vimage.h not yet present, more framework to come.
- * XXX so will always evaluate to the global for now (VIMAGE not defined)
+ * depending on whether VIMAGE is defined.
*/
/* first define modules that supply us information */
#define MOD_NET net
@@ -155,10 +154,14 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT);
#define MOD_IPSEC ipsec
/* then define the macro(s) that hook into the vimage macros */
-#if defined(__FreeBSD__) && __FreeBSD_version > 800000 && defined(VIMAGE)
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800044 && defined(VIMAGE)
+#if 0
#define VSYMNAME(__MODULE) vnet_ ## __MODULE
#define MODULE_GLOBAL(__MODULE, __SYMBOL) VSYM(VSYMNAME(__MODULE), __SYMBOL)
#else
+#define MODULE_GLOBAL(__MODULE, __SYMBOL) V_ ## __SYMBOL
+#endif
+#else
#define MODULE_GLOBAL(__MODULE, __SYMBOL) (__SYMBOL)
#endif
/*
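
The block above reduces MODULE_GLOBAL() to a choice between the V_-prefixed alias of a symbol (VIMAGE builds on a sufficiently new __FreeBSD_version) and the bare symbol itself. A compilable userland sketch of that indirection follows, simplified to a plain #ifdef VIMAGE; the alias definition here is an illustrative assumption standing in for the kernel's vimage machinery, not its actual contents.

#include <stdio.h>

/*
 * Stand-ins for the kernel global and for <sys/vimage.h>.  The alias
 * below is an assumption for illustration only: it makes the V_ name
 * and the bare name refer to the same object.
 */
static int ipport_firstauto = 1024;
#define	V_ipport_firstauto	ipport_firstauto

#ifdef VIMAGE
#define	MODULE_GLOBAL(__MODULE, __SYMBOL)	V_ ## __SYMBOL
#else
#define	MODULE_GLOBAL(__MODULE, __SYMBOL)	(__SYMBOL)
#endif

int
main(void)
{
	/* The sctp_pcb.c hunk below reads the port bounds this way. */
	MODULE_GLOBAL(MOD_INET, ipport_firstauto) = 2048;
	printf("ipport_firstauto = %d\n", ipport_firstauto);	/* 2048 */
	return (0);
}

Compiled with or without -DVIMAGE, both branches end up touching the same int, which is why converted call sites such as the sctp_pcb.c change that follows need no further adjustment.
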
diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c
index 55be58b..ceca8bd 100644
--- a/sys/netinet/sctp_pcb.c
+++ b/sys/netinet/sctp_pcb.c
@@ -2784,8 +2784,8 @@ continue_anyway:
first = MODULE_GLOBAL(MOD_INET, ipport_lowfirstauto);
last = MODULE_GLOBAL(MOD_INET, ipport_lowlastauto);
} else {
- first = ipport_firstauto;
- last = ipport_lastauto;
+ first = MODULE_GLOBAL(MOD_INET, ipport_firstauto);
+ last = MODULE_GLOBAL(MOD_INET, ipport_lastauto);
}
if (first > last) {
uint16_t temp;
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 3a68607..bf8f616 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -76,6 +76,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
@@ -186,7 +187,7 @@ static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
#define HOSTCACHE_HASH(ip) \
(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
- tcp_hostcache.hashmask)
+ V_tcp_hostcache.hashmask)
/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6) \
@@ -194,7 +195,7 @@ static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
(ip6)->s6_addr32[1] ^ \
(ip6)->s6_addr32[2] ^ \
(ip6)->s6_addr32[3]) & \
- tcp_hostcache.hashmask)
+ V_tcp_hostcache.hashmask)
#define THC_LOCK(lp) mtx_lock(lp)
#define THC_UNLOCK(lp) mtx_unlock(lp)
@@ -207,55 +208,55 @@ tcp_hc_init(void)
/*
* Initialize hostcache structures.
*/
- tcp_hostcache.cache_count = 0;
- tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
- tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
- tcp_hostcache.cache_limit =
- tcp_hostcache.hashsize * tcp_hostcache.bucket_limit;
- tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
- tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
+ V_tcp_hostcache.cache_count = 0;
+ V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
+ V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
+ V_tcp_hostcache.cache_limit =
+ V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
+ V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
+ V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
- &tcp_hostcache.hashsize);
+ &V_tcp_hostcache.hashsize);
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
- &tcp_hostcache.cache_limit);
+ &V_tcp_hostcache.cache_limit);
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
- &tcp_hostcache.bucket_limit);
- if (!powerof2(tcp_hostcache.hashsize)) {
+ &V_tcp_hostcache.bucket_limit);
+ if (!powerof2(V_tcp_hostcache.hashsize)) {
printf("WARNING: hostcache hash size is not a power of 2.\n");
- tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
+ V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
}
- tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;
+ V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;
/*
* Allocate the hash table.
*/
- tcp_hostcache.hashbase = (struct hc_head *)
- malloc(tcp_hostcache.hashsize * sizeof(struct hc_head),
+ V_tcp_hostcache.hashbase = (struct hc_head *)
+ malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
M_HOSTCACHE, M_WAITOK | M_ZERO);
/*
* Initialize the hash buckets.
*/
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- TAILQ_INIT(&tcp_hostcache.hashbase[i].hch_bucket);
- tcp_hostcache.hashbase[i].hch_length = 0;
- mtx_init(&tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
+ V_tcp_hostcache.hashbase[i].hch_length = 0;
+ mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
NULL, MTX_DEF);
}
/*
* Allocate the hostcache entries.
*/
- tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
+ V_tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- uma_zone_set_max(tcp_hostcache.zone, tcp_hostcache.cache_limit);
+ uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
/*
* Set up periodic cache cleanup.
*/
- callout_init(&tcp_hc_callout, CALLOUT_MPSAFE);
- callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+ callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
}
/*
@@ -281,7 +282,7 @@ tcp_hc_lookup(struct in_conninfo *inc)
else
hash = HOSTCACHE_HASH(&inc->inc_faddr);
- hc_head = &tcp_hostcache.hashbase[hash];
+ hc_head = &V_tcp_hostcache.hashbase[hash];
/*
* Acquire lock for this bucket row; we release the lock if we don't
@@ -336,7 +337,7 @@ tcp_hc_insert(struct in_conninfo *inc)
else
hash = HOSTCACHE_HASH(&inc->inc_faddr);
- hc_head = &tcp_hostcache.hashbase[hash];
+ hc_head = &V_tcp_hostcache.hashbase[hash];
/*
* Acquire lock for this bucket row; we release the lock if we don't
@@ -348,8 +349,8 @@ tcp_hc_insert(struct in_conninfo *inc)
/*
* If the bucket limit is reached, reuse the least-used element.
*/
- if (hc_head->hch_length >= tcp_hostcache.bucket_limit ||
- tcp_hostcache.cache_count >= tcp_hostcache.cache_limit) {
+ if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
+ V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
/*
* At first we were dropping the last element, just to
@@ -365,17 +366,17 @@ tcp_hc_insert(struct in_conninfo *inc)
return NULL;
}
TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
- tcp_hostcache.hashbase[hash].hch_length--;
- tcp_hostcache.cache_count--;
- tcpstat.tcps_hc_bucketoverflow++;
+ V_tcp_hostcache.hashbase[hash].hch_length--;
+ V_tcp_hostcache.cache_count--;
+ V_tcpstat.tcps_hc_bucketoverflow++;
#if 0
- uma_zfree(tcp_hostcache.zone, hc_entry);
+ uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
} else {
/*
* Allocate a new entry, or balk if not possible.
*/
- hc_entry = uma_zalloc(tcp_hostcache.zone, M_NOWAIT);
+ hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
if (hc_entry == NULL) {
THC_UNLOCK(&hc_head->hch_mtx);
return NULL;
@@ -391,15 +392,15 @@ tcp_hc_insert(struct in_conninfo *inc)
else
hc_entry->ip4 = inc->inc_faddr;
hc_entry->rmx_head = hc_head;
- hc_entry->rmx_expire = tcp_hostcache.expire;
+ hc_entry->rmx_expire = V_tcp_hostcache.expire;
/*
* Put it upfront.
*/
TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
- tcp_hostcache.hashbase[hash].hch_length++;
- tcp_hostcache.cache_count++;
- tcpstat.tcps_hc_added++;
+ V_tcp_hostcache.hashbase[hash].hch_length++;
+ V_tcp_hostcache.cache_count++;
+ V_tcpstat.tcps_hc_added++;
return hc_entry;
}
@@ -427,7 +428,7 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
return;
}
hc_entry->rmx_hits++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
@@ -460,7 +461,7 @@ tcp_hc_getmtu(struct in_conninfo *inc)
return 0;
}
hc_entry->rmx_hits++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
mtu = hc_entry->rmx_mtu;
THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
@@ -490,7 +491,7 @@ tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
return;
}
hc_entry->rmx_updates++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
hc_entry->rmx_mtu = mtu;
@@ -522,7 +523,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
return;
}
hc_entry->rmx_updates++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
if (hcml->rmx_rtt != 0) {
if (hc_entry->rmx_rtt == 0)
@@ -530,7 +531,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_rtt =
(hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
- tcpstat.tcps_cachedrtt++;
+ V_tcpstat.tcps_cachedrtt++;
}
if (hcml->rmx_rttvar != 0) {
if (hc_entry->rmx_rttvar == 0)
@@ -538,7 +539,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_rttvar =
(hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
- tcpstat.tcps_cachedrttvar++;
+ V_tcpstat.tcps_cachedrttvar++;
}
if (hcml->rmx_ssthresh != 0) {
if (hc_entry->rmx_ssthresh == 0)
@@ -546,7 +547,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_ssthresh =
(hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
- tcpstat.tcps_cachedssthresh++;
+ V_tcpstat.tcps_cachedssthresh++;
}
if (hcml->rmx_bandwidth != 0) {
if (hc_entry->rmx_bandwidth == 0)
@@ -554,7 +555,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_bandwidth =
(hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
- /* tcpstat.tcps_cachedbandwidth++; */
+ /* V_tcpstat.tcps_cachedbandwidth++; */
}
if (hcml->rmx_cwnd != 0) {
if (hc_entry->rmx_cwnd == 0)
@@ -562,7 +563,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_cwnd =
(hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
- /* tcpstat.tcps_cachedcwnd++; */
+ /* V_tcpstat.tcps_cachedcwnd++; */
}
if (hcml->rmx_sendpipe != 0) {
if (hc_entry->rmx_sendpipe == 0)
@@ -570,7 +571,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_sendpipe =
(hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
- /* tcpstat.tcps_cachedsendpipe++; */
+ /* V_tcpstat.tcps_cachedsendpipe++; */
}
if (hcml->rmx_recvpipe != 0) {
if (hc_entry->rmx_recvpipe == 0)
@@ -578,7 +579,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_recvpipe =
(hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) /2;
- /* tcpstat.tcps_cachedrecvpipe++; */
+ /* V_tcpstat.tcps_cachedrecvpipe++; */
}
TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
@@ -602,7 +603,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
char ip6buf[INET6_ADDRSTRLEN];
#endif
- bufsize = linesize * (tcp_hostcache.cache_count + 1);
+ bufsize = linesize * (V_tcp_hostcache.cache_count + 1);
p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
@@ -612,9 +613,9 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
p += len;
#define msec(u) (((u) + 500) / 1000)
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
- TAILQ_FOREACH(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
+ TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
rmx_q) {
len = snprintf(p, linesize,
"%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
@@ -640,7 +641,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
hc_entry->rmx_expire);
p += len;
}
- THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
+ THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
}
#undef msec
error = SYSCTL_OUT(req, buf, p - buf);
@@ -659,25 +660,25 @@ tcp_hc_purge(void *arg)
int all = (intptr_t)arg;
int i;
- if (tcp_hostcache.purgeall) {
+ if (V_tcp_hostcache.purgeall) {
all = 1;
- tcp_hostcache.purgeall = 0;
+ V_tcp_hostcache.purgeall = 0;
}
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
- TAILQ_FOREACH_SAFE(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
+ TAILQ_FOREACH_SAFE(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
rmx_q, hc_next) {
if (all || hc_entry->rmx_expire <= 0) {
- TAILQ_REMOVE(&tcp_hostcache.hashbase[i].hch_bucket,
+ TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
hc_entry, rmx_q);
- uma_zfree(tcp_hostcache.zone, hc_entry);
- tcp_hostcache.hashbase[i].hch_length--;
- tcp_hostcache.cache_count--;
+ uma_zfree(V_tcp_hostcache.zone, hc_entry);
+ V_tcp_hostcache.hashbase[i].hch_length--;
+ V_tcp_hostcache.cache_count--;
} else
- hc_entry->rmx_expire -= tcp_hostcache.prune;
+ hc_entry->rmx_expire -= V_tcp_hostcache.prune;
}
- THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
+ THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
}
- callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
}
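Only the V_ prefix on the hashmask changes in the two HOSTCACHE_HASH macros near the top of this file; the mixing itself is untouched. For reference, a stand-alone illustration of the IPv4 variant is below; the 512-bucket hash size and the sample address are assumptions made for the example, not values taken from the patch.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/*
 * Same mixing as the IPv4 HOSTCACHE_HASH() above, taking the raw
 * 32-bit address and the mask as arguments.
 */
#define	HOSTCACHE_HASH(ip, hashmask)					\
	(((ip) ^ ((ip) >> 7) ^ ((ip) >> 17)) & (hashmask))

int
main(void)
{
	/* Addresses are hashed in network byte order, as in the kernel. */
	uint32_t s_addr = inet_addr("192.0.2.1");
	uint32_t hashmask = 512 - 1;	/* assumed default hash size, a power of 2 */

	printf("bucket index: %u\n", (unsigned)HOSTCACHE_HASH(s_addr, hashmask));
	return (0);
}

Because the mask is derived from hashsize - 1, the table size must stay a power of two, which is exactly what the powerof2() check in tcp_hc_init() above enforces before falling back to the default.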
diff --git a/sys/netinet/tcp_input.c b/sys/netinet/tcp_input.c
index 097c40b..fae2ba6 100644
--- a/sys/netinet/tcp_input.c
+++ b/sys/netinet/tcp_input.c
@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
@@ -209,7 +210,7 @@ do { \
#define DELAY_ACK(tp) \
((!tcp_timer_active(tp, TT_DELACK) && \
(tp->t_flags & TF_RXWIN0SENT) == 0) && \
- (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
+ (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
/*
@@ -293,7 +294,7 @@ tcp_input(struct mbuf *m, int off0)
#endif
to.to_flags = 0;
- tcpstat.tcps_rcvtotal++;
+ V_tcpstat.tcps_rcvtotal++;
if (isipv6) {
#ifdef INET6
@@ -301,7 +302,7 @@ tcp_input(struct mbuf *m, int off0)
ip6 = mtod(m, struct ip6_hdr *);
tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
- tcpstat.tcps_rcvbadsum++;
+ V_tcpstat.tcps_rcvbadsum++;
goto drop;
}
th = (struct tcphdr *)((caddr_t)ip6 + off0);
@@ -333,7 +334,7 @@ tcp_input(struct mbuf *m, int off0)
if (m->m_len < sizeof (struct tcpiphdr)) {
if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
== NULL) {
- tcpstat.tcps_rcvshort++;
+ V_tcpstat.tcps_rcvshort++;
return;
}
}
@@ -367,7 +368,7 @@ tcp_input(struct mbuf *m, int off0)
th->th_sum = in_cksum(m, len);
}
if (th->th_sum) {
- tcpstat.tcps_rcvbadsum++;
+ V_tcpstat.tcps_rcvbadsum++;
goto drop;
}
/* Re-initialization for later version check */
@@ -387,7 +388,7 @@ tcp_input(struct mbuf *m, int off0)
*/
off = th->th_off << 2;
if (off < sizeof (struct tcphdr) || off > tlen) {
- tcpstat.tcps_rcvbadoff++;
+ V_tcpstat.tcps_rcvbadoff++;
goto drop;
}
tlen -= off; /* tlen is used instead of ti->ti_len */
@@ -402,7 +403,7 @@ tcp_input(struct mbuf *m, int off0)
if (m->m_len < sizeof(struct ip) + off) {
if ((m = m_pullup(m, sizeof (struct ip) + off))
== NULL) {
- tcpstat.tcps_rcvshort++;
+ V_tcpstat.tcps_rcvshort++;
return;
}
ip = mtod(m, struct ip *);
@@ -431,9 +432,9 @@ tcp_input(struct mbuf *m, int off0)
/*
* Locate pcb for segment.
*/
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
findpcb:
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#ifdef IPFIREWALL_FORWARD
/*
* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
@@ -448,13 +449,13 @@ findpcb:
* Transparently forwarded. Pretend to be the destination.
* already got one like this?
*/
- inp = in_pcblookup_hash(&tcbinfo,
+ inp = in_pcblookup_hash(&V_tcbinfo,
ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
0, m->m_pkthdr.rcvif);
if (!inp) {
/* It's new. Try to find the ambushing socket. */
- inp = in_pcblookup_hash(&tcbinfo,
+ inp = in_pcblookup_hash(&V_tcbinfo,
ip->ip_src, th->th_sport,
next_hop->sin_addr,
next_hop->sin_port ?
@@ -470,14 +471,14 @@ findpcb:
{
if (isipv6) {
#ifdef INET6
- inp = in6_pcblookup_hash(&tcbinfo,
+ inp = in6_pcblookup_hash(&V_tcbinfo,
&ip6->ip6_src, th->th_sport,
&ip6->ip6_dst, th->th_dport,
INPLOOKUP_WILDCARD,
m->m_pkthdr.rcvif);
#endif
} else
- inp = in_pcblookup_hash(&tcbinfo,
+ inp = in_pcblookup_hash(&V_tcbinfo,
ip->ip_src, th->th_sport,
ip->ip_dst, th->th_dport,
INPLOOKUP_WILDCARD,
@@ -504,8 +505,8 @@ findpcb:
* When blackholing do not respond with a RST but
* completely ignore the segment and drop it.
*/
- if ((blackhole == 1 && (thflags & TH_SYN)) ||
- blackhole == 2)
+ if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
+ V_blackhole == 2)
goto dropunlock;
rstreason = BANDLIM_RST_CLOSEDPORT;
@@ -516,12 +517,12 @@ findpcb:
#ifdef IPSEC
#ifdef INET6
if (isipv6 && ipsec6_in_reject(m, inp)) {
- ipsec6stat.in_polvio++;
+ V_ipsec6stat.in_polvio++;
goto dropunlock;
} else
#endif /* INET6 */
if (ipsec4_in_reject(m, inp) != 0) {
- ipsec4stat.in_polvio++;
+ V_ipsec4stat.in_polvio++;
goto dropunlock;
}
#endif /* IPSEC */
@@ -553,7 +554,7 @@ findpcb:
*/
if (tcp_twcheck(inp, &to, th, m, tlen))
goto findpcb;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
/*
@@ -653,9 +654,9 @@ findpcb:
log(LOG_DEBUG, "%s; %s: Listen socket: "
"Socket allocation failed due to "
"limits or memory shortage, %s\n",
- s, __func__, (tcp_sc_rst_sock_fail ?
+ s, __func__, (V_tcp_sc_rst_sock_fail ?
"sending RST" : "try again"));
- if (tcp_sc_rst_sock_fail) {
+ if (V_tcp_sc_rst_sock_fail) {
rstreason = BANDLIM_UNLIMITED;
goto dropwithreset;
} else
@@ -679,7 +680,7 @@ findpcb:
*/
tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
iptos);
- INP_INFO_UNLOCK_ASSERT(&tcbinfo);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
return;
}
/*
@@ -704,7 +705,7 @@ findpcb:
log(LOG_DEBUG, "%s; %s: Listen socket: "
"SYN is missing, segment ignored\n",
s, __func__);
- tcpstat.tcps_badsyn++;
+ V_tcpstat.tcps_badsyn++;
goto dropunlock;
}
/*
@@ -716,7 +717,7 @@ findpcb:
"SYN|ACK invalid, segment rejected\n",
s, __func__);
syncache_badack(&inc); /* XXX: Not needed! */
- tcpstat.tcps_badsyn++;
+ V_tcpstat.tcps_badsyn++;
rstreason = BANDLIM_RST_OPENPORT;
goto dropwithreset;
}
@@ -731,12 +732,12 @@ findpcb:
* XXX: This is a violation of the TCP specification
* and was used by RFC1644.
*/
- if ((thflags & TH_FIN) && drop_synfin) {
+ if ((thflags & TH_FIN) && V_drop_synfin) {
if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: Listen socket: "
"SYN|FIN segment ignored (based on "
"sysctl setting)\n", s, __func__);
- tcpstat.tcps_badsyn++;
+ V_tcpstat.tcps_badsyn++;
goto dropunlock;
}
/*
@@ -781,7 +782,7 @@ findpcb:
* handling - worse, they are not exactly the same.
* I believe 5.5.4 is the best one, so we follow 5.5.4.
*/
- if (isipv6 && !ip6_use_deprecated) {
+ if (isipv6 && !V_ip6_use_deprecated) {
struct in6_ifaddr *ia6;
if ((ia6 = ip6_getdstifaddr(m)) &&
@@ -869,7 +870,7 @@ findpcb:
* Entry added to syncache and mbuf consumed.
* Everything already unlocked by syncache_add().
*/
- INP_INFO_UNLOCK_ASSERT(&tcbinfo);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
return;
}
@@ -879,20 +880,20 @@ findpcb:
* the inpcb, and unlocks pcbinfo.
*/
tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
- INP_INFO_UNLOCK_ASSERT(&tcbinfo);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
return;
dropwithreset:
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
tcp_dropwithreset(m, th, tp, tlen, rstreason);
m = NULL; /* mbuf chain got consumed. */
dropunlock:
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
if (inp != NULL)
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
drop:
- INP_INFO_UNLOCK_ASSERT(&tcbinfo);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
if (s != NULL)
free(s, M_TCPLOG);
if (m != NULL)
@@ -921,7 +922,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
#endif
thflags = th->th_flags;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(tp->t_inpcb);
KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
__func__));
@@ -951,13 +952,13 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
switch (iptos & IPTOS_ECN_MASK) {
case IPTOS_ECN_CE:
tp->t_flags |= TF_ECN_SND_ECE;
- tcpstat.tcps_ecn_ce++;
+ V_tcpstat.tcps_ecn_ce++;
break;
case IPTOS_ECN_ECT0:
- tcpstat.tcps_ecn_ect0++;
+ V_tcpstat.tcps_ecn_ect0++;
break;
case IPTOS_ECN_ECT1:
- tcpstat.tcps_ecn_ect1++;
+ V_tcpstat.tcps_ecn_ect1++;
break;
}
@@ -970,7 +971,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if ((thflags & TH_ECE) &&
SEQ_LEQ(th->th_ack, tp->snd_recover)) {
- tcpstat.tcps_ecn_rcwnd++;
+ V_tcpstat.tcps_ecn_rcwnd++;
tcp_congestion_exp(tp);
}
}
@@ -1067,28 +1068,28 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (SEQ_GT(th->th_ack, tp->snd_una) &&
SEQ_LEQ(th->th_ack, tp->snd_max) &&
tp->snd_cwnd >= tp->snd_wnd &&
- ((!tcp_do_newreno &&
+ ((!V_tcp_do_newreno &&
!(tp->t_flags & TF_SACK_PERMIT) &&
tp->t_dupacks < tcprexmtthresh) ||
- ((tcp_do_newreno ||
+ ((V_tcp_do_newreno ||
(tp->t_flags & TF_SACK_PERMIT)) &&
!IN_FASTRECOVERY(tp) &&
(to.to_flags & TOF_SACK) == 0 &&
TAILQ_EMPTY(&tp->snd_holes)))) {
KASSERT(headlocked,
("%s: headlocked", __func__));
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
/*
* This is a pure ack for outstanding data.
*/
- ++tcpstat.tcps_predack;
+ ++V_tcpstat.tcps_predack;
/*
* "bad retransmit" recovery.
*/
if (tp->t_rxtshift == 1 &&
ticks < tp->t_badrxtwin) {
- ++tcpstat.tcps_sndrexmitbad;
+ ++V_tcpstat.tcps_sndrexmitbad;
tp->snd_cwnd = tp->snd_cwnd_prev;
tp->snd_ssthresh =
tp->snd_ssthresh_prev;
@@ -1124,8 +1125,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
tcp_xmit_bandwidth_limit(tp, th->th_ack);
acked = th->th_ack - tp->snd_una;
- tcpstat.tcps_rcvackpack++;
- tcpstat.tcps_rcvackbyte += acked;
+ V_tcpstat.tcps_rcvackpack++;
+ V_tcpstat.tcps_rcvackbyte += acked;
sbdrop(&so->so_snd, acked);
if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
@@ -1170,7 +1171,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
int newsize = 0; /* automatic sockbuf scaling */
KASSERT(headlocked, ("%s: headlocked", __func__));
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
/*
* This is a pure, in-sequence data packet
@@ -1180,7 +1181,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
/* Clean receiver SACK report if present */
if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
tcp_clean_sackreport(tp);
- ++tcpstat.tcps_preddat;
+ ++V_tcpstat.tcps_preddat;
tp->rcv_nxt += tlen;
/*
* Pull snd_wl1 up to prevent seq wrap relative to
@@ -1192,8 +1193,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* rcv_nxt.
*/
tp->rcv_up = tp->rcv_nxt;
- tcpstat.tcps_rcvpack++;
- tcpstat.tcps_rcvbyte += tlen;
+ V_tcpstat.tcps_rcvpack++;
+ V_tcpstat.tcps_rcvbyte += tlen;
ND6_HINT(tp); /* Some progress has been made */
#ifdef TCPDEBUG
if (so->so_options & SO_DEBUG)
@@ -1233,7 +1234,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* TODO: Only step up if the application is actually serving
* the buffer to better manage the socket buffer resources.
*/
- if (tcp_do_autorcvbuf &&
+ if (V_tcp_do_autorcvbuf &&
to.to_tsecr &&
(so->so_rcv.sb_flags & SB_AUTOSIZE)) {
if (to.to_tsecr > tp->rfbuf_ts &&
@@ -1241,11 +1242,11 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (tp->rfbuf_cnt >
(so->so_rcv.sb_hiwat / 8 * 7) &&
so->so_rcv.sb_hiwat <
- tcp_autorcvbuf_max) {
+ V_tcp_autorcvbuf_max) {
newsize =
min(so->so_rcv.sb_hiwat +
- tcp_autorcvbuf_inc,
- tcp_autorcvbuf_max);
+ V_tcp_autorcvbuf_inc,
+ V_tcp_autorcvbuf_max);
}
/* Start over with next RTT. */
tp->rfbuf_ts = 0;
@@ -1343,7 +1344,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->irs = th->th_seq;
tcp_rcvseqinit(tp);
if (thflags & TH_ACK) {
- tcpstat.tcps_connects++;
+ V_tcpstat.tcps_connects++;
soisconnected(so);
#ifdef MAC
SOCK_LOCK(so);
@@ -1367,9 +1368,9 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
else
tp->t_flags |= TF_ACKNOW;
- if ((thflags & TH_ECE) && tcp_do_ecn) {
+ if ((thflags & TH_ECE) && V_tcp_do_ecn) {
tp->t_flags |= TF_ECN_PERMIT;
- tcpstat.tcps_ecn_shs++;
+ V_tcpstat.tcps_ecn_shs++;
}
/*
@@ -1418,8 +1419,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
m_adj(m, -todrop);
tlen = tp->rcv_wnd;
thflags &= ~TH_FIN;
- tcpstat.tcps_rcvpackafterwin++;
- tcpstat.tcps_rcvbyteafterwin += todrop;
+ V_tcpstat.tcps_rcvpackafterwin++;
+ V_tcpstat.tcps_rcvbyteafterwin += todrop;
}
tp->snd_wl1 = th->th_seq - 1;
tp->rcv_up = th->th_seq;
@@ -1518,12 +1519,12 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
goto close;
case TCPS_ESTABLISHED:
- if (tcp_insecure_rst == 0 &&
+ if (V_tcp_insecure_rst == 0 &&
!(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
!(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
goto drop;
}
/* FALLTHROUGH */
@@ -1533,7 +1534,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
so->so_error = ECONNRESET;
close:
tp->t_state = TCPS_CLOSED;
- tcpstat.tcps_drops++;
+ V_tcpstat.tcps_drops++;
KASSERT(headlocked, ("%s: trimthenstep6: "
"tcp_close: head not locked", __func__));
tp = tcp_close(tp);
@@ -1572,9 +1573,9 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
tp->ts_recent = 0;
} else {
- tcpstat.tcps_rcvduppack++;
- tcpstat.tcps_rcvdupbyte += tlen;
- tcpstat.tcps_pawsdrop++;
+ V_tcpstat.tcps_rcvduppack++;
+ V_tcpstat.tcps_rcvdupbyte += tlen;
+ V_tcpstat.tcps_pawsdrop++;
if (tlen)
goto dropafterack;
goto drop;
@@ -1622,11 +1623,11 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
tp->t_flags |= TF_ACKNOW;
todrop = tlen;
- tcpstat.tcps_rcvduppack++;
- tcpstat.tcps_rcvdupbyte += todrop;
+ V_tcpstat.tcps_rcvduppack++;
+ V_tcpstat.tcps_rcvdupbyte += todrop;
} else {
- tcpstat.tcps_rcvpartduppack++;
- tcpstat.tcps_rcvpartdupbyte += todrop;
+ V_tcpstat.tcps_rcvpartduppack++;
+ V_tcpstat.tcps_rcvpartdupbyte += todrop;
}
drop_hdrlen += todrop; /* drop from the top afterwards */
th->th_seq += todrop;
@@ -1656,7 +1657,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
free(s, M_TCPLOG);
}
tp = tcp_close(tp);
- tcpstat.tcps_rcvafterclose++;
+ V_tcpstat.tcps_rcvafterclose++;
rstreason = BANDLIM_UNLIMITED;
goto dropwithreset;
}
@@ -1667,9 +1668,9 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
if (todrop > 0) {
- tcpstat.tcps_rcvpackafterwin++;
+ V_tcpstat.tcps_rcvpackafterwin++;
if (todrop >= tlen) {
- tcpstat.tcps_rcvbyteafterwin += tlen;
+ V_tcpstat.tcps_rcvbyteafterwin += tlen;
/*
* If window is closed can only take segments at
* window edge, and have to drop data and PUSH from
@@ -1679,11 +1680,11 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
tp->t_flags |= TF_ACKNOW;
- tcpstat.tcps_rcvwinprobe++;
+ V_tcpstat.tcps_rcvwinprobe++;
} else
goto dropafterack;
} else
- tcpstat.tcps_rcvbyteafterwin += todrop;
+ V_tcpstat.tcps_rcvbyteafterwin += todrop;
m_adj(m, -todrop);
tlen -= todrop;
thflags &= ~(TH_PUSH|TH_FIN);
@@ -1754,7 +1755,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
*/
case TCPS_SYN_RECEIVED:
- tcpstat.tcps_connects++;
+ V_tcpstat.tcps_connects++;
soisconnected(so);
/* Do window scaling? */
if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
@@ -1800,7 +1801,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
case TCPS_CLOSING:
case TCPS_LAST_ACK:
if (SEQ_GT(th->th_ack, tp->snd_max)) {
- tcpstat.tcps_rcvacktoomuch++;
+ V_tcpstat.tcps_rcvacktoomuch++;
goto dropafterack;
}
if ((tp->t_flags & TF_SACK_PERMIT) &&
@@ -1809,7 +1810,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tcp_sack_doack(tp, &to, th->th_ack);
if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
if (tlen == 0 && tiwin == tp->snd_wnd) {
- tcpstat.tcps_rcvdupack++;
+ V_tcpstat.tcps_rcvdupack++;
/*
* If we have outstanding data (other than
* a window probe), this is a completely
@@ -1841,7 +1842,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
th->th_ack != tp->snd_una)
tp->t_dupacks = 0;
else if (++tp->t_dupacks > tcprexmtthresh ||
- ((tcp_do_newreno ||
+ ((V_tcp_do_newreno ||
(tp->t_flags & TF_SACK_PERMIT)) &&
IN_FASTRECOVERY(tp))) {
if ((tp->t_flags & TF_SACK_PERMIT) &&
@@ -1880,8 +1881,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_dupacks = 0;
break;
}
- } else if (tcp_do_newreno ||
- tcp_do_ecn) {
+ } else if (V_tcp_do_newreno ||
+ V_tcp_do_ecn) {
if (SEQ_LEQ(th->th_ack,
tp->snd_recover)) {
tp->t_dupacks = 0;
@@ -1892,7 +1893,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
if (tp->t_flags & TF_SACK_PERMIT) {
- tcpstat.tcps_sack_recovery_episode++;
+ V_tcpstat.tcps_sack_recovery_episode++;
tp->sack_newdata = tp->snd_nxt;
tp->snd_cwnd = tp->t_maxseg;
(void) tcp_output(tp);
@@ -1910,7 +1911,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (SEQ_GT(onxt, tp->snd_nxt))
tp->snd_nxt = onxt;
goto drop;
- } else if (tcp_do_rfc3042) {
+ } else if (V_tcp_do_rfc3042) {
u_long oldcwnd = tp->snd_cwnd;
tcp_seq oldsndmax = tp->snd_max;
u_int sent;
@@ -1952,7 +1953,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* If the congestion window was inflated to account
* for the other side's cached packets, retract it.
*/
- if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
+ if (V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
if (IN_FASTRECOVERY(tp)) {
if (SEQ_LT(th->th_ack, tp->snd_recover)) {
if (tp->t_flags & TF_SACK_PERMIT)
@@ -2013,8 +2014,8 @@ process_ACK:
INP_WLOCK_ASSERT(tp->t_inpcb);
acked = th->th_ack - tp->snd_una;
- tcpstat.tcps_rcvackpack++;
- tcpstat.tcps_rcvackbyte += acked;
+ V_tcpstat.tcps_rcvackpack++;
+ V_tcpstat.tcps_rcvackbyte += acked;
/*
* If we just performed our first retransmit, and the ACK
@@ -2024,7 +2025,7 @@ process_ACK:
* we left off.
*/
if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
- ++tcpstat.tcps_sndrexmitbad;
+ ++V_tcpstat.tcps_sndrexmitbad;
tp->snd_cwnd = tp->snd_cwnd_prev;
tp->snd_ssthresh = tp->snd_ssthresh_prev;
tp->snd_recover = tp->snd_recover_prev;
@@ -2086,7 +2087,7 @@ process_ACK:
* Otherwise open linearly: maxseg per window
* (maxseg^2 / cwnd per packet).
*/
- if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
+ if ((!V_tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
!IN_FASTRECOVERY(tp)) {
u_int cw = tp->snd_cwnd;
u_int incr = tp->t_maxseg;
@@ -2107,12 +2108,12 @@ process_ACK:
/* NB: sowwakeup_locked() does an implicit unlock. */
sowwakeup_locked(so);
/* Detect una wraparound. */
- if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
+ if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
!IN_FASTRECOVERY(tp) &&
SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
tp->snd_recover = th->th_ack - 1;
- if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
+ if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
IN_FASTRECOVERY(tp) &&
SEQ_GEQ(th->th_ack, tp->snd_recover))
EXIT_FASTRECOVERY(tp);
@@ -2167,7 +2168,7 @@ process_ACK:
KASSERT(headlocked, ("%s: process_ACK: "
"head not locked", __func__));
tcp_twstart(tp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
m_freem(m);
return;
@@ -2206,7 +2207,7 @@ step6:
/* keep track of pure window updates */
if (tlen == 0 &&
tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
- tcpstat.tcps_rcvwinupd++;
+ V_tcpstat.tcps_rcvwinupd++;
tp->snd_wnd = tiwin;
tp->snd_wl1 = th->th_seq;
tp->snd_wl2 = th->th_ack;
@@ -2314,8 +2315,8 @@ dodata: /* XXX */
tp->t_flags |= TF_ACKNOW;
tp->rcv_nxt += tlen;
thflags = th->th_flags & TH_FIN;
- tcpstat.tcps_rcvpack++;
- tcpstat.tcps_rcvbyte += tlen;
+ V_tcpstat.tcps_rcvpack++;
+ V_tcpstat.tcps_rcvbyte += tlen;
ND6_HINT(tp);
SOCKBUF_LOCK(&so->so_rcv);
if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
@@ -2400,11 +2401,11 @@ dodata: /* XXX */
KASSERT(headlocked == 1, ("%s: dodata: "
"TCP_FIN_WAIT_2: head not locked", __func__));
tcp_twstart(tp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
}
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
#ifdef TCPDEBUG
if (so->so_options & SO_DEBUG)
@@ -2421,7 +2422,7 @@ dodata: /* XXX */
check_delack:
KASSERT(headlocked == 0, ("%s: check_delack: head locked",
__func__));
- INP_INFO_UNLOCK_ASSERT(&tcbinfo);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(tp->t_inpcb);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
@@ -2459,7 +2460,7 @@ dropafterack:
&tcp_savetcp, 0);
#endif
KASSERT(headlocked, ("%s: headlocked should be 1", __func__));
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
tp->t_flags |= TF_ACKNOW;
(void) tcp_output(tp);
INP_WUNLOCK(tp->t_inpcb);
@@ -2474,7 +2475,7 @@ dropwithreset:
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
if (headlocked)
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
drop:
@@ -2489,7 +2490,7 @@ drop:
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
if (headlocked)
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
m_freem(m);
return;
}
@@ -2624,7 +2625,7 @@ tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
continue;
if (!(flags & TO_SYN))
continue;
- if (!tcp_do_sack)
+ if (!V_tcp_do_sack)
continue;
to->to_flags |= TOF_SACKPERM;
break;
@@ -2636,7 +2637,7 @@ tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
to->to_flags |= TOF_SACK;
to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
to->to_sacks = cp + 2;
- tcpstat.tcps_sack_rcv_blocks++;
+ V_tcpstat.tcps_sack_rcv_blocks++;
break;
default:
continue;
@@ -2690,7 +2691,7 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt)
INP_WLOCK_ASSERT(tp->t_inpcb);
- tcpstat.tcps_rttupdated++;
+ V_tcpstat.tcps_rttupdated++;
tp->t_rttupdated++;
if (tp->t_srtt != 0) {
/*
@@ -2814,12 +2815,12 @@ tcp_mss(struct tcpcb *tp, int offer)
#ifdef INET6
if (isipv6) {
maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
- tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
+ tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
} else
#endif
{
maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
- tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
+ tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
}
/*
@@ -2850,7 +2851,7 @@ tcp_mss(struct tcpcb *tp, int offer)
* Prevent DoS attack with too small MSS. Round up
* to at least minmss.
*/
- offer = max(offer, tcp_minmss);
+ offer = max(offer, V_tcp_minmss);
/*
* Sanity check: make sure that maxopd will be large
* enough to allow some data on segments even if the
@@ -2875,16 +2876,16 @@ tcp_mss(struct tcpcb *tp, int offer)
#ifdef INET6
if (isipv6) {
mss = maxmtu - min_protoh;
- if (!path_mtu_discovery &&
+ if (!V_path_mtu_discovery &&
!in6_localaddr(&inp->in6p_faddr))
- mss = min(mss, tcp_v6mssdflt);
+ mss = min(mss, V_tcp_v6mssdflt);
} else
#endif
{
mss = maxmtu - min_protoh;
- if (!path_mtu_discovery &&
+ if (!V_path_mtu_discovery &&
!in_localaddr(inp->inp_faddr))
- mss = min(mss, tcp_mssdflt);
+ mss = min(mss, V_tcp_mssdflt);
}
}
mss = min(mss, offer);
@@ -2960,10 +2961,10 @@ tcp_mss(struct tcpcb *tp, int offer)
if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
tp->t_srtt = rtt;
tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
- tcpstat.tcps_usedrtt++;
+ V_tcpstat.tcps_usedrtt++;
if (metrics.rmx_rttvar) {
tp->t_rttvar = metrics.rmx_rttvar;
- tcpstat.tcps_usedrttvar++;
+ V_tcpstat.tcps_usedrttvar++;
} else {
/* default variation is +- 1 rtt */
tp->t_rttvar =
@@ -2981,7 +2982,7 @@ tcp_mss(struct tcpcb *tp, int offer)
* threshold to no less than 2*mss.
*/
tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
- tcpstat.tcps_usedssthresh++;
+ V_tcpstat.tcps_usedssthresh++;
}
if (metrics.rmx_bandwidth)
tp->snd_bandwidth = metrics.rmx_bandwidth;
@@ -3010,7 +3011,7 @@ tcp_mss(struct tcpcb *tp, int offer)
min(tp->snd_wnd, so->so_snd.sb_hiwat)));
else
#endif
- if (tcp_do_rfc3390)
+ if (V_tcp_do_rfc3390)
tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
#ifdef INET6
else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
@@ -3018,9 +3019,9 @@ tcp_mss(struct tcpcb *tp, int offer)
#else
else if (in_localaddr(inp->inp_faddr))
#endif
- tp->snd_cwnd = mss * ss_fltsz_local;
+ tp->snd_cwnd = mss * V_ss_fltsz_local;
else
- tp->snd_cwnd = mss * ss_fltsz;
+ tp->snd_cwnd = mss * V_ss_fltsz;
/* Check the interface for TSO capabilities. */
if (mtuflags & CSUM_TSO)
@@ -3045,14 +3046,14 @@ tcp_mssopt(struct in_conninfo *inc)
#ifdef INET6
if (isipv6) {
- mss = tcp_v6mssdflt;
+ mss = V_tcp_v6mssdflt;
maxmtu = tcp_maxmtu6(inc, NULL);
thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
} else
#endif
{
- mss = tcp_mssdflt;
+ mss = V_tcp_mssdflt;
maxmtu = tcp_maxmtu(inc, NULL);
thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
min_protoh = sizeof(struct tcpiphdr);
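Among the knobs picked up by the V_ prefix in tcp_mss() above is tcp_do_rfc3390, which selects the min(4 * mss, max(2 * mss, 4380)) initial congestion window. A quick worked illustration of that clamp follows; the sample MSS values are arbitrary and the helper is not part of the patch.

#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/* The RFC 3390 initial-window clamp applied when tcp_do_rfc3390 is set. */
static unsigned int
rfc3390_cwnd(unsigned int mss)
{

	return (MIN(4 * mss, MAX(2 * mss, 4380)));
}

int
main(void)
{
	unsigned int mss[] = { 536, 1460, 4096 };	/* arbitrary sample values */
	unsigned int i;

	for (i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
		printf("mss %4u -> initial cwnd %5u bytes\n",
		    mss[i], rfc3390_cwnd(mss[i]));
	return (0);
}

For a common 1460-byte MSS this works out to 4380 bytes, i.e. three segments, while very small or very large MSS values are bounded by the 4*mss and 2*mss terms respectively.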
diff --git a/sys/netinet/tcp_offload.c b/sys/netinet/tcp_offload.c
index d64805f..ec1fe23 100644
--- a/sys/netinet/tcp_offload.c
+++ b/sys/netinet/tcp_offload.c
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/if_types.h>
@@ -102,20 +103,20 @@ void
tcp_offload_twstart(struct tcpcb *tp)
{
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(tp->t_inpcb);
tcp_twstart(tp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
struct tcpcb *
tcp_offload_close(struct tcpcb *tp)
{
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(tp->t_inpcb);
tp = tcp_close(tp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
if (tp)
INP_WUNLOCK(tp->t_inpcb);
@@ -126,10 +127,10 @@ struct tcpcb *
tcp_offload_drop(struct tcpcb *tp, int error)
{
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(tp->t_inpcb);
tp = tcp_drop(tp, error);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
if (tp)
INP_WUNLOCK(tp->t_inpcb);
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index ac8b9e0..1e6e53e 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/route.h>
@@ -170,15 +171,15 @@ tcp_output(struct tcpcb *tp)
* Set the slow-start flight size depending on whether
* this is a local network or not.
*/
- int ss = ss_fltsz;
+ int ss = V_ss_fltsz;
#ifdef INET6
if (isipv6) {
if (in6_localaddr(&tp->t_inpcb->in6p_faddr))
- ss = ss_fltsz_local;
+ ss = V_ss_fltsz_local;
} else
#endif /* INET6 */
if (in_localaddr(tp->t_inpcb->inp_faddr))
- ss = ss_fltsz_local;
+ ss = V_ss_fltsz_local;
tp->snd_cwnd = tp->t_maxseg * ss;
}
tp->t_flags &= ~TF_LASTIDLE;
@@ -252,8 +253,8 @@ again:
if (len > 0) {
sack_rxmit = 1;
sendalot = 1;
- tcpstat.tcps_sack_rexmits++;
- tcpstat.tcps_sack_rexmit_bytes +=
+ V_tcpstat.tcps_sack_rexmits++;
+ V_tcpstat.tcps_sack_rexmit_bytes +=
min(len, tp->t_maxseg);
}
}
@@ -428,14 +429,14 @@ after_sack_rexmit:
* with congestion window. Requires another timer. Has to
* wait for upcoming tcp timer rewrite.
*/
- if (tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
+ if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
- so->so_snd.sb_cc < tcp_autosndbuf_max &&
+ so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
if (!sbreserve_locked(&so->so_snd,
- min(so->so_snd.sb_hiwat + tcp_autosndbuf_inc,
- tcp_autosndbuf_max), so, curthread))
+ min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
+ V_tcp_autosndbuf_max), so, curthread))
so->so_snd.sb_flags &= ~SB_AUTOSIZE;
}
}
@@ -464,7 +465,7 @@ after_sack_rexmit:
ipsec_optlen = ipsec_hdrsiz_tcp(tp);
#endif
if (len > tp->t_maxseg) {
- if ((tp->t_flags & TF_TSO) && tcp_do_tso &&
+ if ((tp->t_flags & TF_TSO) && V_tcp_do_tso &&
((tp->t_flags & TF_SIGNATURE) == 0) &&
tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
tp->t_inpcb->inp_options == NULL &&
@@ -754,13 +755,13 @@ send:
u_int moff;
if ((tp->t_flags & TF_FORCEDATA) && len == 1)
- tcpstat.tcps_sndprobe++;
+ V_tcpstat.tcps_sndprobe++;
else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
- tcpstat.tcps_sndrexmitpack++;
- tcpstat.tcps_sndrexmitbyte += len;
+ V_tcpstat.tcps_sndrexmitpack++;
+ V_tcpstat.tcps_sndrexmitbyte += len;
} else {
- tcpstat.tcps_sndpack++;
- tcpstat.tcps_sndbyte += len;
+ V_tcpstat.tcps_sndpack++;
+ V_tcpstat.tcps_sndbyte += len;
}
#ifdef notyet
if ((m = m_copypack(so->so_snd.sb_mb, off,
@@ -827,13 +828,13 @@ send:
} else {
SOCKBUF_UNLOCK(&so->so_snd);
if (tp->t_flags & TF_ACKNOW)
- tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndacks++;
else if (flags & (TH_SYN|TH_FIN|TH_RST))
- tcpstat.tcps_sndctrl++;
+ V_tcpstat.tcps_sndctrl++;
else if (SEQ_GT(tp->snd_up, tp->snd_una))
- tcpstat.tcps_sndurg++;
+ V_tcpstat.tcps_sndurg++;
else
- tcpstat.tcps_sndwinup++;
+ V_tcpstat.tcps_sndwinup++;
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
@@ -882,9 +883,9 @@ send:
* resend those bits a number of times as per
* RFC 3168.
*/
- if (tp->t_state == TCPS_SYN_SENT && tcp_do_ecn) {
+ if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
if (tp->t_rxtshift >= 1) {
- if (tp->t_rxtshift <= tcp_ecn_maxretries)
+ if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
flags |= TH_ECE|TH_CWR;
} else
flags |= TH_ECE|TH_CWR;
@@ -905,7 +906,7 @@ send:
else
#endif
ip->ip_tos |= IPTOS_ECN_ECT0;
- tcpstat.tcps_ecn_ect0++;
+ V_tcpstat.tcps_ecn_ect0++;
}
/*
@@ -1074,7 +1075,7 @@ send:
if (tp->t_rtttime == 0) {
tp->t_rtttime = ticks;
tp->t_rtseq = startseq;
- tcpstat.tcps_segstimed++;
+ V_tcpstat.tcps_segstimed++;
}
}
@@ -1172,7 +1173,7 @@ timer:
	 * Section 2. However the tcp hostcache mitigates the problem
* so it affects only the first tcp connection with a host.
*/
- if (path_mtu_discovery)
+ if (V_path_mtu_discovery)
ip->ip_off |= IP_DF;
error = ip_output(m, tp->t_inpcb->inp_options, NULL,
@@ -1251,7 +1252,7 @@ out:
return (error);
}
}
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndtotal++;
/*
* Data sent (as far as we can tell).
@@ -1272,7 +1273,7 @@ out:
* on the transmitter effectively destroys the TCP window, forcing
* it to four packets (1.5Kx4 = 6K window).
*/
- if (sendalot && (!tcp_do_newreno || --maxburst))
+ if (sendalot && (!V_tcp_do_newreno || --maxburst))
goto again;
#endif
if (sendalot)
@@ -1425,7 +1426,7 @@ tcp_addoptions(struct tcpopt *to, u_char *optp)
optlen += TCPOLEN_SACK;
sack++;
}
- tcpstat.tcps_sack_send_blocks++;
+ V_tcpstat.tcps_sack_send_blocks++;
break;
}
default:
diff --git a/sys/netinet/tcp_reass.c b/sys/netinet/tcp_reass.c
index 1502242..cf83bc2 100644
--- a/sys/netinet/tcp_reass.c
+++ b/sys/netinet/tcp_reass.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -101,8 +102,8 @@ static void
tcp_reass_zone_change(void *tag)
{
- tcp_reass_maxseg = nmbclusters / 16;
- uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
+ V_tcp_reass_maxseg = nmbclusters / 16;
+ uma_zone_set_max(tcp_reass_zone, V_tcp_reass_maxseg);
}
uma_zone_t tcp_reass_zone;
@@ -111,12 +112,12 @@ void
tcp_reass_init(void)
{
- tcp_reass_maxseg = nmbclusters / 16;
+ V_tcp_reass_maxseg = nmbclusters / 16;
TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
- &tcp_reass_maxseg);
+ &V_tcp_reass_maxseg);
tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
- uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
+ uma_zone_set_max(tcp_reass_zone, V_tcp_reass_maxseg);
EVENTHANDLER_REGISTER(nmbclusters_change,
tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}
@@ -154,10 +155,10 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
* process the missing segment.
*/
if (th->th_seq != tp->rcv_nxt &&
- (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
- tp->t_segqlen >= tcp_reass_maxqlen)) {
- tcp_reass_overflows++;
- tcpstat.tcps_rcvmemdrop++;
+ (V_tcp_reass_qsize + 1 >= V_tcp_reass_maxseg ||
+ tp->t_segqlen >= V_tcp_reass_maxqlen)) {
+ V_tcp_reass_overflows++;
+ V_tcpstat.tcps_rcvmemdrop++;
m_freem(m);
*tlenp = 0;
return (0);
@@ -169,13 +170,13 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
*/
te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
if (te == NULL) {
- tcpstat.tcps_rcvmemdrop++;
+ V_tcpstat.tcps_rcvmemdrop++;
m_freem(m);
*tlenp = 0;
return (0);
}
tp->t_segqlen++;
- tcp_reass_qsize++;
+ V_tcp_reass_qsize++;
/*
* Find a segment which begins after this one does.
@@ -197,12 +198,12 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
if (i > 0) {
if (i >= *tlenp) {
- tcpstat.tcps_rcvduppack++;
- tcpstat.tcps_rcvdupbyte += *tlenp;
+ V_tcpstat.tcps_rcvduppack++;
+ V_tcpstat.tcps_rcvdupbyte += *tlenp;
m_freem(m);
uma_zfree(tcp_reass_zone, te);
tp->t_segqlen--;
- tcp_reass_qsize--;
+ V_tcp_reass_qsize--;
/*
* Try to present any queued data
* at the left window edge to the user.
@@ -216,8 +217,8 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
th->th_seq += i;
}
}
- tcpstat.tcps_rcvoopack++;
- tcpstat.tcps_rcvoobyte += *tlenp;
+ V_tcpstat.tcps_rcvoopack++;
+ V_tcpstat.tcps_rcvoobyte += *tlenp;
/*
* While we overlap succeeding segments trim them or,
@@ -239,7 +240,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
m_freem(q->tqe_m);
uma_zfree(tcp_reass_zone, q);
tp->t_segqlen--;
- tcp_reass_qsize--;
+ V_tcp_reass_qsize--;
q = nq;
}
@@ -276,7 +277,7 @@ present:
sbappendstream_locked(&so->so_rcv, q->tqe_m);
uma_zfree(tcp_reass_zone, q);
tp->t_segqlen--;
- tcp_reass_qsize--;
+ V_tcp_reass_qsize--;
q = nq;
} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
ND6_HINT(tp);
diff --git a/sys/netinet/tcp_sack.c b/sys/netinet/tcp_sack.c
index 0b55a04..961cb78 100644
--- a/sys/netinet/tcp_sack.c
+++ b/sys/netinet/tcp_sack.c
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
@@ -254,9 +255,9 @@ tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
struct sackhole *hole;
- if (tp->snd_numholes >= tcp_sack_maxholes ||
- tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
- tcpstat.tcps_sack_sboverflow++;
+ if (tp->snd_numholes >= V_tcp_sack_maxholes ||
+ V_tcp_sack_globalholes >= V_tcp_sack_globalmaxholes) {
+ V_tcpstat.tcps_sack_sboverflow++;
return NULL;
}
@@ -269,7 +270,7 @@ tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
hole->rxmit = start;
tp->snd_numholes++;
- tcp_sack_globalholes++;
+ V_tcp_sack_globalholes++;
return hole;
}
@@ -284,10 +285,10 @@ tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
uma_zfree(sack_hole_zone, hole);
tp->snd_numholes--;
- tcp_sack_globalholes--;
+ V_tcp_sack_globalholes--;
KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
- KASSERT(tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
+ KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
}
/*
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index 4de3e59..659626c 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -236,7 +237,7 @@ static void
tcp_zone_change(void *tag)
{
- uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
uma_zone_set_max(tcpcb_zone, maxsockets);
tcp_tw_zone_change();
}
@@ -265,25 +266,25 @@ tcp_init(void)
if (tcp_rexmit_min < 1)
tcp_rexmit_min = 1;
tcp_rexmit_slop = TCPTV_CPU_VAR;
- tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
+ V_tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
- INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
- LIST_INIT(&tcb);
- tcbinfo.ipi_listhead = &tcb;
+ INP_INFO_LOCK_INIT(&V_tcbinfo, "tcp");
+ LIST_INIT(&V_tcb);
+ V_tcbinfo.ipi_listhead = &V_tcb;
TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
if (!powerof2(hashsize)) {
printf("WARNING: TCB hash size not a power of 2\n");
hashsize = 512; /* safe default */
}
tcp_tcbhashsize = hashsize;
- tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
- &tcbinfo.ipi_hashmask);
- tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
- &tcbinfo.ipi_porthashmask);
- tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
+ V_tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
+ &V_tcbinfo.ipi_hashmask);
+ V_tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
+ &V_tcbinfo.ipi_porthashmask);
+ V_tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
- uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
@@ -512,8 +513,8 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
{
tlen += sizeof (struct tcpiphdr);
ip->ip_len = tlen;
- ip->ip_ttl = ip_defttl;
- if (path_mtu_discovery)
+ ip->ip_ttl = V_ip_defttl;
+ if (V_path_mtu_discovery)
ip->ip_off |= IP_DF;
}
m->m_len = tlen;
@@ -596,9 +597,9 @@ tcp_newtcpcb(struct inpcb *inp)
/* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
- isipv6 ? tcp_v6mssdflt :
+ isipv6 ? V_tcp_v6mssdflt :
#endif /* INET6 */
- tcp_mssdflt;
+ V_tcp_mssdflt;
/* Set up our timeouts. */
callout_init(&tp->t_timers->tt_rexmt, CALLOUT_MPSAFE);
@@ -607,9 +608,9 @@ tcp_newtcpcb(struct inpcb *inp)
callout_init(&tp->t_timers->tt_2msl, CALLOUT_MPSAFE);
callout_init(&tp->t_timers->tt_delack, CALLOUT_MPSAFE);
- if (tcp_do_rfc1323)
+ if (V_tcp_do_rfc1323)
tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
- if (tcp_do_sack)
+ if (V_tcp_do_sack)
tp->t_flags |= TF_SACK_PERMIT;
TAILQ_INIT(&tp->snd_holes);
tp->t_inpcb = inp; /* XXX */
@@ -632,7 +633,7 @@ tcp_newtcpcb(struct inpcb *inp)
* because the socket may be bound to an IPv6 wildcard address,
* which may match an IPv4-mapped IPv6 address.
*/
- inp->inp_ip_ttl = ip_defttl;
+ inp->inp_ip_ttl = V_ip_defttl;
inp->inp_ppcb = tp;
return (tp); /* XXX */
}
@@ -647,15 +648,15 @@ tcp_drop(struct tcpcb *tp, int errno)
{
struct socket *so = tp->t_inpcb->inp_socket;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(tp->t_inpcb);
if (TCPS_HAVERCVDSYN(tp->t_state)) {
tp->t_state = TCPS_CLOSED;
(void) tcp_output_reset(tp);
- tcpstat.tcps_drops++;
+ V_tcpstat.tcps_drops++;
} else
- tcpstat.tcps_conndrops++;
+ V_tcpstat.tcps_conndrops++;
if (errno == ETIMEDOUT && tp->t_softerror)
errno = tp->t_softerror;
so->so_error = errno;
@@ -748,7 +749,7 @@ tcp_discardcb(struct tcpcb *tp)
m_freem(q->tqe_m);
uma_zfree(tcp_reass_zone, q);
tp->t_segqlen--;
- tcp_reass_qsize--;
+ V_tcp_reass_qsize--;
}
/* Disconnect offload device, if any. */
tcp_offload_detach(tp);
@@ -769,14 +770,14 @@ tcp_close(struct tcpcb *tp)
struct inpcb *inp = tp->t_inpcb;
struct socket *so;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
/* Notify any offload devices of listener close */
if (tp->t_state == TCPS_LISTEN)
tcp_offload_listen_close(tp);
in_pcbdrop(inp);
- tcpstat.tcps_closed++;
+ V_tcpstat.tcps_closed++;
KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
so = inp->inp_socket;
soisdisconnected(so);
@@ -811,8 +812,8 @@ tcp_drain(void)
* where we're really low on mbufs, this is potentially
* usefull.
*/
- INP_INFO_RLOCK(&tcbinfo);
- LIST_FOREACH(inpb, tcbinfo.ipi_listhead, inp_list) {
+ INP_INFO_RLOCK(&V_tcbinfo);
+ LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
if (inpb->inp_vflag & INP_TIMEWAIT)
continue;
INP_WLOCK(inpb);
@@ -823,13 +824,13 @@ tcp_drain(void)
m_freem(te->tqe_m);
uma_zfree(tcp_reass_zone, te);
tcpb->t_segqlen--;
- tcp_reass_qsize--;
+ V_tcp_reass_qsize--;
}
tcp_clean_sackreport(tcpb);
}
INP_WUNLOCK(inpb);
}
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
}
}
@@ -846,7 +847,7 @@ tcp_notify(struct inpcb *inp, int error)
{
struct tcpcb *tp;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
if ((inp->inp_vflag & INP_TIMEWAIT) ||
@@ -899,7 +900,7 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
*/
if (req->oldptr == NULL) {
m = syncache_pcbcount();
- n = tcbinfo.ipi_count;
+ n = V_tcbinfo.ipi_count;
req->oldidx = 2 * (sizeof xig)
+ ((m + n) + n/8) * sizeof(struct xtcpcb);
return (0);
@@ -911,10 +912,10 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
/*
* OK, now we're committed to doing something.
*/
- INP_INFO_RLOCK(&tcbinfo);
- gencnt = tcbinfo.ipi_gencnt;
- n = tcbinfo.ipi_count;
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RLOCK(&V_tcbinfo);
+ gencnt = V_tcbinfo.ipi_gencnt;
+ n = V_tcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_tcbinfo);
m = syncache_pcbcount();
@@ -939,8 +940,8 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
if (inp_list == NULL)
return (ENOMEM);
- INP_INFO_RLOCK(&tcbinfo);
- for (inp = LIST_FIRST(tcbinfo.ipi_listhead), i = 0; inp != NULL && i
+ INP_INFO_RLOCK(&V_tcbinfo);
+ for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0; inp != NULL && i
< n; inp = LIST_NEXT(inp, inp_list)) {
INP_RLOCK(inp);
if (inp->inp_gencnt <= gencnt) {
@@ -963,7 +964,7 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
}
INP_RUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
n = i;
error = 0;
@@ -1007,11 +1008,11 @@ tcp_pcblist(SYSCTL_HANDLER_ARGS)
* while we were processing this request, and it
* might be necessary to retry.
*/
- INP_INFO_RLOCK(&tcbinfo);
- xig.xig_gen = tcbinfo.ipi_gencnt;
+ INP_INFO_RLOCK(&V_tcbinfo);
+ xig.xig_gen = V_tcbinfo.ipi_gencnt;
xig.xig_sogen = so_gencnt;
- xig.xig_count = tcbinfo.ipi_count + pcb_count;
- INP_INFO_RUNLOCK(&tcbinfo);
+ xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
+ INP_INFO_RUNLOCK(&V_tcbinfo);
error = SYSCTL_OUT(req, &xig, sizeof xig);
}
free(inp_list, M_TEMP);
@@ -1035,12 +1036,12 @@ tcp_getcred(SYSCTL_HANDLER_ARGS)
error = SYSCTL_IN(req, addrs, sizeof(addrs));
if (error)
return (error);
- INP_INFO_RLOCK(&tcbinfo);
- inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
+ INP_INFO_RLOCK(&V_tcbinfo);
+ inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
if (inp != NULL) {
INP_RLOCK(inp);
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
if (inp->inp_socket == NULL)
error = ENOENT;
if (error == 0)
@@ -1050,7 +1051,7 @@ tcp_getcred(SYSCTL_HANDLER_ARGS)
cru2x(inp->inp_socket->so_cred, &xuc);
INP_RUNLOCK(inp);
} else {
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
error = ENOENT;
}
if (error == 0)
@@ -1077,8 +1078,8 @@ tcp6_getcred(SYSCTL_HANDLER_ARGS)
error = SYSCTL_IN(req, addrs, sizeof(addrs));
if (error)
return (error);
- if ((error = sa6_embedscope(&addrs[0], ip6_use_defzone)) != 0 ||
- (error = sa6_embedscope(&addrs[1], ip6_use_defzone)) != 0) {
+ if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
+ (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
return (error);
}
if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
@@ -1088,21 +1089,21 @@ tcp6_getcred(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
- INP_INFO_RLOCK(&tcbinfo);
+ INP_INFO_RLOCK(&V_tcbinfo);
if (mapped == 1)
- inp = in_pcblookup_hash(&tcbinfo,
+ inp = in_pcblookup_hash(&V_tcbinfo,
*(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
addrs[1].sin6_port,
*(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
addrs[0].sin6_port,
0, NULL);
else
- inp = in6_pcblookup_hash(&tcbinfo,
+ inp = in6_pcblookup_hash(&V_tcbinfo,
&addrs[1].sin6_addr, addrs[1].sin6_port,
&addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
if (inp != NULL) {
INP_RLOCK(inp);
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
if (inp->inp_socket == NULL)
error = ENOENT;
if (error == 0)
@@ -1112,7 +1113,7 @@ tcp6_getcred(SYSCTL_HANDLER_ARGS)
cru2x(inp->inp_socket->so_cred, &xuc);
INP_RUNLOCK(inp);
} else {
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
error = ENOENT;
}
if (error == 0)
@@ -1146,7 +1147,7 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
if (cmd == PRC_MSGSIZE)
notify = tcp_mtudisc;
- else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
+ else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
notify = tcp_drop_syn_sent;
/*
@@ -1173,8 +1174,8 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
- offsetof(struct icmp, icmp_ip));
th = (struct tcphdr *)((caddr_t)ip
+ (ip->ip_hl << 2));
- INP_INFO_WLOCK(&tcbinfo);
- inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = in_pcblookup_hash(&V_tcbinfo, faddr, th->th_dport,
ip->ip_src, th->th_sport, 0, NULL);
if (inp != NULL) {
INP_WLOCK(inp);
@@ -1208,11 +1209,11 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
if (!mtu)
mtu = ip_next_mtu(ip->ip_len,
1);
- if (mtu < max(296, (tcp_minmss)
+ if (mtu < max(296, (V_tcp_minmss)
+ sizeof(struct tcpiphdr)))
mtu = 0;
if (!mtu)
- mtu = tcp_mssdflt
+ mtu = V_tcp_mssdflt
+ sizeof(struct tcpiphdr);
/*
					 * Only cache the MTU if it
@@ -1239,9 +1240,9 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
#endif
syncache_unreach(&inc, th);
}
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
} else
- in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
+ in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
}
#ifdef INET6
@@ -1301,7 +1302,7 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
bzero(&th, sizeof(th));
m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
- in6_pcbnotify(&tcbinfo, sa, th.th_dport,
+ in6_pcbnotify(&V_tcbinfo, sa, th.th_dport,
(struct sockaddr *)ip6cp->ip6c_src,
th.th_sport, cmd, NULL, notify);
@@ -1310,11 +1311,11 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
inc.inc_isipv6 = 1;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
syncache_unreach(&inc, &th);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
} else
- in6_pcbnotify(&tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
+ in6_pcbnotify(&V_tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
0, cmd, NULL, notify);
}
#endif /* INET6 */
@@ -1383,37 +1384,37 @@ tcp_new_isn(struct tcpcb *tp)
ISN_LOCK();
/* Seed if this is the first use, reseed if requested. */
- if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
- (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
+ if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
+ (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
< (u_int)ticks))) {
- read_random(&isn_secret, sizeof(isn_secret));
- isn_last_reseed = ticks;
+ read_random(&V_isn_secret, sizeof(V_isn_secret));
+ V_isn_last_reseed = ticks;
}
/* Compute the md5 hash and return the ISN. */
- MD5Init(&isn_ctx);
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
+ MD5Init(&V_isn_ctx);
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
sizeof(struct in6_addr));
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
sizeof(struct in6_addr));
} else
#endif
{
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
sizeof(struct in_addr));
- MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
+ MD5Update(&V_isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
sizeof(struct in_addr));
}
- MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
- MD5Final((u_char *) &md5_buffer, &isn_ctx);
+ MD5Update(&V_isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
+ MD5Final((u_char *) &md5_buffer, &V_isn_ctx);
new_isn = (tcp_seq) md5_buffer[0];
- isn_offset += ISN_STATIC_INCREMENT +
+ V_isn_offset += ISN_STATIC_INCREMENT +
(arc4random() & ISN_RANDOM_INCREMENT);
- new_isn += isn_offset;
+ new_isn += V_isn_offset;
ISN_UNLOCK();
return (new_isn);
}
@@ -1429,12 +1430,12 @@ tcp_isn_tick(void *xtp)
u_int32_t projected_offset;
ISN_LOCK();
- projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / 100;
+ projected_offset = V_isn_offset_old + ISN_BYTES_PER_SECOND / 100;
- if (SEQ_GT(projected_offset, isn_offset))
- isn_offset = projected_offset;
+ if (SEQ_GT(projected_offset, V_isn_offset))
+ V_isn_offset = projected_offset;
- isn_offset_old = isn_offset;
+ V_isn_offset_old = V_isn_offset;
callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
ISN_UNLOCK();
}
@@ -1449,7 +1450,7 @@ tcp_drop_syn_sent(struct inpcb *inp, int errno)
{
struct tcpcb *tp;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
if ((inp->inp_vflag & INP_TIMEWAIT) ||
@@ -1509,9 +1510,9 @@ tcp_mtudisc(struct inpcb *inp, int errno)
if (!maxmtu) {
tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
- isipv6 ? tcp_v6mssdflt :
+ isipv6 ? V_tcp_v6mssdflt :
#endif /* INET6 */
- tcp_mssdflt;
+ V_tcp_mssdflt;
return (inp);
}
mss = maxmtu -
@@ -1560,7 +1561,7 @@ tcp_mtudisc(struct inpcb *inp, int errno)
tp->t_maxseg = mss;
- tcpstat.tcps_mturesent++;
+ V_tcpstat.tcps_mturesent++;
tp->t_rtttime = 0;
tp->snd_nxt = tp->snd_una;
tcp_free_sackholes(tp);
@@ -1756,7 +1757,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
* If inflight_enable is disabled in the middle of a tcp connection,
* make sure snd_bwnd is effectively disabled.
*/
- if (tcp_inflight_enable == 0 || tp->t_rttlow < tcp_inflight_rttthresh) {
+ if (V_tcp_inflight_enable == 0 || tp->t_rttlow < V_tcp_inflight_rttthresh) {
tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_bandwidth = 0;
return;
@@ -1816,7 +1817,7 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
* no other choice.
*/
#define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
- bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + tcp_inflight_stab * tp->t_maxseg / 10;
+ bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + V_tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT
if (tcp_inflight_debug > 0) {
@@ -1832,10 +1833,10 @@ tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
);
}
}
- if ((long)bwnd < tcp_inflight_min)
- bwnd = tcp_inflight_min;
- if (bwnd > tcp_inflight_max)
- bwnd = tcp_inflight_max;
+ if ((long)bwnd < V_tcp_inflight_min)
+ bwnd = V_tcp_inflight_min;
+ if (bwnd > V_tcp_inflight_max)
+ bwnd = V_tcp_inflight_max;
if ((long)bwnd < tp->t_maxseg * 2)
bwnd = tp->t_maxseg * 2;
tp->snd_bwnd = bwnd;
@@ -2011,10 +2012,10 @@ sysctl_drop(SYSCTL_HANDLER_ARGS)
lin = (struct sockaddr_in *)&addrs[1];
break;
}
- error = sa6_embedscope(fin6, ip6_use_defzone);
+ error = sa6_embedscope(fin6, V_ip6_use_defzone);
if (error)
return (error);
- error = sa6_embedscope(lin6, ip6_use_defzone);
+ error = sa6_embedscope(lin6, V_ip6_use_defzone);
if (error)
return (error);
break;
@@ -2029,16 +2030,16 @@ sysctl_drop(SYSCTL_HANDLER_ARGS)
default:
return (EINVAL);
}
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
switch (addrs[0].ss_family) {
#ifdef INET6
case AF_INET6:
- inp = in6_pcblookup_hash(&tcbinfo, &f6, fin6->sin6_port,
+ inp = in6_pcblookup_hash(&V_tcbinfo, &f6, fin6->sin6_port,
&l6, lin6->sin6_port, 0, NULL);
break;
#endif
case AF_INET:
- inp = in_pcblookup_hash(&tcbinfo, fin->sin_addr, fin->sin_port,
+ inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr, fin->sin_port,
lin->sin_addr, lin->sin_port, 0, NULL);
break;
}
@@ -2066,7 +2067,7 @@ sysctl_drop(SYSCTL_HANDLER_ARGS)
INP_WUNLOCK(inp);
} else
error = ESRCH;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
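
The tcp_subr.c hunks above rename references such as tcbinfo, tcpstat and the isn_* state to V_-prefixed forms. A compatibility macro that simply expands a V_ name back to the underlying global would make such a rename compile to the same code; the userland sketch below assumes that style of mapping, and the macro, stand-in variable and values are illustrative only, not the contents of sys/vimage.h.

/*
 * Illustrative sketch only: a V_-style compatibility macro that expands
 * back to a plain global, so code rewritten to use the V_ name keeps
 * touching the same variable.  Not the real sys/vimage.h.
 */
#include <stdio.h>

static int tcp_mssdflt = 512;		/* stand-in for a global tunable */

#define	V_tcp_mssdflt	tcp_mssdflt	/* V_ name maps to the global */

int
main(void)
{

	V_tcp_mssdflt = 536;		/* writes the global through the macro */
	printf("tcp_mssdflt = %d\n", tcp_mssdflt);
	return (0);
}

Building and running the sketch prints 536: the assignment made through the V_ name lands on the plain global.
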
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 3f7a9bc..fd580d2 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -231,13 +232,13 @@ SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail, CTLFLAG_RW,
static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
#define SYNCACHE_HASH(inc, mask) \
- ((tcp_syncache.hash_secret ^ \
+ ((V_tcp_syncache.hash_secret ^ \
(inc)->inc_faddr.s_addr ^ \
((inc)->inc_faddr.s_addr >> 16) ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
#define SYNCACHE_HASH6(inc, mask) \
- ((tcp_syncache.hash_secret ^ \
+ ((V_tcp_syncache.hash_secret ^ \
(inc)->inc6_faddr.s6_addr32[0] ^ \
(inc)->inc6_faddr.s6_addr32[3] ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
@@ -267,7 +268,7 @@ syncache_free(struct syncache *sc)
mac_syncache_destroy(&sc->sc_label);
#endif
- uma_zfree(tcp_syncache.zone, sc);
+ uma_zfree(V_tcp_syncache.zone, sc);
}
void
@@ -275,47 +276,47 @@ syncache_init(void)
{
int i;
- tcp_syncache.cache_count = 0;
- tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
- tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
- tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
- tcp_syncache.hash_secret = arc4random();
+ V_tcp_syncache.cache_count = 0;
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
+ V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
+ V_tcp_syncache.hash_secret = arc4random();
TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
- &tcp_syncache.hashsize);
+ &V_tcp_syncache.hashsize);
TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
- &tcp_syncache.bucket_limit);
- if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) {
+ &V_tcp_syncache.bucket_limit);
+ if (!powerof2(V_tcp_syncache.hashsize) || V_tcp_syncache.hashsize == 0) {
printf("WARNING: syncache hash size is not a power of 2.\n");
- tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
}
- tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
+ V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
/* Set limits. */
- tcp_syncache.cache_limit =
- tcp_syncache.hashsize * tcp_syncache.bucket_limit;
+ V_tcp_syncache.cache_limit =
+ V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
- &tcp_syncache.cache_limit);
+ &V_tcp_syncache.cache_limit);
/* Allocate the hash table. */
- MALLOC(tcp_syncache.hashbase, struct syncache_head *,
- tcp_syncache.hashsize * sizeof(struct syncache_head),
+ MALLOC(V_tcp_syncache.hashbase, struct syncache_head *,
+ V_tcp_syncache.hashsize * sizeof(struct syncache_head),
M_SYNCACHE, M_WAITOK | M_ZERO);
/* Initialize the hash buckets. */
- for (i = 0; i < tcp_syncache.hashsize; i++) {
- TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
- mtx_init(&tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
+ for (i = 0; i < V_tcp_syncache.hashsize; i++) {
+ TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
+ mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
NULL, MTX_DEF);
- callout_init_mtx(&tcp_syncache.hashbase[i].sch_timer,
- &tcp_syncache.hashbase[i].sch_mtx, 0);
- tcp_syncache.hashbase[i].sch_length = 0;
+ callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
+ &V_tcp_syncache.hashbase[i].sch_mtx, 0);
+ V_tcp_syncache.hashbase[i].sch_length = 0;
}
/* Create the syncache entry zone. */
- tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
+ V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
+ uma_zone_set_max(V_tcp_syncache.zone, V_tcp_syncache.cache_limit);
}
/*
@@ -333,12 +334,12 @@ syncache_insert(struct syncache *sc, struct syncache_head *sch)
* Make sure that we don't overflow the per-bucket limit.
* If the bucket is full, toss the oldest element.
*/
- if (sch->sch_length >= tcp_syncache.bucket_limit) {
+ if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
("sch->sch_length incorrect"));
sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
syncache_drop(sc2, sch);
- tcpstat.tcps_sc_bucketoverflow++;
+ V_tcpstat.tcps_sc_bucketoverflow++;
}
/* Put it into the bucket. */
@@ -352,8 +353,8 @@ syncache_insert(struct syncache *sc, struct syncache_head *sch)
SCH_UNLOCK(sch);
- tcp_syncache.cache_count++;
- tcpstat.tcps_sc_added++;
+ V_tcp_syncache.cache_count++;
+ V_tcpstat.tcps_sc_added++;
}
/*
@@ -374,7 +375,7 @@ syncache_drop(struct syncache *sc, struct syncache_head *sch)
sc->sc_tu->tu_syncache_event(TOE_SC_DROP, sc->sc_toepcb);
#endif
syncache_free(sc);
- tcp_syncache.cache_count--;
+ V_tcp_syncache.cache_count--;
}
/*
@@ -430,7 +431,7 @@ syncache_timer(void *xsch)
sch->sch_nextc = sc->sc_rxttime;
continue;
}
- if (sc->sc_rxmits > tcp_syncache.rexmt_limit) {
+ if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
"giving up and removing syncache entry\n",
@@ -438,7 +439,7 @@ syncache_timer(void *xsch)
free(s, M_TCPLOG);
}
syncache_drop(sc, sch);
- tcpstat.tcps_sc_stale++;
+ V_tcpstat.tcps_sc_stale++;
continue;
}
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
@@ -449,7 +450,7 @@ syncache_timer(void *xsch)
}
(void) syncache_respond(sc);
- tcpstat.tcps_sc_retransmitted++;
+ V_tcpstat.tcps_sc_retransmitted++;
syncache_timeout(sc, sch, 0);
}
if (!TAILQ_EMPTY(&(sch)->sch_bucket))
@@ -469,8 +470,8 @@ syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
#ifdef INET6
if (inc->inc_isipv6) {
- sch = &tcp_syncache.hashbase[
- SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
*schp = sch;
SCH_LOCK(sch);
@@ -483,8 +484,8 @@ syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
} else
#endif
{
- sch = &tcp_syncache.hashbase[
- SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
*schp = sch;
SCH_LOCK(sch);
@@ -526,7 +527,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
"FIN flag set, segment ignored\n", s, __func__);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
goto done;
}
@@ -543,7 +544,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
"syncache entry (possibly syncookie only), "
"segment ignored\n", s, __func__);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
goto done;
}
@@ -567,13 +568,13 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
"connection attempt aborted by remote endpoint\n",
s, __func__);
- tcpstat.tcps_sc_reset++;
+ V_tcpstat.tcps_sc_reset++;
} else {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
"IRS %u (+WND %u), segment ignored\n",
s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
}
done:
@@ -592,7 +593,7 @@ syncache_badack(struct in_conninfo *inc)
SCH_LOCK_ASSERT(sch);
if (sc != NULL) {
syncache_drop(sc, sch);
- tcpstat.tcps_sc_badack++;
+ V_tcpstat.tcps_sc_badack++;
}
SCH_UNLOCK(sch);
}
@@ -625,7 +626,7 @@ syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
goto done;
}
syncache_drop(sc, sch);
- tcpstat.tcps_sc_unreach++;
+ V_tcpstat.tcps_sc_unreach++;
done:
SCH_UNLOCK(sch);
}
@@ -641,7 +642,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
struct tcpcb *tp;
char *s;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
/*
* Ok, create the full blown connection, and set things up
@@ -656,7 +657,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
* have the peer retransmit its SYN again after its
* RTO and try again.
*/
- tcpstat.tcps_listendrop++;
+ V_tcpstat.tcps_listendrop++;
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Socket create failed "
"due to limits or memory shortage\n",
@@ -826,7 +827,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
INP_WUNLOCK(inp);
- tcpstat.tcps_accepts++;
+ V_tcpstat.tcps_accepts++;
return (so);
abort:
@@ -857,7 +858,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
* Global TCP locks are held because we manipulate the PCB lists
* and create a new socket.
*/
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
("%s: can handle only ACK", __func__));
@@ -895,7 +896,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
/* Pull out the entry to unlock the bucket row. */
TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
sch->sch_length--;
- tcp_syncache.cache_count--;
+ V_tcp_syncache.cache_count--;
SCH_UNLOCK(sch);
}
@@ -945,9 +946,9 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*lsop = syncache_socket(sc, *lsop, m);
if (*lsop == NULL)
- tcpstat.tcps_sc_aborted++;
+ V_tcpstat.tcps_sc_aborted++;
else
- tcpstat.tcps_sc_completed++;
+ V_tcpstat.tcps_sc_completed++;
/* how do we find the inp for the new socket? */
if (sc != &scs)
@@ -968,9 +969,9 @@ tcp_offload_syncache_expand(struct in_conninfo *inc, struct tcpopt *to,
{
int rc;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
rc = syncache_expand(inc, to, th, lsop, m);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (rc);
}
@@ -1009,7 +1010,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
#endif
struct syncache scs;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp); /* listen socket */
KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
("%s: unexpected tcp flags", __func__));
@@ -1038,13 +1039,13 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
#ifdef MAC
if (mac_syncache_init(&maclabel) != 0) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
goto done;
} else
mac_syncache_create(maclabel, inp);
#endif
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
/*
* Remember the IP options, if any.
@@ -1074,7 +1075,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
sc->sc_tu->tu_syncache_event(TOE_SC_ENTRY_PRESENT,
sc->sc_toepcb);
#endif
- tcpstat.tcps_sc_dupsyn++;
+ V_tcpstat.tcps_sc_dupsyn++;
if (ipopts) {
/*
* If we were remembering a previous source route,
@@ -1111,24 +1112,24 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
if (!TOEPCB_ISSET(sc) && syncache_respond(sc) == 0) {
sc->sc_rxmits = 0;
syncache_timeout(sc, sch, 1);
- tcpstat.tcps_sndacks++;
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndtotal++;
}
SCH_UNLOCK(sch);
goto done;
}
- sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
if (sc == NULL) {
/*
* The zone allocator couldn't provide more entries.
* Treat this as if the cache was full; drop the oldest
* entry and insert the new one.
*/
- tcpstat.tcps_sc_zonefail++;
+ V_tcpstat.tcps_sc_zonefail++;
if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
syncache_drop(sc, sch);
- sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
if (sc == NULL) {
if (tcp_syncookies) {
bzero(&scs, sizeof(scs));
@@ -1175,7 +1176,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
win = imin(win, TCP_MAXWIN);
sc->sc_wnd = win;
- if (tcp_do_rfc1323) {
+ if (V_tcp_do_rfc1323) {
/*
* A timestamp received in a SYN makes
* it ok to send timestamp requests and replies.
@@ -1234,7 +1235,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
if (noopt)
sc->sc_flags |= SCF_NOOPT;
- if ((th->th_flags & (TH_ECE|TH_CWR)) && tcp_do_ecn)
+ if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
sc->sc_flags |= SCF_ECN;
if (tcp_syncookies) {
@@ -1260,12 +1261,12 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
syncache_free(sc);
else if (sc != &scs)
syncache_insert(sc, sch); /* locks and unlocks sch */
- tcpstat.tcps_sndacks++;
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndtotal++;
} else {
if (sc != &scs)
syncache_free(sc);
- tcpstat.tcps_sc_dropped++;
+ V_tcpstat.tcps_sc_dropped++;
}
done:
@@ -1304,7 +1305,7 @@ syncache_respond(struct syncache *sc)
/* Determine MSS we advertize to other end of connection. */
mssopt = tcp_mssopt(&sc->sc_inc);
if (sc->sc_peer_mss)
- mssopt = max( min(sc->sc_peer_mss, mssopt), tcp_minmss);
+ mssopt = max( min(sc->sc_peer_mss, mssopt), V_tcp_minmss);
/* XXX: Assume that the entire packet will fit in a header mbuf. */
KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
@@ -1358,7 +1359,7 @@ syncache_respond(struct syncache *sc)
* 1) path_mtu_discovery is disabled
* 2) the SCF_UNREACH flag has been set
*/
- if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
+ if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
ip->ip_off |= IP_DF;
th = (struct tcphdr *)(ip + 1);
@@ -1376,7 +1377,7 @@ syncache_respond(struct syncache *sc)
if (sc->sc_flags & SCF_ECN) {
th->th_flags |= TH_ECE;
- tcpstat.tcps_ecn_shs++;
+ V_tcpstat.tcps_ecn_shs++;
}
/* Tack on the TCP options. */
@@ -1454,7 +1455,7 @@ tcp_offload_syncache_add(struct in_conninfo *inc, struct tcpopt *to,
struct toe_usrreqs *tu, void *toepcb)
{
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(inp);
_syncache_add(inc, to, th, inp, lsop, NULL, tu, toepcb);
}
@@ -1568,7 +1569,7 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc,
off = sc->sc_iss & 0x7; /* iss was randomized before */
/* Maximum segment size calculation. */
- pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), tcp_minmss);
+ pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), V_tcp_minmss);
for (mss = sizeof(tcp_sc_msstab) / sizeof(int) - 1; mss > 0; mss--)
if (tcp_sc_msstab[mss] <= pmss)
break;
@@ -1606,7 +1607,7 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc,
sc->sc_tsoff = data - ticks; /* after XOR */
}
- tcpstat.tcps_sc_sendcookie++;
+ V_tcpstat.tcps_sc_sendcookie++;
return;
}
@@ -1709,7 +1710,7 @@ syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
sc->sc_rxmits = 0;
sc->sc_peer_mss = tcp_sc_msstab[mss];
- tcpstat.tcps_sc_recvcookie++;
+ V_tcpstat.tcps_sc_recvcookie++;
return (sc);
}
@@ -1725,9 +1726,9 @@ syncache_pcbcount(void)
struct syncache_head *sch;
int count, i;
- for (count = 0, i = 0; i < tcp_syncache.hashsize; i++) {
+ for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
/* No need to lock for a read. */
- sch = &tcp_syncache.hashbase[i];
+ sch = &V_tcp_syncache.hashbase[i];
count += sch->sch_length;
}
return count;
@@ -1750,8 +1751,8 @@ syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
struct syncache_head *sch;
int count, error, i;
- for (count = 0, error = 0, i = 0; i < tcp_syncache.hashsize; i++) {
- sch = &tcp_syncache.hashbase[i];
+ for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
+ sch = &V_tcp_syncache.hashbase[i];
SCH_LOCK(sch);
TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
if (count >= max_pcbs) {
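
syncache_init() above insists on a power-of-two hash size and derives hashmask as hashsize - 1, which is what lets SYNCACHE_HASH() reduce its 32-bit mix to a bucket index with a single AND. A standalone sketch of that arithmetic follows; the secret, address and ports are made-up inputs.

/*
 * Userland sketch of the syncache bucket selection: with a power-of-two
 * table, "hash & (size - 1)" is a cheap modulo.  Inputs are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define	HASHSIZE	512u			/* must be a power of two */
#define	HASHMASK	(HASHSIZE - 1)

static uint32_t
bucket(uint32_t secret, uint32_t faddr, uint16_t fport, uint16_t lport)
{

	/* Same shape as the IPv4 SYNCACHE_HASH() macro above. */
	return ((secret ^ faddr ^ (faddr >> 16) ^ fport ^ lport) & HASHMASK);
}

int
main(void)
{
	uint32_t b = bucket(0xdeadbeef, 0xc0a80001, 80, 54321);

	printf("bucket %u of %u\n", (unsigned)b, (unsigned)HASHSIZE);
	return (0);
}
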
diff --git a/sys/netinet/tcp_timer.c b/sys/netinet/tcp_timer.c
index 3cb74e1..db156ec 100644
--- a/sys/netinet/tcp_timer.c
+++ b/sys/netinet/tcp_timer.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <net/route.h>
@@ -125,9 +126,9 @@ tcp_slowtimo(void)
{
tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
(void) tcp_tw_2msl_scan(0);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
@@ -152,7 +153,7 @@ tcp_timer_delack(void *xtp)
struct tcpcb *tp = xtp;
struct inpcb *inp;
- INP_INFO_RLOCK(&tcbinfo);
+ INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
/*
* XXXRW: While this assert is in fact correct, bugs in the tcpcb
@@ -163,11 +164,11 @@ tcp_timer_delack(void *xtp)
*/
if (inp == NULL) {
tcp_timer_race++;
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
return;
}
INP_WLOCK(inp);
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
if ((inp->inp_vflag & INP_DROPPED) || callout_pending(&tp->t_timers->tt_delack)
|| !callout_active(&tp->t_timers->tt_delack)) {
INP_WUNLOCK(inp);
@@ -176,7 +177,7 @@ tcp_timer_delack(void *xtp)
callout_deactivate(&tp->t_timers->tt_delack);
tp->t_flags |= TF_ACKNOW;
- tcpstat.tcps_delack++;
+ V_tcpstat.tcps_delack++;
(void) tcp_output(tp);
INP_WUNLOCK(inp);
}
@@ -194,7 +195,7 @@ tcp_timer_2msl(void *xtp)
/*
* XXXRW: Does this actually happen?
*/
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
/*
* XXXRW: While this assert is in fact correct, bugs in the tcpcb
@@ -205,7 +206,7 @@ tcp_timer_2msl(void *xtp)
*/
if (inp == NULL) {
tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
INP_WLOCK(inp);
@@ -213,7 +214,7 @@ tcp_timer_2msl(void *xtp)
if ((inp->inp_vflag & INP_DROPPED) || callout_pending(&tp->t_timers->tt_2msl) ||
!callout_active(&tp->t_timers->tt_2msl)) {
INP_WUNLOCK(tp->t_inpcb);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
callout_deactivate(&tp->t_timers->tt_2msl);
@@ -230,7 +231,7 @@ tcp_timer_2msl(void *xtp)
if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
tp->t_inpcb && tp->t_inpcb->inp_socket &&
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
- tcpstat.tcps_finwait2_drops++;
+ V_tcpstat.tcps_finwait2_drops++;
tp = tcp_close(tp);
} else {
if (tp->t_state != TCPS_TIME_WAIT &&
@@ -248,7 +249,7 @@ tcp_timer_2msl(void *xtp)
#endif
if (tp != NULL)
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
void
@@ -262,7 +263,7 @@ tcp_timer_keep(void *xtp)
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
/*
* XXXRW: While this assert is in fact correct, bugs in the tcpcb
@@ -273,14 +274,14 @@ tcp_timer_keep(void *xtp)
*/
if (inp == NULL) {
tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
INP_WLOCK(inp);
if ((inp->inp_vflag & INP_DROPPED) || callout_pending(&tp->t_timers->tt_keep)
|| !callout_active(&tp->t_timers->tt_keep)) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
callout_deactivate(&tp->t_timers->tt_keep);
@@ -288,7 +289,7 @@ tcp_timer_keep(void *xtp)
* Keep-alive timer went off; send something
* or drop connection if idle for too long.
*/
- tcpstat.tcps_keeptimeo++;
+ V_tcpstat.tcps_keeptimeo++;
if (tp->t_state < TCPS_ESTABLISHED)
goto dropit;
if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
@@ -307,7 +308,7 @@ tcp_timer_keep(void *xtp)
* by the protocol spec, this requires the
* correspondent TCP to respond.
*/
- tcpstat.tcps_keepprobe++;
+ V_tcpstat.tcps_keepprobe++;
t_template = tcpip_maketemplate(inp);
if (t_template) {
tcp_respond(tp, t_template->tt_ipgen,
@@ -325,11 +326,11 @@ tcp_timer_keep(void *xtp)
PRU_SLOWTIMO);
#endif
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
dropit:
- tcpstat.tcps_keepdrops++;
+ V_tcpstat.tcps_keepdrops++;
tp = tcp_drop(tp, ETIMEDOUT);
#ifdef TCPDEBUG
@@ -339,7 +340,7 @@ dropit:
#endif
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
void
@@ -352,7 +353,7 @@ tcp_timer_persist(void *xtp)
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
/*
* XXXRW: While this assert is in fact correct, bugs in the tcpcb
@@ -363,14 +364,14 @@ tcp_timer_persist(void *xtp)
*/
if (inp == NULL) {
tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
INP_WLOCK(inp);
if ((inp->inp_vflag & INP_DROPPED) || callout_pending(&tp->t_timers->tt_persist)
|| !callout_active(&tp->t_timers->tt_persist)) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
callout_deactivate(&tp->t_timers->tt_persist);
@@ -378,7 +379,7 @@ tcp_timer_persist(void *xtp)
 * Persistence timer into zero window.
* Force a byte to be output, if possible.
*/
- tcpstat.tcps_persisttimeo++;
+ V_tcpstat.tcps_persisttimeo++;
/*
* Hack: if the peer is dead/unreachable, we do not
* time out if the window is closed. After a full
@@ -389,7 +390,7 @@ tcp_timer_persist(void *xtp)
if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
((ticks - tp->t_rcvtime) >= tcp_maxpersistidle ||
(ticks - tp->t_rcvtime) >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
- tcpstat.tcps_persistdrop++;
+ V_tcpstat.tcps_persistdrop++;
tp = tcp_drop(tp, ETIMEDOUT);
goto out;
}
@@ -405,7 +406,7 @@ out:
#endif
if (tp != NULL)
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
void
@@ -420,7 +421,7 @@ tcp_timer_rexmt(void * xtp)
ostate = tp->t_state;
#endif
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
headlocked = 1;
inp = tp->t_inpcb;
/*
@@ -432,14 +433,14 @@ tcp_timer_rexmt(void * xtp)
*/
if (inp == NULL) {
tcp_timer_race++;
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
INP_WLOCK(inp);
if ((inp->inp_vflag & INP_DROPPED) || callout_pending(&tp->t_timers->tt_rexmt)
|| !callout_active(&tp->t_timers->tt_rexmt)) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return;
}
callout_deactivate(&tp->t_timers->tt_rexmt);
@@ -451,12 +452,12 @@ tcp_timer_rexmt(void * xtp)
*/
if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
tp->t_rxtshift = TCP_MAXRXTSHIFT;
- tcpstat.tcps_timeoutdrop++;
+ V_tcpstat.tcps_timeoutdrop++;
tp = tcp_drop(tp, tp->t_softerror ?
tp->t_softerror : ETIMEDOUT);
goto out;
}
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
if (tp->t_rxtshift == 1) {
/*
@@ -477,7 +478,7 @@ tcp_timer_rexmt(void * xtp)
tp->t_flags &= ~TF_WASFRECOVERY;
tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
}
- tcpstat.tcps_rexmttimeo++;
+ V_tcpstat.tcps_rexmttimeo++;
if (tp->t_state == TCPS_SYN_SENT)
rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
else
@@ -562,7 +563,7 @@ out:
if (tp != NULL)
INP_WUNLOCK(inp);
if (headlocked)
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
void
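
The retransmit handler above increments t_rxtshift, caps it at TCP_MAXRXTSHIFT, and indexes a backoff table with the result. The sketch below shows only that cap-then-index step; the table values are placeholders, and where the sketch merely clamps, the kernel drops the connection.

/*
 * Sketch of the "cap the shift, then index a backoff table" step used
 * by the retransmit timer.  Table values are placeholders; the real
 * code gives up on the connection once the cap is exceeded.
 */
#include <stdio.h>

#define	MAXRXTSHIFT	12

static const int backoff[MAXRXTSHIFT + 1] = {
	1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512
};

int
main(void)
{
	int attempt, rxtshift;

	rxtshift = 0;
	for (attempt = 1; attempt <= 15; attempt++) {
		if (++rxtshift > MAXRXTSHIFT)
			rxtshift = MAXRXTSHIFT;	/* kernel would drop here */
		printf("attempt %2d: backoff factor %d\n",
		    attempt, backoff[rxtshift]);
	}
	return (0);
}
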
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index 9d4b6e2..75d61e7 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -115,10 +116,10 @@ tcptw_auto_size(void)
* Max out at half the ephemeral port range so that TIME_WAIT
* sockets don't tie up too many ephemeral ports.
*/
- if (ipport_lastauto > ipport_firstauto)
- halfrange = (ipport_lastauto - ipport_firstauto) / 2;
+ if (V_ipport_lastauto > V_ipport_firstauto)
+ halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
else
- halfrange = (ipport_firstauto - ipport_lastauto) / 2;
+ halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
/* Protect against goofy port ranges smaller than 32. */
return (imin(imax(halfrange, 32), maxsockets / 5));
}
@@ -169,7 +170,7 @@ tcp_tw_init(void)
uma_zone_set_max(tcptw_zone, tcptw_auto_size());
else
uma_zone_set_max(tcptw_zone, maxtcptw);
- TAILQ_INIT(&twq_2msl);
+ TAILQ_INIT(&V_twq_2msl);
}
/*
@@ -185,10 +186,10 @@ tcp_twstart(struct tcpcb *tp)
int acknow;
struct socket *so;
- INP_INFO_WLOCK_ASSERT(&tcbinfo); /* tcp_tw_2msl_reset(). */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo); /* tcp_tw_2msl_reset(). */
INP_WLOCK_ASSERT(inp);
- if (nolocaltimewait && in_localip(inp->inp_faddr)) {
+ if (V_nolocaltimewait && in_localip(inp->inp_faddr)) {
tp = tcp_close(tp);
if (tp != NULL)
INP_WUNLOCK(inp);
@@ -298,7 +299,7 @@ tcp_twrecycleable(struct tcptw *tw)
tcp_seq new_iss = tw->iss;
tcp_seq new_irs = tw->irs;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
@@ -327,7 +328,7 @@ tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
#endif
/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
/*
@@ -468,7 +469,7 @@ tcp_twclose(struct tcptw *tw, int reuse)
inp = tw->tw_inpcb;
KASSERT((inp->inp_vflag & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
- INP_INFO_WLOCK_ASSERT(&tcbinfo); /* tcp_tw_2msl_stop(). */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo); /* tcp_tw_2msl_stop(). */
INP_WLOCK_ASSERT(inp);
tw->tw_inpcb = NULL;
@@ -509,7 +510,7 @@ tcp_twclose(struct tcptw *tw, int reuse)
#endif
in_pcbfree(inp);
}
- tcpstat.tcps_closed++;
+ V_tcpstat.tcps_closed++;
crfree(tw->tw_cred);
tw->tw_cred = NULL;
if (reuse)
@@ -596,17 +597,17 @@ tcp_twrespond(struct tcptw *tw, int flags)
m->m_pkthdr.csum_flags = CSUM_TCP;
m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
ip->ip_len = m->m_pkthdr.len;
- if (path_mtu_discovery)
+ if (V_path_mtu_discovery)
ip->ip_off |= IP_DF;
error = ip_output(m, inp->inp_options, NULL,
((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
NULL, inp);
}
if (flags & TH_ACK)
- tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndacks++;
else
- tcpstat.tcps_sndctrl++;
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndctrl++;
+ V_tcpstat.tcps_sndtotal++;
return (error);
}
@@ -614,20 +615,20 @@ static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(tw->tw_inpcb);
if (rearm)
- TAILQ_REMOVE(&twq_2msl, tw, tw_2msl);
+ TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
tw->tw_time = ticks + 2 * tcp_msl;
- TAILQ_INSERT_TAIL(&twq_2msl, tw, tw_2msl);
+ TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
}
static void
tcp_tw_2msl_stop(struct tcptw *tw)
{
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
- TAILQ_REMOVE(&twq_2msl, tw, tw_2msl);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
}
struct tcptw *
@@ -635,9 +636,9 @@ tcp_tw_2msl_scan(int reuse)
{
struct tcptw *tw;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
for (;;) {
- tw = TAILQ_FIRST(&twq_2msl);
+ tw = TAILQ_FIRST(&V_twq_2msl);
if (tw == NULL || (!reuse && tw->tw_time > ticks))
break;
INP_WLOCK(tw->tw_inpcb);
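
tcptw_auto_size() above limits the TIME_WAIT zone to half the ephemeral port range, with a floor of 32 and a ceiling of maxsockets / 5. A standalone sketch of that clamping follows, using assumed values for the port-range sysctls and maxsockets.

/*
 * Sketch of the tcptw_auto_size() clamping with assumed inputs.
 */
#include <stdio.h>

static int imax(int a, int b) { return (a > b ? a : b); }
static int imin(int a, int b) { return (a < b ? a : b); }

int
main(void)
{
	int firstauto = 49152, lastauto = 65535;	/* assumed port range */
	int maxsockets = 25600;				/* assumed limit */
	int halfrange;

	if (lastauto > firstauto)
		halfrange = (lastauto - firstauto) / 2;
	else
		halfrange = (firstauto - lastauto) / 2;
	/* Floor of 32, ceiling of a fifth of maxsockets. */
	printf("tcptw limit = %d\n", imin(imax(halfrange, 32), maxsockets / 5));
	return (0);
}
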
diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c
index 4a31e2b..6a19ad0 100644
--- a/sys/netinet/tcp_usrreq.c
+++ b/sys/netinet/tcp_usrreq.c
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/jail.h>
+#include <sys/vimage.h>
#ifdef DDB
#include <ddb/ddb.h>
@@ -159,7 +160,7 @@ tcp_detach(struct socket *so, struct inpcb *inp)
int isipv6 = INP_CHECK_SOCKAF(so, AF_INET6) != 0;
#endif
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
KASSERT(so->so_pcb == inp, ("tcp_detach: so_pcb != inp"));
@@ -252,12 +253,12 @@ tcp_usr_detach(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_detach: inp == NULL"));
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(inp);
KASSERT(inp->inp_socket != NULL,
("tcp_usr_detach: inp_socket == NULL"));
tcp_detach(so, inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
/*
@@ -283,7 +284,7 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
return (EAFNOSUPPORT);
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL"));
INP_WLOCK(inp);
@@ -297,7 +298,7 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
out:
TCPDEBUG2(PRU_BIND);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -323,7 +324,7 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
return (EAFNOSUPPORT);
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL"));
INP_WLOCK(inp);
@@ -353,7 +354,7 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
out:
TCPDEBUG2(PRU_BIND);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
#endif /* INET6 */
@@ -369,7 +370,7 @@ tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
struct tcpcb *tp = NULL;
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL"));
INP_WLOCK(inp);
@@ -393,7 +394,7 @@ tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
out:
TCPDEBUG2(PRU_LISTEN);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -406,7 +407,7 @@ tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
struct tcpcb *tp = NULL;
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL"));
INP_WLOCK(inp);
@@ -433,7 +434,7 @@ tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
out:
TCPDEBUG2(PRU_LISTEN);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
#endif /* INET6 */
@@ -466,7 +467,7 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
prison_remote_ip(td->td_ucred, 0, &sinp->sin_addr.s_addr);
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL"));
INP_WLOCK(inp);
@@ -482,7 +483,7 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
out:
TCPDEBUG2(PRU_CONNECT);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -507,7 +508,7 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
&& IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr))
return (EAFNOSUPPORT);
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL"));
INP_WLOCK(inp);
@@ -543,7 +544,7 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
out:
TCPDEBUG2(PRU_CONNECT);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
#endif /* INET6 */
@@ -567,7 +568,7 @@ tcp_usr_disconnect(struct socket *so)
int error = 0;
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL"));
INP_WLOCK(inp);
@@ -581,7 +582,7 @@ tcp_usr_disconnect(struct socket *so)
out:
TCPDEBUG2(PRU_DISCONNECT);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -605,7 +606,7 @@ tcp_usr_accept(struct socket *so, struct sockaddr **nam)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL"));
- INP_INFO_RLOCK(&tcbinfo);
+ INP_INFO_RLOCK(&V_tcbinfo);
INP_WLOCK(inp);
if (inp->inp_vflag & (INP_TIMEWAIT | INP_DROPPED)) {
error = ECONNABORTED;
@@ -625,7 +626,7 @@ tcp_usr_accept(struct socket *so, struct sockaddr **nam)
out:
TCPDEBUG2(PRU_ACCEPT);
INP_WUNLOCK(inp);
- INP_INFO_RUNLOCK(&tcbinfo);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
if (error == 0)
*nam = in_sockaddr(port, &addr);
return error;
@@ -695,7 +696,7 @@ tcp_usr_shutdown(struct socket *so)
struct tcpcb *tp = NULL;
TCPDEBUG0;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("inp == NULL"));
INP_WLOCK(inp);
@@ -712,7 +713,7 @@ tcp_usr_shutdown(struct socket *so)
out:
TCPDEBUG2(PRU_SHUTDOWN);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -775,7 +776,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
* (2) PRUS_EOF is set, resulting in explicit close on the send.
*/
if ((nam != NULL) || (flags & PRUS_EOF)) {
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
headlocked = 1;
}
inp = sotoinpcb(so);
@@ -814,7 +815,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
* initialize maxseg/maxopd using peer's cached
* MSS.
*/
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#ifdef INET6
if (isipv6)
error = tcp6_connect(tp, nam, td);
@@ -831,12 +832,12 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
* Close the send side of the connection after
* the data is sent.
*/
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
socantsendmore(so);
tcp_usrclosed(tp);
}
if (headlocked) {
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
}
if (tp != NULL) {
@@ -874,7 +875,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
* initialize maxseg/maxopd using peer's cached
* MSS.
*/
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#ifdef INET6
if (isipv6)
error = tcp6_connect(tp, nam, td);
@@ -885,10 +886,10 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
goto out;
tp->snd_wnd = TTCP_CLIENT_SND_WND;
tcp_mss(tp, -1);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
} else if (nam) {
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
headlocked = 0;
}
tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
@@ -901,7 +902,7 @@ out:
((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
INP_WUNLOCK(inp);
if (headlocked)
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
@@ -918,7 +919,7 @@ tcp_usr_abort(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL"));
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(inp);
KASSERT(inp->inp_socket != NULL,
("tcp_usr_abort: inp_socket == NULL"));
@@ -940,7 +941,7 @@ tcp_usr_abort(struct socket *so)
inp->inp_vflag |= INP_SOCKREF;
}
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
/*
@@ -956,7 +957,7 @@ tcp_usr_close(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL"));
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(inp);
KASSERT(inp->inp_socket != NULL,
("tcp_usr_close: inp_socket == NULL"));
@@ -979,7 +980,7 @@ tcp_usr_close(struct socket *so)
inp->inp_vflag |= INP_SOCKREF;
}
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
}
/*
@@ -1085,7 +1086,7 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
u_short lport;
int error;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
if (inp->inp_lport == 0) {
@@ -1120,7 +1121,7 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
tp->request_r_scale++;
soisconnecting(so);
- tcpstat.tcps_connattempt++;
+ V_tcpstat.tcps_connattempt++;
tp->t_state = TCPS_SYN_SENT;
tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
tp->iss = tcp_new_isn(tp);
@@ -1140,7 +1141,7 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
struct in6_addr *addr6;
int error;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
if (inp->inp_lport == 0) {
@@ -1183,7 +1184,7 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
tp->request_r_scale++;
soisconnecting(so);
- tcpstat.tcps_connattempt++;
+ V_tcpstat.tcps_connattempt++;
tp->t_state = TCPS_SYN_SENT;
tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
tp->iss = tcp_new_isn(tp);
@@ -1358,7 +1359,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt)
INP_WLOCK_RECHECK(inp);
if (optval > 0 && optval <= tp->t_maxseg &&
- optval + 40 >= tcp_minmss)
+ optval + 40 >= V_tcp_minmss)
tp->t_maxseg = optval;
else
error = EINVAL;
@@ -1458,10 +1459,10 @@ tcp_attach(struct socket *so)
}
so->so_rcv.sb_flags |= SB_AUTOSIZE;
so->so_snd.sb_flags |= SB_AUTOSIZE;
- INP_INFO_WLOCK(&tcbinfo);
- error = in_pcballoc(so, &tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
+ error = in_pcballoc(so, &V_tcbinfo);
if (error) {
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (error);
}
inp = sotoinpcb(so);
@@ -1486,12 +1487,12 @@ tcp_attach(struct socket *so)
#ifdef INET6
}
#endif
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (ENOBUFS);
}
tp->t_state = TCPS_CLOSED;
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (0);
}
@@ -1509,7 +1510,7 @@ tcp_disconnect(struct tcpcb *tp)
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp);
/*
@@ -1547,7 +1548,7 @@ static void
tcp_usrclosed(struct tcpcb *tp)
{
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(tp->t_inpcb);
switch (tp->t_state) {
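
The TCP_MAXSEG branch of tcp_ctloutput() above accepts a user-supplied segment size only if it is positive, does not exceed the current t_maxseg, and still clears the tcp_minmss floor once 40 bytes of IP and TCP header are added back. A small sketch of that test follows; the t_maxseg and floor values are assumptions.

/*
 * Sketch of the TCP_MAXSEG validation shown above; t_maxseg and the
 * minmss floor below are assumed values.
 */
#include <stdio.h>

static int
maxseg_ok(int optval, int t_maxseg, int tcp_minmss)
{

	/* Shrink only, and never below the minimum-MSS floor. */
	return (optval > 0 && optval <= t_maxseg &&
	    optval + 40 >= tcp_minmss);
}

int
main(void)
{
	int t_maxseg = 1460, tcp_minmss = 216;	/* assumed */

	printf("1200 -> %s\n", maxseg_ok(1200, t_maxseg, tcp_minmss) ?
	    "accepted" : "EINVAL");
	printf("100  -> %s\n", maxseg_ok(100, t_maxseg, tcp_minmss) ?
	    "accepted" : "EINVAL");
	return (0);
}
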
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index c9cf5d4..d864b83 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -147,7 +148,7 @@ static void
udp_zone_change(void *tag)
{
- uma_zone_set_max(udbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
}
static int
@@ -164,16 +165,16 @@ void
udp_init(void)
{
- INP_INFO_LOCK_INIT(&udbinfo, "udp");
- LIST_INIT(&udb);
- udbinfo.ipi_listhead = &udb;
- udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
- &udbinfo.ipi_hashmask);
- udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
- &udbinfo.ipi_porthashmask);
- udbinfo.ipi_zone = uma_zcreate("udpcb", sizeof(struct inpcb), NULL,
+ INP_INFO_LOCK_INIT(&V_udbinfo, "udp");
+ LIST_INIT(&V_udb);
+ V_udbinfo.ipi_listhead = &V_udb;
+ V_udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
+ &V_udbinfo.ipi_hashmask);
+ V_udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
+ &V_udbinfo.ipi_porthashmask);
+ V_udbinfo.ipi_zone = uma_zcreate("udpcb", sizeof(struct inpcb), NULL,
NULL, udp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
- uma_zone_set_max(udbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
EVENTHANDLER_PRI_ANY);
}
@@ -202,7 +203,7 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
/* Check AH/ESP integrity. */
if (ipsec4_in_reject(n, inp)) {
m_freem(n);
- ipsec4stat.in_polvio++;
+ V_ipsec4stat.in_polvio++;
return;
}
#endif /* IPSEC */
@@ -240,7 +241,7 @@ udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
m_freem(n);
if (opts)
m_freem(opts);
- udpstat.udps_fullsock++;
+ V_udpstat.udps_fullsock++;
} else
sorwakeup_locked(so);
}
@@ -261,7 +262,7 @@ udp_input(struct mbuf *m, int off)
#endif
ifp = m->m_pkthdr.rcvif;
- udpstat.udps_ipackets++;
+ V_udpstat.udps_ipackets++;
/*
* Strip IP options, if any; should skip this, make available to
@@ -279,7 +280,7 @@ udp_input(struct mbuf *m, int off)
ip = mtod(m, struct ip *);
if (m->m_len < iphlen + sizeof(struct udphdr)) {
if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
- udpstat.udps_hdrops++;
+ V_udpstat.udps_hdrops++;
return;
}
ip = mtod(m, struct ip *);
@@ -309,7 +310,7 @@ udp_input(struct mbuf *m, int off)
len = ntohs((u_short)uh->uh_ulen);
if (ip->ip_len != len) {
if (len > ip->ip_len || len < sizeof(struct udphdr)) {
- udpstat.udps_badlen++;
+ V_udpstat.udps_badlen++;
goto badunlocked;
}
m_adj(m, len - ip->ip_len);
@@ -320,7 +321,7 @@ udp_input(struct mbuf *m, int off)
* Save a copy of the IP header in case we want restore it for
* sending an ICMP error message in response.
*/
- if (!udp_blackhole)
+ if (!V_udp_blackhole)
save_ip = *ip;
else
memset(&save_ip, 0, sizeof(save_ip));
@@ -349,12 +350,12 @@ udp_input(struct mbuf *m, int off)
bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
}
if (uh_sum) {
- udpstat.udps_badsum++;
+ V_udpstat.udps_badsum++;
m_freem(m);
return;
}
} else
- udpstat.udps_nosum++;
+ V_udpstat.udps_nosum++;
#ifdef IPFIREWALL_FORWARD
/*
@@ -378,14 +379,14 @@ udp_input(struct mbuf *m, int off)
}
#endif
- INP_INFO_RLOCK(&udbinfo);
+ INP_INFO_RLOCK(&V_udbinfo);
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
in_broadcast(ip->ip_dst, ifp)) {
struct inpcb *last;
struct ip_moptions *imo;
last = NULL;
- LIST_FOREACH(inp, &udb, inp_list) {
+ LIST_FOREACH(inp, &V_udb, inp_list) {
if (inp->inp_lport != uh->uh_dport)
continue;
#ifdef INET6
@@ -460,7 +461,7 @@ udp_input(struct mbuf *m, int off)
__func__);
}
#endif
- udpstat.udps_filtermcast++;
+ V_udpstat.udps_filtermcast++;
blocked++;
}
}
@@ -498,20 +499,20 @@ udp_input(struct mbuf *m, int off)
* to send an ICMP Port Unreachable for a broadcast
 * or multicast datagram.)
*/
- udpstat.udps_noportbcast++;
+ V_udpstat.udps_noportbcast++;
goto badheadlocked;
}
udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
&udp_in);
INP_RUNLOCK(last);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
return;
}
/*
* Locate pcb for datagram.
*/
- inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport,
+ inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
ip->ip_dst, uh->uh_dport, 1, ifp);
if (inp == NULL) {
if (udp_log_in_vain) {
@@ -523,19 +524,19 @@ udp_input(struct mbuf *m, int off)
buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
ntohs(uh->uh_sport));
}
- udpstat.udps_noport++;
+ V_udpstat.udps_noport++;
if (m->m_flags & (M_BCAST | M_MCAST)) {
- udpstat.udps_noportbcast++;
+ V_udpstat.udps_noportbcast++;
goto badheadlocked;
}
- if (udp_blackhole)
+ if (V_udp_blackhole)
goto badheadlocked;
if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
goto badheadlocked;
*ip = save_ip;
ip->ip_len += iphlen;
icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
return;
}
@@ -543,7 +544,7 @@ udp_input(struct mbuf *m, int off)
* Check the minimum TTL for socket.
*/
INP_RLOCK(inp);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
INP_RUNLOCK(inp);
goto badunlocked;
@@ -555,7 +556,7 @@ udp_input(struct mbuf *m, int off)
badheadlocked:
if (inp)
INP_RUNLOCK(inp);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
badunlocked:
m_freem(m);
}
@@ -612,8 +613,8 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
return;
if (ip != NULL) {
uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
- INP_INFO_RLOCK(&udbinfo);
- inp = in_pcblookup_hash(&udbinfo, faddr, uh->uh_dport,
+ INP_INFO_RLOCK(&V_udbinfo);
+ inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
ip->ip_src, uh->uh_sport, 0, NULL);
if (inp != NULL) {
INP_RLOCK(inp);
@@ -622,9 +623,9 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
}
INP_RUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
} else
- in_pcbnotifyall(&udbinfo, faddr, inetctlerrmap[cmd],
+ in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
udp_notify);
}
@@ -641,7 +642,7 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
* resource-intensive to repeat twice on every request.
*/
if (req->oldptr == 0) {
- n = udbinfo.ipi_count;
+ n = V_udbinfo.ipi_count;
req->oldidx = 2 * (sizeof xig)
+ (n + n/8) * sizeof(struct xinpcb);
return (0);
@@ -653,10 +654,10 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
/*
* OK, now we're committed to doing something.
*/
- INP_INFO_RLOCK(&udbinfo);
- gencnt = udbinfo.ipi_gencnt;
- n = udbinfo.ipi_count;
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RLOCK(&V_udbinfo);
+ gencnt = V_udbinfo.ipi_gencnt;
+ n = V_udbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_udbinfo);
error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
+ n * sizeof(struct xinpcb));
@@ -675,8 +676,8 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
if (inp_list == 0)
return (ENOMEM);
- INP_INFO_RLOCK(&udbinfo);
- for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n;
+ INP_INFO_RLOCK(&V_udbinfo);
+ for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
inp = LIST_NEXT(inp, inp_list)) {
INP_RLOCK(inp);
if (inp->inp_gencnt <= gencnt &&
@@ -684,7 +685,7 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
inp_list[i++] = inp;
INP_RUNLOCK(inp);
}
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
n = i;
error = 0;
@@ -712,11 +713,11 @@ udp_pcblist(SYSCTL_HANDLER_ARGS)
* that something happened while we were processing this
* request, and it might be necessary to retry.
*/
- INP_INFO_RLOCK(&udbinfo);
- xig.xig_gen = udbinfo.ipi_gencnt;
+ INP_INFO_RLOCK(&V_udbinfo);
+ xig.xig_gen = V_udbinfo.ipi_gencnt;
xig.xig_sogen = so_gencnt;
- xig.xig_count = udbinfo.ipi_count;
- INP_INFO_RUNLOCK(&udbinfo);
+ xig.xig_count = V_udbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_udbinfo);
error = SYSCTL_OUT(req, &xig, sizeof xig);
}
free(inp_list, M_TEMP);
@@ -740,12 +741,12 @@ udp_getcred(SYSCTL_HANDLER_ARGS)
error = SYSCTL_IN(req, addrs, sizeof(addrs));
if (error)
return (error);
- INP_INFO_RLOCK(&udbinfo);
- inp = in_pcblookup_hash(&udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
+ INP_INFO_RLOCK(&V_udbinfo);
+ inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
if (inp != NULL) {
INP_RLOCK(inp);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
if (inp->inp_socket == NULL)
error = ENOENT;
if (error == 0)
@@ -755,7 +756,7 @@ udp_getcred(SYSCTL_HANDLER_ARGS)
cru2x(inp->inp_socket->so_cred, &xuc);
INP_RUNLOCK(inp);
} else {
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
error = ENOENT;
}
if (error == 0)
@@ -865,7 +866,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
if (sin != NULL &&
(inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
INP_RUNLOCK(inp);
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
unlock_udbinfo = 2;
} else if ((sin != NULL && (
@@ -874,9 +875,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
(inp->inp_laddr.s_addr == INADDR_ANY) ||
(inp->inp_lport == 0))) ||
(src.sin_family == AF_INET)) {
- if (!INP_INFO_TRY_RLOCK(&udbinfo)) {
+ if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
INP_RUNLOCK(inp);
- INP_INFO_RLOCK(&udbinfo);
+ INP_INFO_RLOCK(&V_udbinfo);
INP_RLOCK(inp);
}
unlock_udbinfo = 1;
@@ -891,7 +892,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
laddr = inp->inp_laddr;
lport = inp->inp_lport;
if (src.sin_family == AF_INET) {
- INP_INFO_LOCK_ASSERT(&udbinfo);
+ INP_INFO_LOCK_ASSERT(&V_udbinfo);
if ((lport == 0) ||
(laddr.s_addr == INADDR_ANY &&
src.sin_addr.s_addr == INADDR_ANY)) {
@@ -942,7 +943,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
inp->inp_lport == 0 ||
sin->sin_addr.s_addr == INADDR_ANY ||
sin->sin_addr.s_addr == INADDR_BROADCAST) {
- INP_INFO_LOCK_ASSERT(&udbinfo);
+ INP_INFO_LOCK_ASSERT(&V_udbinfo);
error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
&lport, &faddr.s_addr, &fport, NULL,
td->td_ucred);
@@ -956,7 +957,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
/* Commit the local port if newly assigned. */
if (inp->inp_laddr.s_addr == INADDR_ANY &&
inp->inp_lport == 0) {
- INP_INFO_WLOCK_ASSERT(&udbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_udbinfo);
INP_WLOCK_ASSERT(inp);
/*
* Remember addr if jailed, to prevent
@@ -1050,12 +1051,12 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
- udpstat.udps_opackets++;
+ V_udpstat.udps_opackets++;
if (unlock_udbinfo == 2)
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
else if (unlock_udbinfo == 1)
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
error = ip_output(m, inp->inp_options, NULL, ipflags,
inp->inp_moptions, inp);
if (unlock_udbinfo == 2)
@@ -1067,10 +1068,10 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
release:
if (unlock_udbinfo == 2) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
} else if (unlock_udbinfo == 1) {
INP_RUNLOCK(inp);
- INP_INFO_RUNLOCK(&udbinfo);
+ INP_INFO_RUNLOCK(&V_udbinfo);
} else
INP_RUNLOCK(inp);
m_freem(m);
@@ -1084,7 +1085,7 @@ udp_abort(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
if (inp->inp_faddr.s_addr != INADDR_ANY) {
in_pcbdisconnect(inp);
@@ -1092,7 +1093,7 @@ udp_abort(struct socket *so)
soisdisconnected(so);
}
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
}
static int
@@ -1106,17 +1107,17 @@ udp_attach(struct socket *so, int proto, struct thread *td)
error = soreserve(so, udp_sendspace, udp_recvspace);
if (error)
return (error);
- INP_INFO_WLOCK(&udbinfo);
- error = in_pcballoc(so, &udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
+ error = in_pcballoc(so, &V_udbinfo);
if (error) {
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (error);
}
inp = (struct inpcb *)so->so_pcb;
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
inp->inp_vflag |= INP_IPV4;
- inp->inp_ip_ttl = ip_defttl;
+ inp->inp_ip_ttl = V_ip_defttl;
INP_WUNLOCK(inp);
return (0);
}
@@ -1129,11 +1130,11 @@ udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
error = in_pcbbind(inp, nam, td->td_ucred);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (error);
}
@@ -1144,7 +1145,7 @@ udp_close(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp_close: inp == NULL"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
if (inp->inp_faddr.s_addr != INADDR_ANY) {
in_pcbdisconnect(inp);
@@ -1152,7 +1153,7 @@ udp_close(struct socket *so)
soisdisconnected(so);
}
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
}
static int
@@ -1164,11 +1165,11 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
if (inp->inp_faddr.s_addr != INADDR_ANY) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (EISCONN);
}
sin = (struct sockaddr_in *)nam;
@@ -1178,7 +1179,7 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
if (error == 0)
soisconnected(so);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (error);
}
@@ -1191,11 +1192,11 @@ udp_detach(struct socket *so)
KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
("udp_detach: not disconnected"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
in_pcbdetach(inp);
in_pcbfree(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
}
static int
@@ -1205,11 +1206,11 @@ udp_disconnect(struct socket *so)
inp = sotoinpcb(so);
KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
- INP_INFO_WLOCK(&udbinfo);
+ INP_INFO_WLOCK(&V_udbinfo);
INP_WLOCK(inp);
if (inp->inp_faddr.s_addr == INADDR_ANY) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (ENOTCONN);
}
@@ -1219,7 +1220,7 @@ udp_disconnect(struct socket *so)
so->so_state &= ~SS_ISCONNECTED; /* XXX */
SOCK_UNLOCK(so);
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&udbinfo);
+ INP_INFO_WUNLOCK(&V_udbinfo);
return (0);
}
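
Earlier in udp_input(), the advertised uh_ulen is only trusted after a cross-check against the remaining IP payload: a length larger than the payload or smaller than a UDP header is counted as udps_badlen, while a shorter datagram merely has the excess trimmed. The sketch below models that check with plain integers standing in for the mbuf and header fields.

/*
 * Sketch of the UDP length sanity check from udp_input(); integers
 * stand in for the mbuf/header fields.
 */
#include <stdio.h>

#define	UDPHDR_LEN	8

/* Returns 0 if acceptable (trim bytes off the tail), -1 if bad. */
static int
check_udp_len(int ip_payload, int uh_ulen, int *trim)
{

	*trim = 0;
	if (ip_payload != uh_ulen) {
		if (uh_ulen > ip_payload || uh_ulen < UDPHDR_LEN)
			return (-1);		/* udps_badlen case */
		*trim = ip_payload - uh_ulen;	/* excess to chop, cf. m_adj() */
	}
	return (0);
}

int
main(void)
{
	int ok, trim;

	ok = check_udp_len(120, 100, &trim);
	printf("120/100: ok=%d trim=%d\n", ok, trim);
	ok = check_udp_len(120, 200, &trim);
	printf("120/200: ok=%d trim=%d\n", ok, trim);
	return (0);
}
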