Diffstat (limited to 'sys/netinet6/frag6.c')
-rw-r--r--	sys/netinet6/frag6.c	77
1 file changed, 39 insertions(+), 38 deletions(-)
diff --git a/sys/netinet6/frag6.c b/sys/netinet6/frag6.c
index 9826f6b..bad4410 100644
--- a/sys/netinet6/frag6.c
+++ b/sys/netinet6/frag6.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
+#include <sys/vimage.h>
#include <net/if.h>
#include <net/route.h>
@@ -91,22 +92,22 @@ static void
frag6_change(void *tag)
{
- ip6_maxfragpackets = nmbclusters / 4;
- ip6_maxfrags = nmbclusters / 4;
+ V_ip6_maxfragpackets = nmbclusters / 4;
+ V_ip6_maxfrags = nmbclusters / 4;
}
void
frag6_init(void)
{
- ip6_maxfragpackets = nmbclusters / 4;
- ip6_maxfrags = nmbclusters / 4;
+ V_ip6_maxfragpackets = nmbclusters / 4;
+ V_ip6_maxfrags = nmbclusters / 4;
EVENTHANDLER_REGISTER(nmbclusters_change,
frag6_change, NULL, EVENTHANDLER_PRI_ANY);
IP6Q_LOCK_INIT();
- ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
+ V_ip6q.ip6q_next = V_ip6q.ip6q_prev = &V_ip6q;
}
/*
@@ -203,7 +204,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
return IPPROTO_DONE;
}
- ip6stat.ip6s_fragments++;
+ V_ip6stat.ip6s_fragments++;
in6_ifstat_inc(dstifp, ifs6_reass_reqd);
/* offset now points to data portion */
@@ -216,18 +217,18 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
* If maxfrag is 0, never accept fragments.
* If maxfrag is -1, accept all fragments without limitation.
*/
- if (ip6_maxfrags < 0)
+ if (V_ip6_maxfrags < 0)
;
- else if (frag6_nfrags >= (u_int)ip6_maxfrags)
+ else if (V_frag6_nfrags >= (u_int)V_ip6_maxfrags)
goto dropfrag;
- for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
+ for (q6 = V_ip6q.ip6q_next; q6 != &V_ip6q; q6 = q6->ip6q_next)
if (ip6f->ip6f_ident == q6->ip6q_ident &&
IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
break;
- if (q6 == &ip6q) {
+ if (q6 == &V_ip6q) {
/*
* the first fragment to arrive, create a reassembly queue.
*/
@@ -240,18 +241,18 @@ frag6_input(struct mbuf **mp, int *offp, int proto)
* If maxfragpackets is -1, accept all fragments without
* limitation.
*/
- if (ip6_maxfragpackets < 0)
+ if (V_ip6_maxfragpackets < 0)
;
- else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
+ else if (V_frag6_nfragpackets >= (u_int)V_ip6_maxfragpackets)
goto dropfrag;
- frag6_nfragpackets++;
+ V_frag6_nfragpackets++;
q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
M_NOWAIT);
if (q6 == NULL)
goto dropfrag;
bzero(q6, sizeof(*q6));
- frag6_insque(q6, &ip6q);
+ frag6_insque(q6, &V_ip6q);
/* ip6q_nxt will be filled afterwards, from 1st fragment */
q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
@@ -465,12 +466,12 @@ insert:
* the most recently active fragmented packet.
*/
frag6_enq(ip6af, af6->ip6af_up);
- frag6_nfrags++;
+ V_frag6_nfrags++;
q6->ip6q_nfrag++;
#if 0 /* xxx */
- if (q6 != ip6q.ip6q_next) {
+ if (q6 != V_ip6q.ip6q_next) {
frag6_remque(q6);
- frag6_insque(q6, &ip6q);
+ frag6_insque(q6, &V_ip6q);
}
#endif
next = 0;
@@ -528,9 +529,9 @@ insert:
/* this comes with no copy if the boundary is on cluster */
if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
frag6_remque(q6);
- frag6_nfrags -= q6->ip6q_nfrag;
+ V_frag6_nfrags -= q6->ip6q_nfrag;
free(q6, M_FTABLE);
- frag6_nfragpackets--;
+ V_frag6_nfragpackets--;
goto dropfrag;
}
m_adj(t, sizeof(struct ip6_frag));
@@ -546,9 +547,9 @@ insert:
}
frag6_remque(q6);
- frag6_nfrags -= q6->ip6q_nfrag;
+ V_frag6_nfrags -= q6->ip6q_nfrag;
free(q6, M_FTABLE);
- frag6_nfragpackets--;
+ V_frag6_nfragpackets--;
if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
int plen = 0;
@@ -557,7 +558,7 @@ insert:
m->m_pkthdr.len = plen;
}
- ip6stat.ip6s_reassembled++;
+ V_ip6stat.ip6s_reassembled++;
in6_ifstat_inc(dstifp, ifs6_reass_ok);
/*
@@ -573,7 +574,7 @@ insert:
dropfrag:
IP6Q_UNLOCK();
in6_ifstat_inc(dstifp, ifs6_reass_fail);
- ip6stat.ip6s_fragdropped++;
+ V_ip6stat.ip6s_fragdropped++;
m_freem(m);
return IPPROTO_DONE;
}
@@ -617,9 +618,9 @@ frag6_freef(struct ip6q *q6)
free(af6, M_FTABLE);
}
frag6_remque(q6);
- frag6_nfrags -= q6->ip6q_nfrag;
+ V_frag6_nfrags -= q6->ip6q_nfrag;
free(q6, M_FTABLE);
- frag6_nfragpackets--;
+ V_frag6_nfragpackets--;
}
/*
@@ -684,13 +685,13 @@ frag6_slowtimo(void)
struct ip6q *q6;
IP6Q_LOCK();
- q6 = ip6q.ip6q_next;
+ q6 = V_ip6q.ip6q_next;
if (q6)
- while (q6 != &ip6q) {
+ while (q6 != &V_ip6q) {
--q6->ip6q_ttl;
q6 = q6->ip6q_next;
if (q6->ip6q_prev->ip6q_ttl == 0) {
- ip6stat.ip6s_fragtimeout++;
+ V_ip6stat.ip6s_fragtimeout++;
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
frag6_freef(q6->ip6q_prev);
}
@@ -700,11 +701,11 @@ frag6_slowtimo(void)
* (due to the limit being lowered), drain off
* enough to get down to the new limit.
*/
- while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
- ip6q.ip6q_prev) {
- ip6stat.ip6s_fragoverflow++;
+ while (V_frag6_nfragpackets > (u_int)V_ip6_maxfragpackets &&
+ V_ip6q.ip6q_prev) {
+ V_ip6stat.ip6s_fragoverflow++;
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
- frag6_freef(ip6q.ip6q_prev);
+ frag6_freef(V_ip6q.ip6q_prev);
}
IP6Q_UNLOCK();
@@ -714,9 +715,9 @@ frag6_slowtimo(void)
* make sure we notice eventually, even if forwarding only for one
* destination and the cache is never replaced.
*/
- if (ip6_forward_rt.ro_rt) {
- RTFREE(ip6_forward_rt.ro_rt);
- ip6_forward_rt.ro_rt = 0;
+ if (V_ip6_forward_rt.ro_rt) {
+ RTFREE(V_ip6_forward_rt.ro_rt);
+ V_ip6_forward_rt.ro_rt = 0;
}
if (ipsrcchk_rt.ro_rt) {
RTFREE(ipsrcchk_rt.ro_rt);
@@ -734,10 +735,10 @@ frag6_drain(void)
if (IP6Q_TRYLOCK() == 0)
return;
- while (ip6q.ip6q_next != &ip6q) {
- ip6stat.ip6s_fragdropped++;
+ while (V_ip6q.ip6q_next != &V_ip6q) {
+ V_ip6stat.ip6s_fragdropped++;
/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
- frag6_freef(ip6q.ip6q_next);
+ frag6_freef(V_ip6q.ip6q_next);
}
IP6Q_UNLOCK();
}
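
The diff above converts the frag6 reassembly globals (ip6_maxfragpackets, ip6_maxfrags, frag6_nfragpackets, frag6_nfragpackets, the ip6q queue head, the cached routes, and the ip6stat counters) to their V_-prefixed forms, which is why <sys/vimage.h> is now included: every access goes through a macro so the variable can later be resolved per network-stack instance. The following is a minimal, hypothetical sketch of that indirection; the struct and macro names here are illustrative only, and the real definitions live in sys/vimage.h (which, when VIMAGE is not compiled in, may simply map each V_name straight back to the existing global).

#include <stddef.h>

/*
 * Hypothetical per-stack container for the virtualized frag6 state.
 * Field and structure names are assumptions for illustration, not the
 * actual FreeBSD definitions.
 */
struct vnet_inet6 {
	int		_ip6_maxfragpackets;	/* reassembly packet limit */
	int		_ip6_maxfrags;		/* total fragment limit */
	unsigned int	_frag6_nfragpackets;	/* packets being reassembled */
	unsigned int	_frag6_nfrags;		/* fragments currently held */
};

/* A single instance stands in for "the" stack when there is only one. */
static struct vnet_inet6 vnet0_inet6;

/* Each V_ macro selects the field of the current stack instance. */
#define	V_ip6_maxfragpackets	(vnet0_inet6._ip6_maxfragpackets)
#define	V_ip6_maxfrags		(vnet0_inet6._ip6_maxfrags)
#define	V_frag6_nfragpackets	(vnet0_inet6._frag6_nfragpackets)
#define	V_frag6_nfrags		(vnet0_inet6._frag6_nfrags)

With macros of this shape, statements such as V_frag6_nfragpackets++ in the patched code compile unchanged whether the counters are plain globals or fields of a per-vnet structure chosen at run time.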