path: root/sys/netinet
author    jesper <jesper@FreeBSD.org>    2001-05-31 21:57:29 +0000
committer jesper <jesper@FreeBSD.org>    2001-05-31 21:57:29 +0000
commit    70faf8712a430d49fe0453f62b7f8e4f237ec0d7 (patch)
tree      6b850cfac838c6217a69fee044bd2ddcb01a75ee /sys/netinet
parent    51b1367e426bc4d7f1eb9e2bcf4f1b5bf570ffd7 (diff)
Prevent denial of service using bogus fragmented IPv4 packets.
An attacker sending a lot of bogus fragmented packets to the target (each with a different IPv4 identification field, ip_id) may be able to put the target machine into an mbuf starvation state. By setting an upper limit on the number of reassembly queues we prevent this situation.

This upper limit is controlled by the new sysctl net.inet.ip.maxfragpackets, which defaults to NMBCLUSTERS/4. If you want the old behaviour (no upper limit), set this sysctl to a negative value. If you do not want to accept any fragments (not recommended), set the sysctl to 0 (zero).

Obtained from: NetBSD (partially)
MFC after:     1 week
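For illustration only (not part of this commit): a minimal userland sketch of how the new knob could be inspected or tuned with sysctlbyname(3). The sysctl name is taken from the patch; the value 512 is an arbitrary example and setting it requires root.

/* Hedged example: query and adjust net.inet.ip.maxfragpackets from userland. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int maxfrag;
	size_t len = sizeof(maxfrag);

	/* Read the current reassembly-queue limit. */
	if (sysctlbyname("net.inet.ip.maxfragpackets", &maxfrag, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (EXIT_FAILURE);
	}
	printf("net.inet.ip.maxfragpackets = %d\n", maxfrag);

	/* Illustrative only: lower the limit to 512 (requires root). */
	maxfrag = 512;
	if (sysctlbyname("net.inet.ip.maxfragpackets", NULL, NULL,
	    &maxfrag, sizeof(maxfrag)) == -1)
		perror("sysctlbyname (set)");

	return (EXIT_SUCCESS);
}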
Diffstat (limited to 'sys/netinet')
-rw-r--r-- | sys/netinet/ip_input.c | 31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 0963a0a..71ecf06 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -124,6 +124,12 @@ SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
&ip_keepfaith, 0,
"Enable packet capture for FAITH IPv4->IPv6 translater daemon");
+static int ip_nfragpackets = 0;
+static int ip_maxfragpackets = NMBCLUSTERS/4;
+SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
+ &ip_maxfragpackets, 0,
+ "Maximum number of IPv4 fragment reassembly queue entries");
+
/*
* XXX - Setting ip_checkinterface mostly implements the receive side of
* the Strong ES model described in RFC 1122, but since the routing table
@@ -862,6 +868,15 @@ ip_reass(m, head, fp)
* If first fragment to arrive, create a reassembly queue.
*/
if (fp == 0) {
+ /*
+ * Enforce upper bound on number of fragmented packets
+ * for which we attempt reassembly;
+ * If maxfrag is 0, never accept fragments.
+ * If maxfrag is -1, accept all fragments without limitation.
+ */
+ if ((ip_maxfragpackets >= 0) && (ip_nfragpackets >= ip_maxfragpackets))
+ goto dropfrag;
+ ip_nfragpackets++;
if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL)
goto dropfrag;
fp = mtod(t, struct ipq *);
@@ -1010,6 +1025,7 @@ inserted:
TAILQ_REMOVE(head, fp, ipq_list);
nipq--;
(void) m_free(dtom(fp));
+ ip_nfragpackets--;
m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
/* some debugging cruft by sklower, below, will go away soon */
@@ -1051,6 +1067,7 @@ ip_freef(fhp, fp)
}
TAILQ_REMOVE(fhp, fp, ipq_list);
(void) m_free(dtom(fp));
+ ip_nfragpackets--;
nipq--;
}
@@ -1078,6 +1095,20 @@ ip_slowtimo()
}
}
}
+ /*
+ * If we are over the maximum number of fragments
+ * (due to the limit being lowered), drain off
+ * enough to get down to the new limit.
+ */
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ if (ip_maxfragpackets >= 0) {
+ while (ip_nfragpackets > ip_maxfragpackets &&
+ !TAILQ_EMPTY(&ipq[i])) {
+ ipstat.ips_fragdropped++;
+ ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
+ }
+ }
+ }
ipflow_slowtimo();
splx(s);
}
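The admission predicate added to ip_reass() above can be read in isolation. Below is a small userland sketch (the helper name frag_admit is hypothetical, not in the patch) exercising the three cases described in the comment: a negative limit accepts everything, a limit of 0 accepts nothing, and otherwise a new reassembly queue is refused once the count reaches the limit.

/* Hedged sketch: the reassembly-queue admission check, pulled out of the kernel
 * context so the documented cases can be tested standalone. */
#include <assert.h>

/* Returns nonzero if a new reassembly queue may be created. */
static int
frag_admit(int maxfragpackets, int nfragpackets)
{
	/* Negative limit: old behaviour, no upper bound.
	 * Limit of 0: never accept fragments. */
	if (maxfragpackets >= 0 && nfragpackets >= maxfragpackets)
		return (0);
	return (1);
}

int
main(void)
{
	assert(frag_admit(-1, 1000000) == 1);	/* unlimited */
	assert(frag_admit(0, 0) == 0);		/* drop all fragments */
	assert(frag_admit(800, 799) == 1);	/* below the limit */
	assert(frag_admit(800, 800) == 0);	/* at the limit: drop */
	return (0);
}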