summaryrefslogtreecommitdiffstats
path: root/sys/vm/uma_dbg.c
diff options
context:
space:
mode:
authorgreen <green@FreeBSD.org>2004-10-08 20:19:29 +0000
committergreen <green@FreeBSD.org>2004-10-08 20:19:29 +0000
commit9128ff1ce9a84a3099a1b9e8ae5cfc56b2276565 (patch)
tree50327d4c1c80261747d0d09c606631d722403fd2 /sys/vm/uma_dbg.c
parenta0fc078f8632157112a60136eb368dbc8203e93f (diff)
downloadFreeBSD-src-9128ff1ce9a84a3099a1b9e8ae5cfc56b2276565.zip
FreeBSD-src-9128ff1ce9a84a3099a1b9e8ae5cfc56b2276565.tar.gz
Fix critical stability problems that can cause UMA mbuf cluster
state management corruption, mbuf leaks, general mbuf corruption, and at least on i386 a first level splash damage radius that encompasses up to about half a megabyte of the memory after an mbuf cluster's allocation slab. In short, this has caused instability nightmares anywhere the right kind of network traffic is present. When the polymorphic refcount slabs were added to UMA, the new types were not used pervasively. In particular, the slab management structure was turned into one for refcounts, and one for non-refcounts (supposed to be mostly like the old slab management structure), but the latter was almost always used throughout. In general, every access to zones with UMA_ZONE_REFCNT turned on corrupted the "next free" slab offset and the refcount with each other and with other allocations (on i386, 2 mbuf clusters per 4096 byte slab). Fix things so that the right type is used to access refcounted zones where it was not before. There are additional errors in gross overestimation of padding, it seems, that would cause large kegs (nee zones) to be allocated when small ones would do. Unless I have analyzed this incorrectly, it is not directly harmful.
Diffstat (limited to 'sys/vm/uma_dbg.c')
-rw-r--r--sys/vm/uma_dbg.c50
1 files changed, 37 insertions, 13 deletions
diff --git a/sys/vm/uma_dbg.c b/sys/vm/uma_dbg.c
index cbf164a..963a36e 100644
--- a/sys/vm/uma_dbg.c
+++ b/sys/vm/uma_dbg.c
@@ -218,6 +218,7 @@ void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
int freei;
keg = zone->uz_keg;
@@ -231,7 +232,12 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
freei = ((unsigned long)item - (unsigned long)slab->us_data)
/ keg->uk_rsize;
- slab->us_freelist[freei].us_item = 255;
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slabref->us_freelist[freei].us_item = 255;
+ } else {
+ slab->us_freelist[freei].us_item = 255;
+ }
return;
}
@@ -246,6 +252,7 @@ void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
int freei;
keg = zone->uz_keg;
@@ -270,17 +277,34 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
(freei * keg->uk_rsize) + slab->us_data);
}
- if (slab->us_freelist[freei].us_item != 255) {
- printf("Slab at %p, freei %d = %d.\n",
- slab, freei, slab->us_freelist[freei].us_item);
- panic("Duplicate free of item %p from zone %p(%s)\n",
- item, zone, zone->uz_name);
- }
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ if (slabref->us_freelist[freei].us_item != 255) {
+ printf("Slab at %p, freei %d = %d.\n",
+ slab, freei, slabref->us_freelist[freei].us_item);
+ panic("Duplicate free of item %p from zone %p(%s)\n",
+ item, zone, zone->uz_name);
+ }
- /*
- * When this is actually linked into the slab this will change.
- * Until then the count of valid slabs will make sure we don't
- * accidentally follow this and assume it's a valid index.
- */
- slab->us_freelist[freei].us_item = 0;
+ /*
+ * When this is actually linked into the slab this will change.
+ * Until then the count of valid slabs will make sure we don't
+ * accidentally follow this and assume it's a valid index.
+ */
+ slabref->us_freelist[freei].us_item = 0;
+ } else {
+ if (slab->us_freelist[freei].us_item != 255) {
+ printf("Slab at %p, freei %d = %d.\n",
+ slab, freei, slab->us_freelist[freei].us_item);
+ panic("Duplicate free of item %p from zone %p(%s)\n",
+ item, zone, zone->uz_name);
+ }
+
+ /*
+ * When this is actually linked into the slab this will change.
+ * Until then the count of valid slabs will make sure we don't
+ * accidentally follow this and assume it's a valid index.
+ */
+ slab->us_freelist[freei].us_item = 0;
+ }
}
OpenPOWER on IntegriCloud