summary | refs | log | tree | commit | diff | stats
path: root/sys
diff options
context:
space:
mode:
authorjeff <jeff@FreeBSD.org>2002-05-02 02:08:48 +0000
committerjeff <jeff@FreeBSD.org>2002-05-02 02:08:48 +0000
commitb152d5fbb5c19a734918705720935da121992010 (patch)
tree3151c53a5a4481a8b9fedf0d8f1ab5cfaa46389f /sys
parentc136e5d44278a6b1c5a93a71adab858b5e07503b (diff)
downloadFreeBSD-src-b152d5fbb5c19a734918705720935da121992010.zip
FreeBSD-src-b152d5fbb5c19a734918705720935da121992010.tar.gz
Remove the temporary alignment check in free().
Implement the following checks on freed memory in the bucket path: slab membership, alignment, and duplicate free. This previously was only done if we skipped the buckets. This code will slow down INVARIANTS a bit, but it is SMP safe. The checks were moved out of the normal path and into hooks supplied in uma_dbg.
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_malloc.c6
-rw-r--r--sys/vm/uma_core.c40
-rw-r--r--sys/vm/uma_dbg.c95
-rw-r--r--sys/vm/uma_dbg.h2
4 files changed, 118 insertions, 25 deletions
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 56769ad..57a3b80 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -208,12 +208,6 @@ free(addr, type)
if (addr == NULL)
return;
- if ((u_long)addr & 3) { /* XXX: Jeff: find better value for 3 */
- printf("free(9)'ing unaligned pointer %p\n", addr);
- Debugger("Don't do that...");
- return;
- }
-
size = 0;
mem = (void *)((u_long)addr & (~UMA_SLAB_MASK));
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 53c1d59..00b489b 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -45,7 +45,6 @@
/*
* TODO:
* - Improve memory usage for large allocations
- * - Improve INVARIANTS (0xdeadc0de write out)
* - Investigate cache size adjustments
*/
@@ -81,6 +80,7 @@
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
+#include <vm/uma_dbg.h>
/*
* This is the zone from which all zones are spawned. The idea is that even
@@ -1321,6 +1321,9 @@ zalloc_start:
("uma_zalloc: Bucket pointer mangled."));
cache->uc_allocs++;
CPU_UNLOCK(zone, cpu);
+#ifdef INVARIANTS
+ uma_dbg_alloc(zone, NULL, item);
+#endif
if (zone->uz_ctor)
zone->uz_ctor(item, zone->uz_size, udata);
if (flags & M_ZERO)
@@ -1540,13 +1543,14 @@ new_slab:
while (slab->us_freecount) {
freei = slab->us_firstfree;
slab->us_firstfree = slab->us_freelist[freei];
-#ifdef INVARIANTS
- slab->us_freelist[freei] = 255;
-#endif
- slab->us_freecount--;
- zone->uz_free--;
+
item = slab->us_data + (zone->uz_rsize * freei);
+ slab->us_freecount--;
+ zone->uz_free--;
+#ifdef INVARIANTS
+ uma_dbg_alloc(zone, slab, item);
+#endif
if (bucket == NULL) {
zone->uz_allocs++;
break;
@@ -1616,6 +1620,13 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
if (zone->uz_flags & UMA_ZFLAG_FULL)
goto zfree_internal;
+#ifdef INVARIANTS
+ if (zone->uz_flags & UMA_ZFLAG_MALLOC)
+ uma_dbg_free(zone, udata, item);
+ else
+ uma_dbg_free(zone, NULL, item);
+#endif
+
zfree_restart:
cpu = PCPU_GET(cpuid);
CPU_LOCK(zone, cpu);
@@ -1768,21 +1779,12 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
/* Slab management stuff */
freei = ((unsigned long)item - (unsigned long)slab->us_data)
/ zone->uz_rsize;
+
#ifdef INVARIANTS
- if (((freei * zone->uz_rsize) + slab->us_data) != item)
- panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
- zone->uz_name, zone, slab, item);
- if (freei >= zone->uz_ipers)
- panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
- zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
-
- if (slab->us_freelist[freei] != 255) {
- printf("Slab at %p, freei %d = %d.\n",
- slab, freei, slab->us_freelist[freei]);
- panic("Duplicate free of item %p from zone %p(%s)\n",
- item, zone, zone->uz_name);
- }
+ if (!skip)
+ uma_dbg_free(zone, slab, item);
#endif
+
slab->us_freelist[freei] = slab->us_firstfree;
slab->us_firstfree = freei;
slab->us_freecount++;
diff --git a/sys/vm/uma_dbg.c b/sys/vm/uma_dbg.c
index 40aedbf..2e5c894 100644
--- a/sys/vm/uma_dbg.c
+++ b/sys/vm/uma_dbg.c
@@ -110,3 +110,98 @@ trash_fini(void *mem, int size)
{
trash_ctor(mem, size, NULL);
}
+
+static uma_slab_t
+uma_dbg_getslab(uma_zone_t zone, void *item)
+{
+ uma_slab_t slab;
+ u_int8_t *mem;
+
+ mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
+ if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
+ slab = hash_sfind(mallochash, mem);
+ } else if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
+ ZONE_LOCK(zone);
+ slab = hash_sfind(&zone->uz_hash, mem);
+ ZONE_UNLOCK(zone);
+ } else {
+ mem += zone->uz_pgoff;
+ slab = (uma_slab_t)mem;
+ }
+
+ return (slab);
+}
+
+/*
+ * Set up the slab's freei data such that uma_dbg_free can function.
+ *
+ */
+
+void
+uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
+{
+ int freei;
+
+ if (slab == NULL) {
+ slab = uma_dbg_getslab(zone, item);
+ if (slab == NULL)
+ panic("uma: item %p did not belong to zone %s\n",
+ item, zone->uz_name);
+ }
+
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / zone->uz_rsize;
+
+ slab->us_freelist[freei] = 255;
+
+ return;
+}
+
+/*
+ * Verifies freed addresses. Checks for alignment, valid slab membership
+ * and duplicate frees.
+ *
+ */
+
+void
+uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
+{
+ int freei;
+
+ return;
+
+ if (slab == NULL) {
+ slab = uma_dbg_getslab(zone, item);
+ if (slab == NULL)
+ panic("uma: Freed item %p did not belong to zone %s\n",
+ item, zone->uz_name);
+ }
+
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / zone->uz_rsize;
+
+ if (freei >= zone->uz_ipers)
+ panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
+ zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
+
+ if (((freei * zone->uz_rsize) + slab->us_data) != item) {
+ printf("zone: %s(%p) slab %p freed address %p unaligned.\n",
+ zone->uz_name, zone, slab, item);
+ panic("should be %p\n",
+ (freei * zone->uz_rsize) + slab->us_data);
+ }
+
+ if (slab->us_freelist[freei] != 255) {
+ printf("Slab at %p, freei %d = %d.\n",
+ slab, freei, slab->us_freelist[freei]);
+ panic("Duplicate free of item %p from zone %p(%s)\n",
+ item, zone, zone->uz_name);
+ }
+
+ /*
+ * When this is actually linked into the slab this will change.
+ * Until then the count of valid slabs will make sure we don't
+ * accidentally follow this and assume it's a valid index.
+ */
+ slab->us_freelist[freei] = 0;
+}
diff --git a/sys/vm/uma_dbg.h b/sys/vm/uma_dbg.h
index 126dc57..fed04a6 100644
--- a/sys/vm/uma_dbg.h
+++ b/sys/vm/uma_dbg.h
@@ -43,5 +43,7 @@ void trash_ctor(void *mem, int size, void *arg);
void trash_dtor(void *mem, int size, void *arg);
void trash_init(void *mem, int size);
void trash_fini(void *mem, int size);
+void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
+void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif /* VM_UMA_DBG_H */
OpenPOWER on IntegriCloud