author     jeff <jeff@FreeBSD.org>    2002-05-02 02:08:48 +0000
committer  jeff <jeff@FreeBSD.org>    2002-05-02 02:08:48 +0000
commit     b152d5fbb5c19a734918705720935da121992010 (patch)
tree       3151c53a5a4481a8b9fedf0d8f1ab5cfaa46389f /sys/vm/uma_core.c
parent     c136e5d44278a6b1c5a93a71adab858b5e07503b (diff)
Remove the temporary alignment check in free().
Implement the following checks on freed memory in the bucket path:
 - Slab membership
 - Alignment
 - Duplicate free

This previously was only done if we skipped the buckets. This code will slow
down INVARIANTS a bit, but it is smp safe. The checks were moved out of the
normal path and into hooks supplied in uma_dbg.
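For reference, the following is a rough sketch of the kind of checks the uma_dbg_free()/uma_dbg_alloc() hooks are expected to perform, reconstructed from the inline INVARIANTS code removed from uma_zfree_internal() in the last hunk below. The function name sketch_dbg_free() is invented for illustration, and the assumption that the allocation-side hook marks an item's freelist slot with 255 mirrors the removed inline code; the actual uma_dbg.c implementation (including how slab membership is resolved when no slab pointer is passed) may differ.

/*
 * Sketch only -- modeled on the inline INVARIANTS checks removed from
 * uma_zfree_internal() in this commit, not the actual uma_dbg.c code.
 * The caller (or a slab lookup) is assumed to have already established
 * which slab the item claims to belong to.
 */
static void
sketch_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	int freei;

	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / zone->uz_rsize;

	/* Alignment: the item must start on an uz_rsize boundary. */
	if (((freei * zone->uz_rsize) + slab->us_data) != item)
		panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
		    zone->uz_name, zone, slab, item);

	/* Slab membership: the index must be a valid item of this slab. */
	if (freei >= zone->uz_ipers)
		panic("zone: %s(%p) slab %p freelist %d out of range 0-%d\n",
		    zone->uz_name, zone, slab, freei, zone->uz_ipers - 1);

	/*
	 * Duplicate free: the allocation-side hook is assumed to mark an
	 * allocated item's freelist slot with 255 (as the removed inline
	 * code did), so any other value here means the item is already
	 * on the freelist.
	 */
	if (slab->us_freelist[freei] != 255)
		panic("Duplicate free of item %p from zone %p(%s)\n",
		    item, zone, zone->uz_name);
}

Centralizing these checks in hooks keeps them out of the normal allocation and free paths while letting both the bucket path and the internal path share the same INVARIANTS-only validation.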
Diffstat (limited to 'sys/vm/uma_core.c')
-rw-r--r--  sys/vm/uma_core.c  40
1 file changed, 21 insertions, 19 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 53c1d59..00b489b 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -45,7 +45,6 @@
 /*
  * TODO:
  * - Improve memory usage for large allocations
- * - Improve INVARIANTS (0xdeadc0de write out)
  * - Investigate cache size adjustments
  */
 
@@ -81,6 +80,7 @@
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 #include <vm/uma_int.h>
+#include <vm/uma_dbg.h>
 
 /*
  * This is the zone from which all zones are spawned. The idea is that even
@@ -1321,6 +1321,9 @@ zalloc_start:
 			    ("uma_zalloc: Bucket pointer mangled."));
 			cache->uc_allocs++;
 			CPU_UNLOCK(zone, cpu);
+#ifdef INVARIANTS
+			uma_dbg_alloc(zone, NULL, item);
+#endif
 			if (zone->uz_ctor)
 				zone->uz_ctor(item, zone->uz_size, udata);
 			if (flags & M_ZERO)
@@ -1540,13 +1543,14 @@ new_slab:
 	while (slab->us_freecount) {
 		freei = slab->us_firstfree;
 		slab->us_firstfree = slab->us_freelist[freei];
-#ifdef INVARIANTS
-		slab->us_freelist[freei] = 255;
-#endif
-		slab->us_freecount--;
-		zone->uz_free--;
+
 		item = slab->us_data + (zone->uz_rsize * freei);
+		slab->us_freecount--;
+		zone->uz_free--;
+#ifdef INVARIANTS
+		uma_dbg_alloc(zone, slab, item);
+#endif
 		if (bucket == NULL) {
 			zone->uz_allocs++;
 			break;
@@ -1616,6 +1620,13 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 	if (zone->uz_flags & UMA_ZFLAG_FULL)
 		goto zfree_internal;
 
+#ifdef INVARIANTS
+	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
+		uma_dbg_free(zone, udata, item);
+	else
+		uma_dbg_free(zone, NULL, item);
+#endif
+
 zfree_restart:
 	cpu = PCPU_GET(cpuid);
 	CPU_LOCK(zone, cpu);
@@ -1768,21 +1779,12 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
 	/* Slab management stuff */
 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
 	    / zone->uz_rsize;
+
 #ifdef INVARIANTS
-	if (((freei * zone->uz_rsize) + slab->us_data) != item)
-		panic("zone: %s(%p) slab %p freed address %p unaligned.\n",
-		    zone->uz_name, zone, slab, item);
-	if (freei >= zone->uz_ipers)
-		panic("zone: %s(%p) slab %p freelist %i out of range 0-%d\n",
-		    zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
-
-	if (slab->us_freelist[freei] != 255) {
-		printf("Slab at %p, freei %d = %d.\n",
-		    slab, freei, slab->us_freelist[freei]);
-		panic("Duplicate free of item %p from zone %p(%s)\n",
-		    item, zone, zone->uz_name);
-	}
+	if (!skip)
+		uma_dbg_free(zone, slab, item);
 #endif
+
 	slab->us_freelist[freei] = slab->us_firstfree;
 	slab->us_firstfree = freei;
 	slab->us_freecount++;
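The last three context lines above are the slab freelist bookkeeping itself: us_freelist[] holds, for each item, the index of the next free item, us_firstfree heads that list, and us_freecount tracks how many items remain, so a free pushes an index and an allocation pops one. The following is a standalone, simplified illustration of that index-stack scheme; the mini_slab names and sizes are invented for the example and are not UMA's own API.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified illustration of a slab's embedded freelist: an array of
 * per-item "next free" indices plus a head index, mirroring the
 * us_freelist/us_firstfree/us_freecount fields in the diff.  This is a
 * userland toy, not UMA code.
 */
#define MINI_IPERS	8

struct mini_slab {
	uint8_t	first_free;		/* like us_firstfree */
	uint8_t	free_count;		/* like us_freecount */
	uint8_t	free_list[MINI_IPERS];	/* like us_freelist[] */
};

static void
mini_slab_init(struct mini_slab *s)
{
	int i;

	for (i = 0; i < MINI_IPERS; i++)
		s->free_list[i] = i + 1;	/* each item points at the next */
	s->first_free = 0;
	s->free_count = MINI_IPERS;
}

/* Pop an item index off the freelist, as the allocation path does. */
static int
mini_slab_alloc(struct mini_slab *s)
{
	int freei;

	if (s->free_count == 0)
		return (-1);
	freei = s->first_free;
	s->first_free = s->free_list[freei];
	s->free_count--;
	return (freei);
}

/* Push an item index back onto the freelist, as the free path above does. */
static void
mini_slab_free(struct mini_slab *s, int freei)
{
	s->free_list[freei] = s->first_free;
	s->first_free = freei;
	s->free_count++;
}

int
main(void)
{
	struct mini_slab s;
	int a, b;

	mini_slab_init(&s);
	a = mini_slab_alloc(&s);
	b = mini_slab_alloc(&s);
	mini_slab_free(&s, a);
	printf("allocated %d and %d, next alloc reuses %d\n",
	    a, b, mini_slab_alloc(&s));
	return (0);
}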