summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authordyson <dyson@FreeBSD.org>1997-09-21 04:24:27 +0000
committerdyson <dyson@FreeBSD.org>1997-09-21 04:24:27 +0000
commite64b1984f97c6d987d7d36b61a3afe5028a08312 (patch)
tree325bcf17de3aad0383fb86548872026a7c3d2599 /sys/vm
parent1419fcb42b4e1e5a73f4574739f4c232fde357e2 (diff)
downloadFreeBSD-src-e64b1984f97c6d987d7d36b61a3afe5028a08312.zip
FreeBSD-src-e64b1984f97c6d987d7d36b61a3afe5028a08312.tar.gz
Change the M_NAMEI allocations to use the zone allocator. This change
plus the previous changes to use the zone allocator decrease the usage of malloc by half. The zone allocator will be upgradeable to use per-CPU pools, and has more intelligent usage of SPLs. Additionally, it has reasonable stats-gathering capabilities, while making most calls inline.
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_map.c13
-rw-r--r--sys/vm/vm_object.c32
-rw-r--r--sys/vm/vm_object.h3
-rw-r--r--sys/vm/vm_zone.c70
-rw-r--r--sys/vm/vm_zone.h33
5 files changed, 125 insertions, 26 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1b7e1ac..d4235ad 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.89 1997/09/01 03:17:18 bde Exp $
+ * $Id: vm_map.c,v 1.90 1997/09/12 15:58:47 jlemon Exp $
*/
/*
@@ -214,8 +214,9 @@ vm_init2(void) {
zinitna(mapentzone, &mapentobj,
NULL, 0, 0, 0, 4);
zinitna(mapzone, &mapobj,
- NULL, 0, 0, 0, 1);
+ NULL, 0, 0, 0, 2);
pmap_init2();
+ vm_object_init2();
}
void
@@ -684,8 +685,12 @@ vm_map_findspace(map, start, length, addr)
}
SAVE_HINT(map, entry);
*addr = start;
- if (map == kernel_map && round_page(start + length) > kernel_vm_end)
- pmap_growkernel(round_page(start + length));
+ if (map == kernel_map) {
+ vm_offset_t ksize;
+ if ((ksize = round_page(start + length)) > kernel_vm_end) {
+ pmap_growkernel(ksize);
+ }
+ }
return (0);
}
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 40a4a50..388cc5f 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.96 1997/09/01 02:55:48 bde Exp $
+ * $Id: vm_object.c,v 1.97 1997/09/01 03:17:22 bde Exp $
*/
/*
@@ -89,6 +89,7 @@
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
+#include <vm/vm_zone.h>
static void vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
@@ -138,6 +139,10 @@ extern int vm_pageout_page_count;
static long object_collapses;
static long object_bypasses;
static int next_index;
+static vm_zone_t obj_zone;
+static struct vm_zone obj_zone_store;
+#define VM_OBJECTS_INIT 256
+struct vm_object vm_objects_init[VM_OBJECTS_INIT];
void
_vm_object_allocate(type, size, object)
@@ -145,6 +150,7 @@ _vm_object_allocate(type, size, object)
vm_size_t size;
register vm_object_t object;
{
+ int incr;
TAILQ_INIT(&object->memq);
TAILQ_INIT(&object->shadow_head);
@@ -157,7 +163,11 @@ _vm_object_allocate(type, size, object)
object->resident_page_count = 0;
object->shadow_count = 0;
object->pg_color = next_index;
- next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
+ if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
+ incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
+ else
+ incr = size;
+ next_index = (next_index + incr) & PQ_L2_MASK;
object->handle = NULL;
object->paging_offset = (vm_ooffset_t) 0;
object->backing_object = NULL;
@@ -194,6 +204,15 @@ vm_object_init()
kmem_object = &kmem_object_store;
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
+
+ obj_zone = &obj_zone_store;
+ zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
+ vm_objects_init, VM_OBJECTS_INIT);
+}
+
+void
+vm_object_init2() {
+ zinitna(obj_zone, NULL, NULL, 0, 0, 0, 4);
}
/*
@@ -208,10 +227,7 @@ vm_object_allocate(type, size)
vm_size_t size;
{
register vm_object_t result;
-
- result = (vm_object_t)
- malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);
-
+ result = (vm_object_t) zalloc(obj_zone);
_vm_object_allocate(type, size, result);
@@ -429,7 +445,7 @@ vm_object_terminate(object)
/*
* Free the space for the object.
*/
- free((caddr_t) object, M_VMOBJ);
+ zfree(obj_zone, object);
}
/*
@@ -1102,7 +1118,7 @@ vm_object_collapse(object)
object_list);
vm_object_count--;
- free((caddr_t) backing_object, M_VMOBJ);
+ zfree(obj_zone, backing_object);
object_collapses++;
} else {
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index d6aaa00..e2ef694 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.36 1997/08/05 00:02:06 dyson Exp $
+ * $Id: vm_object.h,v 1.37 1997/09/01 02:55:50 bde Exp $
*/
/*
@@ -183,6 +183,7 @@ void vm_object_pmap_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_reference __P((vm_object_t));
void vm_object_shadow __P((vm_object_t *, vm_ooffset_t *, vm_size_t));
void vm_object_madvise __P((vm_object_t, vm_pindex_t, int, int));
+void vm_object_init2 __P((void));
#endif /* KERNEL */
#endif /* _VM_OBJECT_ */
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 6792a4c..38c43db 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vm_zone.c,v 1.5 1997/08/18 03:29:21 fsmp Exp $
+ * $Id: vm_zone.c,v 1.6 1997/09/01 03:17:32 bde Exp $
*/
#include <sys/param.h>
@@ -79,13 +79,14 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
int totsize;
if ((z->zflags & ZONE_BOOT) == 0) {
- z->zsize = size;
+ z->zsize = (size + 32 - 1) & ~(32 - 1);
simple_lock_init(&z->zlock);
z->zfreecnt = 0;
z->ztotal = 0;
z->zmax = 0;
z->zname = name;
z->znalloc = 0;
+ z->zitems = NULL;
if (zlist == 0) {
zlist = z;
@@ -183,8 +184,12 @@ zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems) {
z->znalloc = 0;
simple_lock_init(&z->zlock);
+ z->zitems = NULL;
for (i = 0; i < nitems; i++) {
- * (void **) item = z->zitems;
+ ((void **) item)[0] = z->zitems;
+#if defined(DIAGNOSTIC)
+ ((void **) item)[1] = (void *) ZENTRY_FREE;
+#endif
z->zitems = item;
(char *) item += z->zsize;
}
@@ -263,9 +268,12 @@ void *
_zget(vm_zone_t z) {
int i;
vm_page_t m;
- int nitems;
+ int nitems, nbytes;
void *item;
+ if (z == NULL)
+ panic("zget: null zone");
+
if (z->zflags & ZONE_INTERRUPT) {
item = (char *) z->zkva + z->zpagecount * PAGE_SIZE;
for( i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax)); i++) {
@@ -280,11 +288,26 @@ _zget(vm_zone_t z) {
}
nitems = (i * PAGE_SIZE) / z->zsize;
} else {
+ nbytes = z->zalloc * PAGE_SIZE;
/*
* We can wait, so just do normal kernel map allocation
*/
- item = (void *) kmem_alloc(kernel_map, z->zalloc * PAGE_SIZE);
- nitems = (z->zalloc * PAGE_SIZE) / z->zsize;
+ item = (void *) kmem_alloc(kernel_map, nbytes);
+
+#if 0
+ if (z->zname)
+ printf("zalloc: %s, %d (0x%x --> 0x%x)\n",
+ z->zname, z->zalloc, item, (char *)item + nbytes);
+ else
+ printf("zalloc: XXX(%d), %d (0x%x --> 0x%x)\n",
+ z->zsize, z->zalloc, item, (char *)item + nbytes);
+
+ for(i=0;i<nbytes;i+=PAGE_SIZE) {
+ printf("(%x, %x)", (char *) item + i, pmap_kextract( (char *) item + i));
+ }
+ printf("\n");
+#endif
+ nitems = nbytes / z->zsize;
}
z->ztotal += nitems;
@@ -294,14 +317,22 @@ _zget(vm_zone_t z) {
if (nitems != 0) {
nitems -= 1;
for (i = 0; i < nitems; i++) {
- * (void **) item = z->zitems;
+ ((void **) item)[0] = z->zitems;
+#if defined(DIAGNOSTIC)
+ ((void **) item)[1] = (void *) ZENTRY_FREE;
+#endif
z->zitems = item;
(char *) item += z->zsize;
}
z->zfreecnt += nitems;
} else if (z->zfreecnt > 0) {
item = z->zitems;
- z->zitems = *(void **) item;
+ z->zitems = ((void **) item)[0];
+#if defined(DIAGNOSTIC)
+ if (((void **) item)[1] != (void *) ZENTRY_FREE)
+ zerror(ZONE_ERROR_NOTFREE);
+ ((void **) item)[1] = 0;
+#endif
z->zfreecnt--;
} else {
item = NULL;
@@ -356,5 +387,28 @@ sysctl_vm_zone SYSCTL_HANDLER_ARGS
return (0);
}
+#if defined(DIAGNOSTIC)
+void
+zerror(int error) {
+ char *msg;
+ switch (error) {
+case ZONE_ERROR_INVALID:
+ msg = "zone: invalid zone";
+ break;
+case ZONE_ERROR_NOTFREE:
+ msg = "zone: entry not free";
+ break;
+case ZONE_ERROR_ALREADYFREE:
+ msg = "zone: freeing free entry";
+ break;
+default:
+ msg = "zone: invalid error";
+ break;
+ }
+
+ panic(msg);
+}
+#endif
+
SYSCTL_OID(_kern, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
NULL, 0, sysctl_vm_zone, "A", "Zone Info");
diff --git a/sys/vm/vm_zone.h b/sys/vm/vm_zone.h
index fe42f49..6ecc7d2 100644
--- a/sys/vm/vm_zone.h
+++ b/sys/vm/vm_zone.h
@@ -19,7 +19,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
- * $Id: vm_zone.h,v 1.4 1997/08/07 03:52:55 dyson Exp $
+ * $Id: vm_zone.h,v 1.5 1997/08/10 00:12:13 dyson Exp $
*/
#if !defined(_SYS_ZONE_H)
@@ -54,6 +54,7 @@ typedef struct vm_zone {
} *vm_zone_t;
+void zerror __P((int)) __dead2;
vm_zone_t zinit __P((char *name, int size, int nentries, int flags, int zalloc));
int zinitna __P((vm_zone_t z, struct vm_object *obj, char *name, int size,
int nentries, int flags, int zalloc));
@@ -64,6 +65,12 @@ void zfreei __P((vm_zone_t z, void *item));
void zbootinit __P((vm_zone_t z, char *name, int size, void *item, int nitems));
void * _zget __P((vm_zone_t z));
+#define ZONE_ERROR_INVALID 0
+#define ZONE_ERROR_NOTFREE 1
+#define ZONE_ERROR_ALREADYFREE 2
+
+
+#define ZENTRY_FREE 0x12342378
/*
* void *zalloc(vm_zone_t zone) --
* Returns an item from a specified zone.
@@ -75,12 +82,23 @@ static __inline__ void *
_zalloc(vm_zone_t z) {
void *item;
+#if defined(DIAGNOSTIC)
+ if (z == 0)
+ zerror(ZONE_ERROR_INVALID);
+#endif
+
if (z->zfreecnt <= z->zfreemin) {
return _zget(z);
}
item = z->zitems;
- z->zitems = *(void **) item;
+ z->zitems = ((void **) item)[0];
+#if defined(DIAGNOSTIC)
+ if (((void **) item)[1] != (void *) ZENTRY_FREE)
+ zerror(ZONE_ERROR_NOTFREE);
+ ((void **) item)[1] = 0;
+#endif
+
z->zfreecnt--;
z->znalloc++;
return item;
@@ -88,14 +106,19 @@ _zalloc(vm_zone_t z) {
static __inline__ void
_zfree(vm_zone_t z, void *item) {
- * (void **) item = z->zitems;
+ ((void **) item)[0] = z->zitems;
+#if defined(DIAGNOSTIC)
+ if ((( void **) item)[1] == (void *) ZENTRY_FREE)
+ zerror(ZONE_ERROR_ALREADYFREE);
+ ((void **) item)[1] = (void *) ZENTRY_FREE;
+#endif
z->zitems = item;
z->zfreecnt++;
}
static __inline__ void *
zalloc(vm_zone_t z) {
-#if NCPU > 1
+#if defined(SMP)
return zalloci(z);
#else
return _zalloc(z);
@@ -104,7 +127,7 @@ zalloc(vm_zone_t z) {
static __inline__ void
zfree(vm_zone_t z, void *item) {
-#if NCPU > 1
+#if defined(SMP)
zfreei(z, item);
#else
_zfree(z, item);
OpenPOWER on IntegriCloud