author     attilio <attilio@FreeBSD.org>  2013-02-24 16:41:36 +0000
committer  attilio <attilio@FreeBSD.org>  2013-02-24 16:41:36 +0000
commit     12289fcebc849f6eda9d53961cd2088c39310599 (patch)
tree       7047ab7ee00ab2136a9fcd785a9ad30af0de326f /sys/vm
parent     6b1291b4d1f698048c27a58ed4ac41557801cf6d (diff)
Retire the old UMA primitive uma_zone_set_obj() and replace it with the
more modern uma_zone_reserve_kva().  The difference is that the new
primitive no longer relies on a VM object to allocate pages, and the
slab allocator no longer uses any specific locking, completing the
operation with atomic operations instead.  Where possible,
uma_small_alloc() is used instead and the uk_kva member becomes unused.

The subsequent cleanup also brings along the removal of the
VM_OBJECT_LOCK_INIT() macro, which is no longer used as the code can be
easily cleaned up to perform a single mtx_init(), private to
vm_object.c.  For the same reason, _vm_object_allocate() becomes
private as well.

Sponsored by:	EMC / Isilon storage division
Reviewed by:	alc
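For orientation, a minimal sketch of how a consumer's setup changes
with the new primitive; the zone name, item type, and count below are
illustrative, not taken from this commit:

	/* Before: the caller donated a VM object to back the zone. */
	static struct vm_object foo_zone_obj;	/* hypothetical */

	uma_zone_set_obj(foo_zone, &foo_zone_obj, nitems);

	/* After: the caller only reserves KVA sized for nitems. */
	foo_zone = uma_zcreate("FOO", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	if (!uma_zone_reserve_kva(foo_zone, nitems))
		panic("foo zone: KVA reservation failed");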
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/swap_pager.c |   3
-rw-r--r--  sys/vm/uma.h        |  11
-rw-r--r--  sys/vm/uma_core.c   | 104
-rw-r--r--  sys/vm/uma_int.h    |   4
-rw-r--r--  sys/vm/vm_map.c     |   3
-rw-r--r--  sys/vm/vm_object.c  |  19
-rw-r--r--  sys/vm/vm_object.h  |   4
-rw-r--r--  sys/vm/vm_radix.c   |  10
8 files changed, 78 insertions, 80 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 44bff25..d254b6b 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -343,7 +343,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
static uma_zone_t swap_zone;
-static struct vm_object swap_zone_obj;
/*
* pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
@@ -554,7 +553,7 @@ swap_pager_swap_init(void)
if (swap_zone == NULL)
panic("failed to create swap_zone.");
do {
- if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
+ if (uma_zone_reserve_kva(swap_zone, n))
break;
/*
* if the allocation failed, try a zone two thirds the
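The hunk ends inside swap_pager_swap_init()'s retry loop.  As a
reading aid, a hedged reconstruction of the complete loop after the
change; the shrinking step is inferred from the truncated comment
above and is not shown in this hunk:

	do {
		if (uma_zone_reserve_kva(swap_zone, n))
			break;
		/*
		 * If the allocation failed, retry with a zone two
		 * thirds the size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);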
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 3abfd3e..549b1b3 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -432,24 +432,21 @@ void uma_reclaim(void);
void uma_set_align(int align);
/*
- * Switches the backing object of a zone
+ * Switches the backing object of a zone to VM_ALLOC_NOOBJ.
*
* Arguments:
* zone The zone to update.
- * obj The VM object to use for future allocations.
- * size The size of the object to allocate.
+ * nitems The number of items previewed to be allocated.
*
* Returns:
* 0 if kva space can not be allocated
* 1 if successful
*
* Discussion:
- * A NULL object can be used and uma will allocate one for you. Setting
- * the size will limit the amount of memory allocated to this zone.
+ * The size will limit the amount of memory allocated to this zone.
*
*/
-struct vm_object;
-int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
+int uma_zone_reserve_kva(uma_zone_t zone, int nitems);
/*
* Sets a high limit on the number of items allowed in a zone
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 2d5b555..d298064 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
@@ -213,7 +214,7 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
/* Prototypes.. */
-static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
+static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
@@ -1030,50 +1031,53 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
-obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
- vm_object_t object;
+ TAILQ_HEAD(, vm_page) alloctail;
+ u_long npages;
vm_offset_t retkva, zkva;
- vm_page_t p;
- int pages, startpages;
+ vm_page_t p, p_next;
uma_keg_t keg;
+ TAILQ_INIT(&alloctail);
keg = zone_first_keg(zone);
- object = keg->uk_obj;
- retkva = 0;
- /*
- * This looks a little weird since we're getting one page at a time.
- */
- VM_OBJECT_LOCK(object);
- p = TAILQ_LAST(&object->memq, pglist);
- pages = p != NULL ? p->pindex + 1 : 0;
- startpages = pages;
- zkva = keg->uk_kva + pages * PAGE_SIZE;
- for (; bytes > 0; bytes -= PAGE_SIZE) {
- p = vm_page_alloc(object, pages,
- VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
- if (p == NULL) {
- if (pages != startpages)
- pmap_qremove(retkva, pages - startpages);
- while (pages != startpages) {
- pages--;
- p = TAILQ_LAST(&object->memq, pglist);
- vm_page_unwire(p, 0);
- vm_page_free(p);
- }
- retkva = 0;
- goto done;
+ npages = howmany(bytes, PAGE_SIZE);
+ while (npages > 0) {
+ p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
+ VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+ if (p != NULL) {
+ /*
+ * Since the page does not belong to an object, its
+ * listq is unused.
+ */
+ TAILQ_INSERT_TAIL(&alloctail, p, listq);
+ npages--;
+ continue;
+ }
+ if (wait & M_WAITOK) {
+ VM_WAIT;
+ continue;
}
+
+ /*
+ * Page allocation failed, free intermediate pages and
+ * exit.
+ */
+ TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
+ vm_page_unwire(p, 0);
+ vm_page_free(p);
+ }
+ return (NULL);
+ }
+ *flags = UMA_SLAB_PRIV;
+ zkva = keg->uk_kva +
+ atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
+ retkva = zkva;
+ TAILQ_FOREACH(p, &alloctail, listq) {
pmap_qenter(zkva, &p, 1);
- if (retkva == 0)
- retkva = zkva;
zkva += PAGE_SIZE;
- pages += 1;
}
-done:
- VM_OBJECT_UNLOCK(object);
- *flags = UMA_SLAB_PRIV;
return ((void *)retkva);
}
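The structural point of noobj_alloc() is that concurrent allocations
no longer serialize on a VM object lock to discover the next page
index; each caller claims a disjoint KVA window with a single atomic
fetch-and-add on the keg.  A minimal sketch of that reservation idiom;
carve_kva() is an illustrative name, not a function introduced by this
commit:

	/*
	 * uk_kva is the base of the pre-reserved region and uk_offset
	 * the next free byte offset.  atomic_fetchadd_long() returns
	 * the old value, so concurrent callers receive non-overlapping
	 * [zkva, zkva + bytes) windows without taking any lock.
	 */
	static vm_offset_t
	carve_kva(uma_keg_t keg, int bytes)
	{

		return (keg->uk_kva +
		    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)));
	}

A monotonically increasing offset is sufficient because the zone is
marked UMA_ZONE_NOFREE below: slabs are never returned, so a window is
never reused.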
@@ -3012,7 +3016,7 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
/* See uma.h */
int
-uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
+uma_zone_reserve_kva(uma_zone_t zone, int count)
{
uma_keg_t keg;
vm_offset_t kva;
@@ -3024,21 +3028,25 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
if (pages * keg->uk_ipers < count)
pages++;
- kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
-
- if (kva == 0)
- return (0);
- if (obj == NULL)
- obj = vm_object_allocate(OBJT_PHYS, pages);
- else {
- VM_OBJECT_LOCK_INIT(obj, "uma object");
- _vm_object_allocate(OBJT_PHYS, pages, obj);
- }
+#ifdef UMA_MD_SMALL_ALLOC
+ if (keg->uk_ppera > 1) {
+#else
+ if (1) {
+#endif
+ kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+ if (kva == 0)
+ return (0);
+ } else
+ kva = 0;
ZONE_LOCK(zone);
keg->uk_kva = kva;
- keg->uk_obj = obj;
+ keg->uk_offset = 0;
keg->uk_maxpages = pages;
- keg->uk_allocf = obj_alloc;
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
+#else
+ keg->uk_allocf = noobj_alloc;
+#endif
keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
ZONE_UNLOCK(zone);
return (1);
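Two details above are easy to miss.  First, the slab arithmetic rounds
up: with hypothetical numbers count = 120 and keg->uk_ipers = 50 items
per slab, pages = 120 / 50 = 2, and since 2 * 50 < 120 the count is
bumped to 3, reserving 3 * UMA_SLAB_SIZE bytes of KVA.  Second, on
UMA_MD_SMALL_ALLOC platforms (those with a direct map) a keg with
single-page slabs skips the reservation entirely (kva = 0) and is
served by uma_small_alloc(); only multi-page kegs (uk_ppera > 1) fall
back to noobj_alloc() over the reserved range.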
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index aaec926..e3b0c55 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -221,8 +221,8 @@ struct uma_keg {
uma_alloc uk_allocf; /* Allocation function */
uma_free uk_freef; /* Free routine */
- struct vm_object *uk_obj; /* Zone specific object */
- vm_offset_t uk_kva; /* Base kva for zones with objs */
+ u_long uk_offset; /* Zone specific next page index */
+ vm_offset_t uk_kva; /* Zone base kva */
uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
u_int16_t uk_pgoff; /* Offset to uma_slab struct */
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 26de826..35ac468 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -125,7 +125,6 @@ static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
-static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int ize, int flags);
@@ -303,7 +302,7 @@ vmspace_alloc(min, max)
void
vm_init2(void)
{
- uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+ uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
maxproc * 2 + maxfiles);
vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index a2f7df3..534152d 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -194,20 +194,23 @@ vm_object_zinit(void *mem, int size, int flags)
vm_object_t object;
object = (vm_object_t)mem;
- bzero(&object->mtx, sizeof(object->mtx));
- VM_OBJECT_LOCK_INIT(object, "standard object");
/* These are true for any object that has been freed */
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->shadow_count = 0;
+
+ /* It relies on vm object mutex to be initialized afterwards. */
return (0);
}
-void
-_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
+static void
+_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object,
+ const char *mtxname)
{
+ bzero(&object->mtx, sizeof(object->mtx));
+ mtx_init(&object->mtx, "vm object", mtxname, MTX_DEF | MTX_DUPOK);
TAILQ_INIT(&object->memq);
LIST_INIT(&object->shadow_head);
@@ -267,17 +270,15 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
- kernel_object);
+ kernel_object, "kernel object");
#if VM_NRESERVLEVEL > 0
kernel_object->flags |= OBJ_COLORED;
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
- VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
- kmem_object);
+ kmem_object, "kmem object");
#if VM_NRESERVLEVEL > 0
kmem_object->flags |= OBJ_COLORED;
kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
@@ -403,7 +404,7 @@ vm_object_allocate(objtype_t type, vm_pindex_t size)
vm_object_t object;
object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
- _vm_object_allocate(type, size, object);
+ _vm_object_allocate(type, size, object, NULL);
return (object);
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index be454f5..2cfd37f 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -208,9 +208,6 @@ extern struct vm_object kmem_object_store;
#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
mtx_assert(&(object)->mtx, (type))
-#define VM_OBJECT_LOCK_INIT(object, type) \
- mtx_init(&(object)->mtx, "vm object", \
- (type), MTX_DEF | MTX_DUPOK)
#define VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx)
#define VM_OBJECT_MTX(object) (&(object)->mtx)
#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx)
@@ -241,7 +238,6 @@ vm_object_cache_is_empty(vm_object_t object)
}
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
-void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);
void vm_object_collapse (vm_object_t);
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index 8a7fb01..41afed8 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -63,7 +63,7 @@
#endif
#ifndef VM_RADIX_BOOT_CACHE
-#define VM_RADIX_BOOT_CACHE 1500
+#define VM_RADIX_BOOT_CACHE 150
#endif
/*
@@ -373,7 +373,6 @@ vm_radix_node_zone_dtor(void *mem, int size __unused, void *arg __unused)
static void
vm_radix_init(void *arg __unused)
{
- int nitems;
vm_radix_node_zone = uma_zcreate("RADIX NODE",
sizeof(struct vm_radix_node), NULL,
@@ -383,10 +382,9 @@ vm_radix_init(void *arg __unused)
NULL,
#endif
NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
- nitems = uma_zone_set_max(vm_radix_node_zone, cnt.v_page_count);
- if (nitems < cnt.v_page_count)
- panic("%s: unexpected requested number of items", __func__);
- uma_prealloc(vm_radix_node_zone, nitems);
+ if (!uma_zone_reserve_kva(vm_radix_node_zone, cnt.v_page_count))
+ panic("%s: unable to create new zone", __func__);
+ uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
boot_cache_cnt = VM_RADIX_BOOT_CACHE + 1;
}
SYSINIT(vm_radix_init, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_init, NULL);
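The radix-node zone now follows the reserve-then-prealloc idiom:
reserve KVA for the worst case of one node per physical page, then
populate the zone at boot while sleeping for memory is still
permissible.  A condensed sketch of the pattern (the dtor and debug
arguments are simplified from the diff above):

	zone = uma_zcreate("RADIX NODE", sizeof(struct vm_radix_node),
	    NULL, NULL, NULL, NULL, VM_RADIX_PAD,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	if (!uma_zone_reserve_kva(zone, cnt.v_page_count))
		panic("radix zone: KVA reservation failed");
	/* Fill the zone up front; later allocations must not sleep. */
	uma_prealloc(zone, cnt.v_page_count);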