summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2007-11-25 07:42:34 +0000
committeralc <alc@FreeBSD.org>2007-11-25 07:42:34 +0000
commit9ba5385124b0c51a31a2a273e5a51fdd2a4d38d7 (patch)
tree926a6d6a6f491ff4ece43b27c18b3918ff898295
parent3cb99b81472362410bc7031011b04e4ca9eeaee7 (diff)
downloadFreeBSD-src-9ba5385124b0c51a31a2a273e5a51fdd2a4d38d7.zip
FreeBSD-src-9ba5385124b0c51a31a2a273e5a51fdd2a4d38d7.tar.gz
Tidy up: Add comments. Eliminate the pointless
malloc_type_allocated(..., 0) calls that occur when contigmalloc() has failed. Eliminate the acquisition and release of the page queues lock from vm_page_release_contig(). Rename contigmalloc2() to contigmapping(), reflecting what it does.
-rw-r--r--sys/vm/vm_contig.c42
1 file changed, 23 insertions, 19 deletions
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index b40a951..7a6eb51 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -96,6 +96,7 @@ vm_contig_launder_page(vm_page_t m)
struct mount *mp;
int vfslocked;
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
object = m->object;
if (!VM_OBJECT_TRYLOCK(object))
return (EAGAIN);
@@ -148,8 +149,7 @@ vm_contig_launder(int queue)
vm_page_t m, next;
int error;
- for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
- next = TAILQ_NEXT(m, pageq);
+ TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
/* Skip marker pages */
if ((m->flags & PG_MARKER) != 0)
@@ -166,35 +166,38 @@ vm_contig_launder(int queue)
return (FALSE);
}
+/*
+ * Frees the given physically contiguous pages.
+ *
+ * N.B.: Any pages with PG_ZERO set must, in fact, be zero filled.
+ */
static void
-vm_page_release_contigl(vm_page_t m, vm_pindex_t count)
+vm_page_release_contig(vm_page_t m, vm_pindex_t count)
{
+
while (count--) {
+ /* Leave PG_ZERO unchanged. */
vm_page_free_toq(m);
m++;
}
}
-static void
-vm_page_release_contig(vm_page_t m, vm_pindex_t count)
-{
- vm_page_lock_queues();
- vm_page_release_contigl(m, count);
- vm_page_unlock_queues();
-}
-
+/*
+ * Allocates a region from the kernel address map, inserts the
+ * given physically contiguous pages into the kernel object,
+ * creates a wired mapping from the region to the pages, and
+ * returns the region's starting virtual address. If M_ZERO is
+ * specified through the given flags, then the pages are zeroed
+ * before they are mapped.
+ */
static void *
-contigmalloc2(vm_page_t m, vm_pindex_t npages, int flags)
+contigmapping(vm_page_t m, vm_pindex_t npages, int flags)
{
vm_object_t object = kernel_object;
vm_map_t map = kernel_map;
vm_offset_t addr, tmp_addr;
vm_pindex_t i;
- /*
- * Allocate kernel VM, unfree and assign the physical pages to
- * it and return kernel VM pointer.
- */
vm_map_lock(map);
if (vm_map_findspace(map, vm_map_min(map), npages << PAGE_SHIFT, &addr)
!= KERN_SUCCESS) {
@@ -230,7 +233,7 @@ contigmalloc(
unsigned long alignment,
unsigned long boundary)
{
- void * ret;
+ void *ret;
vm_page_t pages;
unsigned long npgs;
int actl, actmax, inactl, inactmax, tries;
@@ -263,11 +266,12 @@ again:
}
ret = NULL;
} else {
- ret = contigmalloc2(pages, npgs, flags);
+ ret = contigmapping(pages, npgs, flags);
if (ret == NULL)
vm_page_release_contig(pages, npgs);
+ else
+ malloc_type_allocated(type, npgs << PAGE_SHIFT);
}
- malloc_type_allocated(type, ret == NULL ? 0 : npgs << PAGE_SHIFT);
return (ret);
}
OpenPOWER on IntegriCloud