path: root/sys/vm/vm_page.c
author    bde <bde@FreeBSD.org>  1996-01-27 00:13:33 +0000
committer bde <bde@FreeBSD.org>  1996-01-27 00:13:33 +0000
commit    0f99c673dca6e0c25f96b0853d856adb0dbd0b08 (patch)
tree      2905e6db07fad0f6d741359d4ee37c7cdccd380b /sys/vm/vm_page.c
parent    ae614ac2900c22bdd2fe050e5b9afd85ad375e68 (diff)
Added a `boundary' arg to vm_page_alloc_contig().  Previously the only
way to avoid crossing a 64K DMA boundary was to specify an alignment
greater than the size even when the alignment didn't matter, and for
sizes larger than a page this reduced the chance of finding enough
contiguous pages.  E.g., allocations of 8K not crossing a 64K boundary
previously had to be allocated on 8K boundaries; now they can be
allocated on any 4K boundary except (64 * n + 60)K.

Fixed bugs in vm_page_alloc_contig():
- the last page wasn't allocated for sizes smaller than a page.
- failures of kmem_alloc_pageable() weren't handled.

Mutated vm_page_alloc_contig() to create a more convenient interface
named contigmalloc().  This is the same as the one in 1.1.5 except it
has `low' and `high' args, and the `alignment' and `boundary' args are
multipliers instead of masks.
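[Editor's illustration, not part of the commit: a minimal sketch of how a
driver might call the new contigmalloc() interface, following the signature
added in the diff below.  The helper name example_dma_alloc() and the 16MB
`high' limit are assumptions made only for this example.]

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/malloc.h>

	/*
	 * Hypothetical helper: allocate an 8K DMA buffer below 16MB that
	 * must not cross a 64K boundary.  With the old interface this
	 * required an alignment of 8K; with the new `boundary' arg any
	 * 4K-aligned run that stays inside one 64K window is acceptable.
	 */
	static void *
	example_dma_alloc(void)
	{
		void *buf;

		buf = contigmalloc(8 * 1024,	/* size (rounded up to pages) */
		    M_DEVBUF,			/* malloc type */
		    M_NOWAIT,			/* fail instead of sleeping */
		    0ul,			/* low physical address */
		    16ul * 1024 * 1024,		/* high: below 16MB */
		    PAGE_SIZE,			/* alignment: any page boundary */
		    64ul * 1024);		/* boundary: never cross 64K */
		if (buf == NULL)
			printf("example_dma_alloc: no contiguous pages\n");
		return (buf);
	}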
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--  sys/vm/vm_page.c  52
1 files changed, 42 insertions, 10 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 288f140..7ef6e9f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.45 1996/01/04 21:13:23 wollman Exp $
+ * $Id: vm_page.c,v 1.46 1996/01/19 04:00:10 dyson Exp $
*/
/*
@@ -71,6 +71,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
@@ -686,31 +687,47 @@ vm_page_alloc(object, pindex, page_req)
return (m);
}
-vm_offset_t
-vm_page_alloc_contig(size, low, high, alignment)
- vm_offset_t size;
- vm_offset_t low;
- vm_offset_t high;
- vm_offset_t alignment;
+/*
+ * This interface is for merging with malloc() someday.
+ * Even if we never implement compaction so that contiguous allocation
+ * works after initialization time, malloc()'s data structures are good
+ * for statistics and for allocations of less than a page.
+ */
+void *
+contigmalloc(size, type, flags, low, high, alignment, boundary)
+ unsigned long size; /* should be size_t here and for malloc() */
+ int type;
+ int flags;
+ unsigned long low;
+ unsigned long high;
+ unsigned long alignment;
+ unsigned long boundary;
{
int i, s, start;
vm_offset_t addr, phys, tmp_addr;
vm_page_t pga = vm_page_array;
+ size = round_page(size);
+ if (size == 0)
+ panic("vm_page_alloc_contig: size must not be 0");
if ((alignment & (alignment - 1)) != 0)
panic("vm_page_alloc_contig: alignment must be a power of 2");
+ if ((boundary & (boundary - 1)) != 0)
+ panic("vm_page_alloc_contig: boundary must be a power of 2");
start = 0;
s = splhigh();
again:
/*
- * Find first page in array that is free, within range, and aligned.
+ * Find first page in array that is free, within range, aligned, and
+ * such that the boundary won't be crossed.
*/
for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
if ((pga[i].queue == PQ_FREE) &&
(phys >= low) && (phys < high) &&
- ((phys & (alignment - 1)) == 0))
+ ((phys & (alignment - 1)) == 0) &&
+ (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
break;
}
@@ -742,6 +759,10 @@ again:
* return kernel VM pointer.
*/
tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
+ if (addr == 0) {
+ splx(s);
+ return (NULL);
+ }
for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];
@@ -763,7 +784,18 @@ again:
}
splx(s);
- return (addr);
+ return ((void *)addr);
+}
+
+vm_offset_t
+vm_page_alloc_contig(size, low, high, alignment)
+ vm_offset_t size;
+ vm_offset_t low;
+ vm_offset_t high;
+ vm_offset_t alignment;
+{
+ return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
+ alignment, 0ul));
}
/*
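[Editor's illustration, not part of the commit: the boundary test added in
the search loop above treats an allocation as acceptable when its first and
last byte agree in every bit above the boundary mask, i.e. when both fall in
the same boundary-sized window.  A standalone sketch in plain C (assumed
user-space code, not kernel code) demonstrating the test:]

	#include <stdio.h>

	/*
	 * Returns nonzero iff `size' bytes starting at physical address
	 * `phys' would straddle a `boundary'-aligned line.  `boundary'
	 * must be a power of 2, as the new panic check enforces.
	 */
	static int
	crosses_boundary(unsigned long phys, unsigned long size,
	    unsigned long boundary)
	{
		return (((phys ^ (phys + size - 1)) & ~(boundary - 1)) != 0);
	}

	int
	main(void)
	{
		/* 8K at 56K stays inside the first 64K window ... */
		printf("%d\n", crosses_boundary(56 * 1024, 8 * 1024, 64 * 1024));
		/* ... but 8K at 60K straddles 64K, matching the (64n + 60)K
		   exception noted in the commit message. */
		printf("%d\n", crosses_boundary(60 * 1024, 8 * 1024, 64 * 1024));
		return (0);
	}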