author     alc <alc@FreeBSD.org>  2011-11-16 16:46:09 +0000
committer  alc <alc@FreeBSD.org>  2011-11-16 16:46:09 +0000
commit     3692d0165926e89d0ad0d746888b510d4e49348c (patch)
tree       272387d8a035403f23822efe5dfd1df09f424088 /sys/vm/vm_contig.c
parent     1c37e71ee838a9da8d383e6e57d29d25820b52d5 (diff)
Refactor the code that performs physically contiguous memory allocation,
yielding a new public interface, vm_page_alloc_contig().  This new function
addresses some of the limitations of the current interfaces, contigmalloc()
and kmem_alloc_contig().  For example, the physically contiguous memory that
is allocated with those interfaces can only be allocated to the kernel vm
object and must be mapped into the kernel virtual address space.  It also
provides functionality that vm_phys_alloc_contig() doesn't, such as wiring
the returned pages.  Moreover, unlike that function, it respects the low
water marks on the paging queues and wakes up the page daemon when
necessary.  That said, at present, this new function can't be applied to all
types of vm objects.  However, that restriction will be eliminated in the
coming weeks.

From a design standpoint, this change also addresses an inconsistency
between vm_phys_alloc_contig() and the other vm_phys_alloc*() functions.
Specifically, vm_phys_alloc_contig() manipulated vm_page fields that other
functions in vm/vm_phys.c didn't.  Moreover, vm_phys_alloc_contig() knew
about vnodes and reservations.  Now, vm_page_alloc_contig() is responsible
for these things.

Reviewed by:	kib
Discussed with:	jhb
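As a rough illustration of the new interface, the sketch below shows how a
caller might request physically contiguous pages.  It is modeled on the
kmem_alloc_contig() call site in the diff that follows; the function name
alloc_contig_example and its parameters are illustrative placeholders, not
part of this commit.

/*
 * Minimal sketch of a vm_page_alloc_contig() caller, modeled on the
 * kmem_alloc_contig() call site in this change.  All names here are
 * illustrative placeholders.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static vm_page_t
alloc_contig_example(vm_object_t obj, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high)
{
	vm_page_t m;

	VM_OBJECT_LOCK(obj);
	m = vm_page_alloc_contig(obj, pindex,
	    VM_ALLOC_SYSTEM |		/* allow use of the system reserve */
	    VM_ALLOC_NOBUSY,		/* do not return the pages busied */
	    npages,			/* number of contiguous pages */
	    low, high,			/* physical address bounds */
	    PAGE_SIZE,			/* alignment of the first page */
	    0,				/* no boundary-crossing restriction */
	    VM_MEMATTR_DEFAULT);
	VM_OBJECT_UNLOCK(obj);
	return (m);
}

On failure, the kmem_* callers below do not give up immediately: they retry
a bounded number of times (once for M_NOWAIT, three times otherwise),
calling vm_contig_grow_cache() between attempts.  Note also that while the
log message says the new function can wire the returned pages, the kmem_*
callers here instead wire the finished mapping with vm_map_wire().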
Diffstat (limited to 'sys/vm/vm_contig.c')
-rw-r--r--  sys/vm/vm_contig.c  119
1 file changed, 52 insertions(+), 67 deletions(-)
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index 50f95a5..ea2c904 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -82,7 +82,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
-#include <vm/vm_phys.h>
#include <vm/vm_extern.h>
static int
@@ -185,22 +184,6 @@ vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
}
/*
- * Frees the given physically contiguous pages.
- *
- * N.B.: Any pages with PG_ZERO set must, in fact, be zero filled.
- */
-static void
-vm_page_release_contig(vm_page_t m, vm_pindex_t count)
-{
-
- while (count--) {
- /* Leave PG_ZERO unchanged. */
- vm_page_free_toq(m);
- m++;
- }
-}
-
-/*
* Increase the number of cached pages.
*/
void
@@ -238,9 +221,10 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
vm_object_t object = kernel_object;
- vm_offset_t addr, i, offset;
+ vm_offset_t addr;
+ vm_ooffset_t end_offset, offset;
vm_page_t m;
- int tries;
+ int pflags, tries;
size = round_page(size);
vm_map_lock(map);
@@ -252,11 +236,19 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_object_reference(object);
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
+ if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
+ pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
+ else
+ pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
+ if (flags & M_ZERO)
+ pflags |= VM_ALLOC_ZERO;
VM_OBJECT_LOCK(object);
- for (i = 0; i < size; i += PAGE_SIZE) {
+ end_offset = offset + size;
+ for (; offset < end_offset; offset += PAGE_SIZE) {
tries = 0;
retry:
- m = vm_phys_alloc_contig(1, low, high, PAGE_SIZE, 0);
+ m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
+ low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
@@ -277,9 +269,6 @@ retry:
vm_map_unlock(map);
return (0);
}
- if (memattr != VM_MEMATTR_DEFAULT)
- pmap_page_set_memattr(m, memattr);
- vm_page_insert(m, object, OFF_TO_IDX(offset + i));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
@@ -299,65 +288,61 @@ retry:
* specified through the given flags, then the pages are zeroed
* before they are mapped.
*/
-static vm_offset_t
-contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
- int flags)
+vm_offset_t
+kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+ vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
+ vm_memattr_t memattr)
{
vm_object_t object = kernel_object;
- vm_offset_t addr, tmp_addr;
+ vm_offset_t addr;
+ vm_ooffset_t offset;
+ vm_page_t end_m, m;
+ int pflags, tries;
+ size = round_page(size);
vm_map_lock(map);
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
return (0);
}
+ offset = addr - VM_MIN_KERNEL_ADDRESS;
vm_object_reference(object);
- vm_map_insert(map, object, addr - VM_MIN_KERNEL_ADDRESS,
- addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
- vm_map_unlock(map);
+ vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
+ VM_PROT_ALL, 0);
+ if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
+ pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
+ else
+ pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
+ if (flags & M_ZERO)
+ pflags |= VM_ALLOC_ZERO;
VM_OBJECT_LOCK(object);
- for (tmp_addr = addr; tmp_addr < addr + size; tmp_addr += PAGE_SIZE) {
- if (memattr != VM_MEMATTR_DEFAULT)
- pmap_page_set_memattr(m, memattr);
- vm_page_insert(m, object,
- OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
- if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
- m->valid = VM_PAGE_BITS_ALL;
- m++;
- }
- VM_OBJECT_UNLOCK(object);
- vm_map_wire(map, addr, addr + size,
- VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
- return (addr);
-}
-
-vm_offset_t
-kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
- vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
- vm_memattr_t memattr)
-{
- vm_offset_t ret;
- vm_page_t pages;
- u_long npgs;
- int tries;
-
- size = round_page(size);
- npgs = size >> PAGE_SHIFT;
tries = 0;
retry:
- pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary);
- if (pages == NULL) {
+ m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
+ atop(size), low, high, alignment, boundary, memattr);
+ if (m == NULL) {
+ VM_OBJECT_UNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
+ vm_map_unlock(map);
vm_contig_grow_cache(tries, low, high);
+ vm_map_lock(map);
+ VM_OBJECT_LOCK(object);
tries++;
goto retry;
}
- ret = 0;
- } else {
- ret = contigmapping(map, size, pages, memattr, flags);
- if (ret == 0)
- vm_page_release_contig(pages, npgs);
+ vm_map_delete(map, addr, addr + size);
+ vm_map_unlock(map);
+ return (0);
+ }
+ end_m = m + atop(size);
+ for (; m < end_m; m++) {
+ if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+ m->valid = VM_PAGE_BITS_ALL;
}
- return (ret);
+ VM_OBJECT_UNLOCK(object);
+ vm_map_unlock(map);
+ vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
+ VM_MAP_WIRE_NOHOLES);
+ return (addr);
}