summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2009-07-12 23:31:20 +0000
committer: alc <alc@FreeBSD.org> 2009-07-12 23:31:20 +0000
commit: ea60573817570658a0528f9e36f4b65d1dc10b05 (patch)
tree: a3d5747cacdafc73df95feaef86c79ecdf8fc918 /sys/vm
parent: c86a9c376273cc3a3010c0b5ad122b391a1b2ecd (diff)
downloadFreeBSD-src-ea60573817570658a0528f9e36f4b65d1dc10b05.zip
FreeBSD-src-ea60573817570658a0528f9e36f4b65d1dc10b05.tar.gz
Add support to the virtual memory system for configuring machine-
dependent memory attributes:

Rename vm_cache_mode_t to vm_memattr_t. The new name reflects the fact
that there are machine-dependent memory attributes that have nothing to
do with controlling the cache's behavior.

Introduce vm_object_set_memattr() for setting the default memory
attributes that will be given to an object's pages.

Introduce and use pmap_page_{get,set}_memattr() for getting and setting
a page's machine-dependent memory attributes. Add full support for these
functions on amd64 and i386 and stubs for them on the other
architectures. The function pmap_page_set_memattr() is also responsible
for any other machine-dependent aspects of changing a page's memory
attributes, such as flushing the cache or updating the direct map.

The uses include kmem_alloc_contig(), vm_page_alloc(), and the device
pager:

  kmem_alloc_contig() can now be used to allocate kernel memory with
  non-default memory attributes on amd64 and i386.

  vm_page_alloc() and the device pager will set the memory attributes
  for the real or fictitious page according to the object's default
  memory attributes.

Update the various pmap functions on amd64 and i386 that map pages to
incorporate each page's memory attributes in the mapping.

Notes: (1) Inherent to this design are safety features that prevent the
specification of inconsistent memory attributes by different mappings on
amd64 and i386. In addition, the device pager provides a warning when a
device driver creates a fictitious page with memory attributes that are
inconsistent with the real page that the fictitious page is an alias
for. (2) Storing the machine-dependent memory attributes for amd64 and
i386 as a dedicated "int" in "struct md_page" represents a compromise
between space efficiency and the ease of MFCing these changes to
RELENG_7.

In collaboration with: jhb
Approved by: re (kib)
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/device_pager.c69
-rw-r--r--sys/vm/pmap.h8
-rw-r--r--sys/vm/vm.h6
-rw-r--r--sys/vm/vm_contig.c14
-rw-r--r--sys/vm/vm_extern.h2
-rw-r--r--sys/vm/vm_object.c31
-rw-r--r--sys/vm/vm_object.h2
-rw-r--r--sys/vm/vm_page.c29
-rw-r--r--sys/vm/vm_phys.c5
-rw-r--r--sys/vm/vm_phys.h2
10 files changed, 125 insertions, 43 deletions
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 69d6dca..7993405 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -70,9 +70,9 @@ static struct mtx dev_pager_mtx;
static uma_zone_t fakepg_zone;
-static vm_page_t dev_pager_getfake(vm_paddr_t);
+static vm_page_t dev_pager_getfake(vm_paddr_t, vm_memattr_t);
static void dev_pager_putfake(vm_page_t);
-static void dev_pager_updatefake(vm_page_t, vm_paddr_t);
+static void dev_pager_updatefake(vm_page_t, vm_paddr_t, vm_memattr_t);
struct pagerops devicepagerops = {
.pgo_init = dev_pager_init,
@@ -210,7 +210,8 @@ dev_pager_getpages(object, m, count, reqpage)
{
vm_pindex_t offset;
vm_paddr_t paddr;
- vm_page_t page;
+ vm_page_t m_paddr, page;
+ vm_memattr_t memattr;
struct cdev *dev;
int i, ret;
int prot;
@@ -222,6 +223,7 @@ dev_pager_getpages(object, m, count, reqpage)
dev = object->handle;
page = m[reqpage];
offset = page->pindex;
+ memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
csw = dev_refthread(dev);
if (csw == NULL)
@@ -235,14 +237,20 @@ dev_pager_getpages(object, m, count, reqpage)
KASSERT(ret == 0, ("dev_pager_getpage: map function returns error"));
td->td_fpop = fpop;
dev_relthread(dev);
-
+ /* If "paddr" is a real page, perform a sanity check on "memattr". */
+ if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
+ pmap_page_get_memattr(m_paddr) != memattr) {
+ memattr = pmap_page_get_memattr(m_paddr);
+ printf(
+ "WARNING: A device driver has set \"memattr\" inconsistently.\n");
+ }
if ((page->flags & PG_FICTITIOUS) != 0) {
/*
* If the passed in reqpage page is a fake page, update it with
* the new physical address.
*/
VM_OBJECT_LOCK(object);
- dev_pager_updatefake(page, paddr);
+ dev_pager_updatefake(page, paddr, memattr);
if (count > 1) {
vm_page_lock_queues();
for (i = 0; i < count; i++) {
@@ -256,7 +264,7 @@ dev_pager_getpages(object, m, count, reqpage)
* Replace the passed in reqpage page with our own fake page and
* free up the all of the original pages.
*/
- page = dev_pager_getfake(paddr);
+ page = dev_pager_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
vm_page_lock_queues();
@@ -296,47 +304,56 @@ dev_pager_haspage(object, pindex, before, after)
}
/*
- * Instantiate a fictitious page. Unlike physical memory pages, only
- * the machine-independent fields must be initialized.
+ * Create a fictitious page with the specified physical address and memory
+ * attribute.
*/
static vm_page_t
-dev_pager_getfake(paddr)
- vm_paddr_t paddr;
+dev_pager_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
vm_page_t m;
- m = uma_zalloc(fakepg_zone, M_WAITOK);
-
+ m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
+ m->phys_addr = paddr;
+ /* Fictitious pages don't use "segind". */
m->flags = PG_FICTITIOUS;
+ /* Fictitious pages don't use "order" or "pool". */
+ pmap_page_init(m);
m->oflags = VPO_BUSY;
- /* Fictitious pages don't use "act_count". */
- m->dirty = 0;
- m->busy = 0;
- m->queue = PQ_NONE;
- m->object = NULL;
-
m->wire_count = 1;
- m->hold_count = 0;
- m->phys_addr = paddr;
-
+ if (memattr != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, memattr);
return (m);
}
+/*
+ * Release a fictitious page.
+ */
static void
-dev_pager_putfake(m)
- vm_page_t m;
+dev_pager_putfake(vm_page_t m)
{
+
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_putfake: bad page");
+ /* Restore the default memory attribute to "phys_addr". */
+ if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
uma_zfree(fakepg_zone, m);
}
+/*
+ * Update the given fictitious page to the specified physical address and
+ * memory attribute.
+ */
static void
-dev_pager_updatefake(m, paddr)
- vm_page_t m;
- vm_paddr_t paddr;
+dev_pager_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
+
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_updatefake: bad page");
+ /* Restore the default memory attribute before changing "phys_addr". */
+ if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
m->phys_addr = paddr;
+ if (memattr != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, memattr);
}
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 486855c..22d6118 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -79,10 +79,16 @@ struct pmap_statistics {
};
typedef struct pmap_statistics *pmap_statistics_t;
+/*
+ * Each machine dependent implementation is expected to provide:
+ *
+ * vm_memattr_t pmap_page_get_memattr(vm_page_t);
+ * boolean_t pmap_page_is_mapped(vm_page_t);
+ * void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+ */
#include <machine/pmap.h>
#ifdef _KERNEL
-struct proc;
struct thread;
/*
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 832c335..9159e93 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -64,10 +64,10 @@
#include <machine/vm.h>
/*
- * The exact set of cache control codes is machine dependent. However, every
- * machine is required to define VM_CACHE_DEFAULT.
+ * The exact set of memory attributes is machine dependent. However, every
+ * machine is required to define VM_MEMATTR_DEFAULT.
*/
-typedef char vm_cache_mode_t; /* cache control codes */
+typedef char vm_memattr_t; /* memory attribute codes */
typedef char vm_inherit_t; /* inheritance codes */
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
index d2b6703..7358cb0 100644
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -194,7 +194,8 @@ vm_page_release_contig(vm_page_t m, vm_pindex_t count)
* before they are mapped.
*/
static vm_offset_t
-contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
+contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
+ int flags)
{
vm_object_t object = kernel_object;
vm_offset_t addr, tmp_addr;
@@ -210,6 +211,8 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
vm_map_unlock(map);
VM_OBJECT_LOCK(object);
for (tmp_addr = addr; tmp_addr < addr + size; tmp_addr += PAGE_SIZE) {
+ if (memattr != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, memattr);
vm_page_insert(m, object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
@@ -236,7 +239,7 @@ contigmalloc(
void *ret;
ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
- alignment, boundary, VM_CACHE_DEFAULT);
+ alignment, boundary, VM_MEMATTR_DEFAULT);
if (ret != NULL)
malloc_type_allocated(type, round_page(size));
return (ret);
@@ -245,7 +248,7 @@ contigmalloc(
vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, unsigned long alignment, unsigned long boundary,
- vm_cache_mode_t mode)
+ vm_memattr_t memattr)
{
vm_offset_t ret;
vm_page_t pages;
@@ -256,8 +259,7 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
npgs = size >> PAGE_SHIFT;
tries = 0;
retry:
- pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary,
- mode);
+ pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary);
if (pages == NULL) {
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_page_lock_queues();
@@ -282,7 +284,7 @@ again:
}
ret = 0;
} else {
- ret = contigmapping(map, size, pages, flags);
+ ret = contigmapping(map, size, pages, memattr, flags);
if (ret == 0)
vm_page_release_contig(pages, npgs);
}
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index ec21a3a..53f7694 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -43,7 +43,7 @@ int kernacc(void *, int, int);
vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
- unsigned long boundary, vm_cache_mode_t mode);
+ unsigned long boundary, vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 3b3124c..10d58e7 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -222,6 +222,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->size = size;
object->generation = 1;
object->ref_count = 1;
+ object->memattr = VM_MEMATTR_DEFAULT;
object->flags = 0;
object->uip = NULL;
object->charge = 0;
@@ -290,6 +291,36 @@ vm_object_clear_flag(vm_object_t object, u_short bits)
object->flags &= ~bits;
}
+/*
+ * Sets the default memory attribute for the specified object. Pages
+ * that are allocated to this object are by default assigned this memory
+ * attribute.
+ *
+ * Presently, this function must be called before any pages are allocated
+ * to the object. In the future, this requirement may be relaxed for
+ * "default" and "swap" objects.
+ */
+int
+vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
+{
+
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ switch (object->type) {
+ case OBJT_DEFAULT:
+ case OBJT_DEVICE:
+ case OBJT_PHYS:
+ case OBJT_SWAP:
+ case OBJT_VNODE:
+ if (!TAILQ_EMPTY(&object->memq))
+ return (KERN_FAILURE);
+ break;
+ case OBJT_DEAD:
+ return (KERN_INVALID_ARGUMENT);
+ }
+ object->memattr = memattr;
+ return (KERN_SUCCESS);
+}
+
void
vm_object_pip_add(vm_object_t object, short i)
{
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index bf88cfd..0e57aca 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -92,6 +92,7 @@ struct vm_object {
int generation; /* generation ID */
int ref_count; /* How many refs?? */
int shadow_count; /* how many objects that this is a shadow for */
+ vm_memattr_t memattr; /* default memory attribute for pages */
objtype_t type; /* type of pager */
u_short flags; /* see below */
u_short pg_color; /* (c) color of first page in obj */
@@ -213,6 +214,7 @@ void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference (vm_object_t);
void vm_object_reference_locked(vm_object_t);
+int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7148fdc..b9c4ebc 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1109,12 +1109,15 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
*/
KASSERT(m != NULL, ("vm_page_alloc: missing page"));
- KASSERT(m->queue == PQ_NONE, ("vm_page_alloc: page %p has unexpected queue %d",
- m, m->queue));
+ KASSERT(m->queue == PQ_NONE,
+ ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
+ KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
+ ("vm_page_alloc: page %p has unexpected memattr %d", m,
+ pmap_page_get_memattr(m)));
if ((m->flags & PG_CACHED) != 0) {
KASSERT(m->valid != 0,
("vm_page_alloc: cached page %p is invalid", m));
@@ -1157,9 +1160,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
m->act_count = 0;
mtx_unlock(&vm_page_queue_free_mtx);
- if ((req & VM_ALLOC_NOOBJ) == 0)
+ if (object != NULL) {
+ if (object->memattr != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, object->memattr);
vm_page_insert(m, object, pindex);
- else
+ } else
m->pindex = pindex;
/*
@@ -1415,6 +1420,16 @@ vm_page_free_toq(vm_page_t m)
m->flags &= ~PG_ZERO;
vm_page_enqueue(PQ_HOLD, m);
} else {
+ /*
+ * Restore the default memory attribute to the page.
+ */
+ if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+
+ /*
+ * Insert the page into the physical memory allocator's
+ * cache/free page queues.
+ */
mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_FREE;
cnt.v_free_count++;
@@ -1664,6 +1679,12 @@ vm_page_cache(vm_page_t m)
object->generation++;
/*
+ * Restore the default memory attribute to the page.
+ */
+ if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+ pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+
+ /*
* Insert the page into the object's collection of cached pages
* and the physical memory allocator's cache/free page queues.
*/
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 58e71d0..a245462 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -588,7 +588,7 @@ vm_phys_zero_pages_idle(void)
*/
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
- unsigned long alignment, unsigned long boundary, vm_cache_mode_t mode)
+ unsigned long alignment, unsigned long boundary)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -698,6 +698,9 @@ done:
("vm_phys_alloc_contig: page %p is busy", m));
KASSERT(m->dirty == 0,
("vm_phys_alloc_contig: page %p is dirty", m));
+ KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
+ ("vm_phys_alloc_contig: page %p has unexpected memattr %d",
+ m, pmap_page_get_memattr(m)));
if ((m->flags & PG_CACHED) != 0) {
m->valid = 0;
m_object = m->object;
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 483ab91..0e012c3 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -43,7 +43,7 @@
void vm_phys_add_page(vm_paddr_t pa);
vm_page_t vm_phys_alloc_contig(unsigned long npages,
vm_paddr_t low, vm_paddr_t high,
- unsigned long alignment, unsigned long boundary, vm_cache_mode_t mode);
+ unsigned long alignment, unsigned long boundary);
vm_page_t vm_phys_alloc_pages(int pool, int order);
vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
void vm_phys_free_pages(vm_page_t m, int order);
OpenPOWER on IntegriCloud