author      alc <alc@FreeBSD.org>  2012-12-09 00:32:38 +0000
committer   alc <alc@FreeBSD.org>  2012-12-09 00:32:38 +0000
commit      02094caa2c2fce94821d84bde50b9373b47f97e7 (patch)
tree        aed54f82a0f3ede6ae38c3c71be27832f9a87d0a /sys/vm
parent      c82d89183db93c8a4a4a1db712fa5464d28ff9a3 (diff)
In the past four years, we've added two new vm object types. Each time,
similar changes had to be made in various places throughout the
machine-independent virtual memory layer to support the new vm object
type. However, in most of these places, it's actually not the type of the
vm object that matters to us but instead certain attributes of its pages.
For example, OBJT_DEVICE, OBJT_MGTDEVICE, and OBJT_SG objects contain
fictitious pages. In other words, in most of these places, we were testing
the vm object's type to determine if it contained fictitious (or
unmanaged) pages.

To both simplify the code in these places and make the addition of future
vm object types easier, this change introduces two new vm object flags
that describe attributes of the vm object's pages, specifically, whether
they are fictitious or unmanaged.

Reviewed and tested by:	kib
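
For illustration only, the following minimal, standalone C sketch models the
pattern this change adopts: callers test attribute flags that are set once at
object-allocation time instead of enumerating object types at every call
site. The struct, enum, and helper names here (vm_object_model,
has_fictitious_pages_by_*) are invented for this example and only echo the
kernel identifiers; the authoritative definitions are in the vm_object.h and
vm_object.c hunks below.

#include <stdio.h>

/*
 * Hypothetical stand-ins for the kernel's object types and page-attribute
 * flags; the real values live in sys/vm/vm_object.h.
 */
typedef enum {
	OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE,
	OBJT_SG, OBJT_MGTDEVICE, OBJT_PHYS
} objtype_t;

#define	OBJ_FICTITIOUS	0x0001	/* object contains fictitious pages */
#define	OBJ_UNMANAGED	0x0002	/* object contains unmanaged pages */

struct vm_object_model {
	objtype_t	type;
	unsigned int	flags;
};

/*
 * Old style: every call site must enumerate the relevant object types and
 * must be revisited whenever a new type is added.
 */
static int
has_fictitious_pages_by_type(const struct vm_object_model *obj)
{

	return (obj->type == OBJT_DEVICE || obj->type == OBJT_SG ||
	    obj->type == OBJT_MGTDEVICE);
}

/*
 * New style: the attribute is recorded in flags when the object is
 * allocated, so call sites test a single bit.
 */
static int
has_fictitious_pages_by_flag(const struct vm_object_model *obj)
{

	return ((obj->flags & OBJ_FICTITIOUS) != 0);
}

int
main(void)
{
	struct vm_object_model dev = { OBJT_DEVICE,
	    OBJ_FICTITIOUS | OBJ_UNMANAGED };
	struct vm_object_model vn = { OBJT_VNODE, 0 };

	printf("device object: by type %d, by flag %d\n",
	    has_fictitious_pages_by_type(&dev),
	    has_fictitious_pages_by_flag(&dev));
	printf("vnode object:  by type %d, by flag %d\n",
	    has_fictitious_pages_by_type(&vn),
	    has_fictitious_pages_by_flag(&vn));
	return (0);
}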
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c    |  4
-rw-r--r--  sys/vm/vm_map.c      | 15
-rw-r--r--  sys/vm/vm_meter.c    |  2
-rw-r--r--  sys/vm/vm_object.c   | 35
-rw-r--r--  sys/vm/vm_object.h   |  2
-rw-r--r--  sys/vm/vm_page.c     | 13
-rw-r--r--  sys/vm/vm_pageout.c  |  6
7 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 516138af..6865782 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -968,8 +968,8 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
VM_OBJECT_LOCK(object);
}
}
- if (first_object->type != OBJT_DEVICE &&
- first_object->type != OBJT_PHYS && first_object->type != OBJT_SG) {
+ /* Neither fictitious nor unmanaged pages can be cached. */
+ if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
if (fs->first_pindex < distance)
pindex = 0;
else
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 4ed0691..14e5985 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2324,8 +2324,8 @@ done:
*/
vm_fault_unwire(map, entry->start, entry->end,
entry->object.vm_object != NULL &&
- (entry->object.vm_object->type == OBJT_DEVICE ||
- entry->object.vm_object->type == OBJT_SG));
+ (entry->object.vm_object->flags &
+ OBJ_FICTITIOUS) != 0);
}
}
KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
@@ -2445,8 +2445,8 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
saved_start = entry->start;
saved_end = entry->end;
fictitious = entry->object.vm_object != NULL &&
- (entry->object.vm_object->type == OBJT_DEVICE ||
- entry->object.vm_object->type == OBJT_SG);
+ (entry->object.vm_object->flags &
+ OBJ_FICTITIOUS) != 0;
/*
* Release the map lock, relying on the in-transition
* mark. Mark the map busy for fork.
@@ -2544,8 +2544,8 @@ done:
*/
vm_fault_unwire(map, entry->start, entry->end,
entry->object.vm_object != NULL &&
- (entry->object.vm_object->type == OBJT_DEVICE ||
- entry->object.vm_object->type == OBJT_SG));
+ (entry->object.vm_object->flags &
+ OBJ_FICTITIOUS) != 0);
}
}
next_entry_done:
@@ -2681,8 +2681,7 @@ vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
vm_fault_unwire(map, entry->start, entry->end,
entry->object.vm_object != NULL &&
- (entry->object.vm_object->type == OBJT_DEVICE ||
- entry->object.vm_object->type == OBJT_SG));
+ (entry->object.vm_object->flags & OBJ_FICTITIOUS) != 0);
entry->wired_count = 0;
}
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 784bc61..b5bb0fa 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -200,7 +200,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
* synchronization should not impair the accuracy of
* the reported statistics.
*/
- if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
+ if ((object->flags & OBJ_FICTITIOUS) != 0) {
/*
* Devices, like /dev/mem, will badly skew our totals.
*/
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index f95ab54..32b0779 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -212,15 +212,35 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->root = NULL;
object->type = type;
+ switch (type) {
+ case OBJT_DEAD:
+ panic("_vm_object_allocate: can't create OBJT_DEAD");
+ case OBJT_DEFAULT:
+ case OBJT_SWAP:
+ object->flags = OBJ_ONEMAPPING;
+ break;
+ case OBJT_DEVICE:
+ case OBJT_SG:
+ object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
+ break;
+ case OBJT_MGTDEVICE:
+ object->flags = OBJ_FICTITIOUS;
+ break;
+ case OBJT_PHYS:
+ object->flags = OBJ_UNMANAGED;
+ break;
+ case OBJT_VNODE:
+ object->flags = 0;
+ break;
+ default:
+ panic("_vm_object_allocate: type %d is undefined", type);
+ }
object->size = size;
object->generation = 1;
object->ref_count = 1;
object->memattr = VM_MEMATTR_DEFAULT;
- object->flags = 0;
object->cred = NULL;
object->charge = 0;
- if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
- object->flags = OBJ_ONEMAPPING;
object->pg_color = 0;
object->handle = NULL;
object->backing_object = NULL;
@@ -1064,7 +1084,7 @@ shadowlookup:
(tobject->flags & OBJ_ONEMAPPING) == 0) {
goto unlock_tobject;
}
- } else if (tobject->type == OBJT_PHYS)
+ } else if ((tobject->flags & OBJ_UNMANAGED) != 0)
goto unlock_tobject;
m = vm_page_lookup(tobject, tpindex);
if (m == NULL && advise == MADV_WILLNEED) {
@@ -1834,7 +1854,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
int wirings;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_PHYS) ||
+ KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
if (object->resident_page_count == 0)
@@ -1918,7 +1938,7 @@ skipmemq:
* pages are moved to the cache queue.
*
* This operation should only be performed on objects that
- * contain managed pages.
+ * contain non-fictitious, managed pages.
*
* The object must be locked.
*/
@@ -1929,8 +1949,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_page_t p, next;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- KASSERT((object->type != OBJT_DEVICE && object->type != OBJT_SG &&
- object->type != OBJT_PHYS),
+ KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
return;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index daafd03..b584239 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -165,6 +165,8 @@ struct vm_object {
/*
* Flags
*/
+#define OBJ_FICTITIOUS 0x0001 /* (c) contains fictitious pages */
+#define OBJ_UNMANAGED 0x0002 /* (c) contains unmanaged pages */
#define OBJ_ACTIVE 0x0004 /* active objects */
#define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
#define OBJ_NOSPLIT 0x0010 /* dont split this object */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index ecd86de..c952c05 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1414,9 +1414,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
#if VM_NRESERVLEVEL > 0
- } else if (object == NULL || object->type == OBJT_DEVICE ||
- object->type == OBJT_SG ||
- (object->flags & OBJ_COLORED) == 0 ||
+ } else if (object == NULL || (object->flags & (OBJ_COLORED |
+ OBJ_FICTITIOUS)) != OBJ_COLORED ||
(m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
} else {
@@ -1491,10 +1490,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
m->flags = flags;
mtx_unlock(&vm_page_queue_free_mtx);
m->aflags = 0;
- if (object == NULL || object->type == OBJT_PHYS)
- m->oflags = VPO_UNMANAGED;
- else
- m->oflags = 0;
+ m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
+ VPO_UNMANAGED : 0;
if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
m->oflags |= VPO_BUSY;
if (req & VM_ALLOC_WIRED) {
@@ -1510,7 +1507,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
if (object != NULL) {
/* Ignore device objects; the pager sets "memattr" for them. */
if (object->memattr != VM_MEMATTR_DEFAULT &&
- object->type != OBJT_DEVICE && object->type != OBJT_SG)
+ (object->flags & OBJ_FICTITIOUS) == 0)
pmap_page_set_memattr(m, object->memattr);
vm_page_insert(m, object, pindex);
} else
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 610f6e0..b5e9747 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -705,14 +705,14 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
int actcount, remove_mode;
VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
- if (first_object->type == OBJT_DEVICE ||
- first_object->type == OBJT_SG)
+ if ((first_object->flags & OBJ_FICTITIOUS) != 0)
return;
for (object = first_object;; object = backing_object) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- if (object->type == OBJT_PHYS || object->paging_in_progress)
+ if ((object->flags & OBJ_UNMANAGED) != 0 ||
+ object->paging_in_progress != 0)
goto unlock_return;
remove_mode = 0;