author     alc <alc@FreeBSD.org>  2003-06-22 21:35:41 +0000
committer  alc <alc@FreeBSD.org>  2003-06-22 21:35:41 +0000
commit     fa54a6610e522e820a2a6679afa8a9a46818f211 (patch)
tree       35bfb2aa8b61e54802910fb4be459989a2516fbe /sys/vm
parent     01da790999f6e45aef0a3c53cbf1b030acca4371 (diff)
Maintain a lock on the vm object of interest throughout vm_fault(),
releasing the lock only if we are about to sleep (e.g.,
vm_pager_get_pages() or vm_pager_has_pages()).  If we sleep, we have
marked the vm object with the paging-in-progress flag.
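
The locking discipline this change establishes can be illustrated with a
short sketch.  This is not code from the commit: the wrapper
example_check_resident() is hypothetical and only shows a caller holding
the object lock across a pager query while the paging-in-progress count
keeps the object pinned if the pager has to drop the lock to sleep (as
vnode_pager_haspage() now does around VOP_BMAP()).

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

/*
 * Hypothetical caller following the pattern introduced by this commit:
 * acquire the object lock, raise the paging-in-progress count, and only
 * then ask the pager whether the page is resident.  If the pager must
 * sleep it may temporarily release the object lock, but the PIP count
 * keeps the object from going away until vm_object_pip_wakeup().
 */
static boolean_t
example_check_resident(vm_object_t object, vm_pindex_t pindex)
{
	boolean_t rv;

	VM_OBJECT_LOCK(object);
	vm_object_pip_add(object, 1);
	rv = vm_pager_has_page(object, pindex, NULL, NULL);
	vm_object_pip_wakeup(object);
	VM_OBJECT_UNLOCK(object);
	return (rv);
}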
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_fault.c     19
-rw-r--r--  sys/vm/vm_page.c       2
-rw-r--r--  sys/vm/vm_pager.h      2
-rw-r--r--  sys/vm/vnode_pager.c   4
4 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d3ccd93..ad739bf 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -138,8 +138,7 @@ unlock_map(struct faultstate *fs)
static void
_unlock_things(struct faultstate *fs, int dealloc)
{
- GIANT_REQUIRED;
- VM_OBJECT_LOCK(fs->object);
+
vm_object_pip_wakeup(fs->object);
VM_OBJECT_UNLOCK(fs->object);
if (fs->object != fs->first_object) {
@@ -286,7 +285,6 @@ RetryFault:;
fs.vp = vnode_pager_lock(fs.first_object);
VM_OBJECT_LOCK(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
- VM_OBJECT_UNLOCK(fs.first_object);
fs.lookup_still_valid = TRUE;
@@ -620,13 +618,13 @@ readrest:
* object with zeros.
*/
if (fs.object != fs.first_object) {
- VM_OBJECT_LOCK(fs.object);
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
+ VM_OBJECT_LOCK(fs.object);
}
fs.first_m = NULL;
@@ -646,8 +644,6 @@ readrest:
("object loop %p", next_object));
VM_OBJECT_LOCK(next_object);
vm_object_pip_add(next_object, 1);
- VM_OBJECT_UNLOCK(next_object);
- VM_OBJECT_LOCK(fs.object);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
@@ -730,8 +726,6 @@ readrest:
*/
vm_page_copy(fs.m, fs.first_m);
}
- if (is_first_object_locked)
-/*XXX*/ VM_OBJECT_UNLOCK(fs.first_object);
if (fs.m) {
/*
* We no longer need the old page or object.
@@ -742,7 +736,6 @@ readrest:
* fs.object != fs.first_object due to above
* conditional
*/
- VM_OBJECT_LOCK(fs.object);
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
/*
@@ -751,6 +744,8 @@ readrest:
fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
+ if (!is_first_object_locked)
+ VM_OBJECT_LOCK(fs.object);
cnt.v_cow_faults++;
} else {
prot &= ~VM_PROT_WRITE;
@@ -787,6 +782,7 @@ readrest:
unlock_and_deallocate(&fs);
goto RetryFault;
}
+ VM_OBJECT_UNLOCK(fs.object);
/*
* To avoid trying to write_lock the map while another process
@@ -800,6 +796,7 @@ readrest:
&fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
map_generation = fs.map->timestamp;
+ VM_OBJECT_LOCK(fs.object);
/*
* If we don't need the page any longer, put it on the active
* list (the easiest thing to do here). If no one needs it,
@@ -1083,8 +1080,10 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* (Because the source is wired down, the page will be in
* memory.)
*/
+ VM_OBJECT_LOCK(src_object);
src_m = vm_page_lookup(src_object,
OFF_TO_IDX(dst_offset + src_offset));
+ VM_OBJECT_UNLOCK(src_object);
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
@@ -1137,7 +1136,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- GIANT_REQUIRED;
+ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
object = m->object;
pindex = m->pindex;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7cb4607..ac13102 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1736,7 +1736,9 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(object);
VM_WAIT;
+ VM_OBJECT_LOCK(object);
vm_page_lock_queues();
goto retry_alloc;
}
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index c578c36..4f2cf12 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -165,7 +165,7 @@ vm_pager_has_page(
) {
boolean_t ret;
- GIANT_REQUIRED;
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
return (ret);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index bfc255d..69f4b42 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -214,7 +214,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- GIANT_REQUIRED;
+ VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -245,8 +245,10 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
+ VM_OBJECT_UNLOCK(object);
err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
after, before);
+ VM_OBJECT_LOCK(object);
if (err)
return TRUE;
if (bn == -1)