author     alc <alc@FreeBSD.org>  2012-01-14 23:04:27 +0000
committer  alc <alc@FreeBSD.org>  2012-01-14 23:04:27 +0000
commit     413b89a7e9cb148ffb08855ef27c973cfbeeb5c0 (patch)
tree       b4b7b9f68b84a7f86b3d64ae99d131efcdb106e7
parent     1ea7d0d666a38d5bda1efae01c48a1f3142a416c (diff)
download   FreeBSD-src-413b89a7e9cb148ffb08855ef27c973cfbeeb5c0.zip
           FreeBSD-src-413b89a7e9cb148ffb08855ef27c973cfbeeb5c0.tar.gz
Neither tmpfs_nocacheread() nor tmpfs_mappedwrite() needs to call
vm_object_pip_{add,subtract}() on the swap object, because the swap object
can't be destroyed while the vnode is exclusively locked.  Moreover, even if
the swap object could have been destroyed during tmpfs_nocacheread() and
tmpfs_mappedwrite(), this code is broken because vm_object_pip_subtract()
does not wake up the sleeping thread that is trying to destroy the swap
object.

Free invalid pages after an I/O error.  There is no virtue in keeping them
around in the swap object, creating more work for the page daemon.  (I
believe that any non-busy page in the swap object will now always be valid.)

vm_pager_get_pages() does not return a standard errno, so its return value
should not be returned by tmpfs without translation to an errno value.

There is no reason for the wakeup on vpg in tmpfs_mappedwrite() to occur
with the swap object locked.

Eliminate printf()s from tmpfs_nocacheread() and tmpfs_mappedwrite().  (The
swap pager already spams your console if data corruption is imminent.)

Reviewed by:	kib
MFC after:	3 weeks
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c    2
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c  40
2 files changed, 21 insertions, 21 deletions
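
The heart of the change is the error path around vm_pager_get_pages(): its
VM_PAGER_* return value is translated to EIO instead of being handed back as
if it were an errno, and the page left invalid by the failed I/O is freed
rather than kept in the swap object.  The following is a minimal sketch of
that pattern, for illustration only; the helper name read_page_or_fail() is
hypothetical, and the wiring and vm_page_zero_invalid() handling done by the
real tmpfs functions are omitted.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/*
 * Illustrative sketch only (hypothetical helper, not part of the commit):
 * grab a page, page it in if the pager has it, and on failure free the
 * invalid page and return a real errno instead of the VM_PAGER_* code.
 * On success the page is returned busy, as in tmpfs_nocacheread(), and the
 * caller is expected to vm_page_wakeup() it when done.
 */
static int
read_page_or_fail(vm_object_t obj, vm_pindex_t idx, vm_page_t *mp)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_LOCK(obj);
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL &&
	    vm_pager_has_page(obj, idx, NULL, NULL)) {
		rv = vm_pager_get_pages(obj, &m, 1, 0);
		if (rv != VM_PAGER_OK) {
			/*
			 * Don't leave an invalid page in the object for the
			 * page daemon to deal with, and don't return the
			 * VM_PAGER_* value as if it were an errno.
			 */
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(obj);
			return (EIO);
		}
	}
	VM_OBJECT_UNLOCK(obj);
	*mp = m;
	return (0);
}
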
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index e733f19..94bc0bf 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -929,6 +929,7 @@ retry:
 					vm_page_sleep(m, "tmfssz");
 					goto retry;
 				}
+				MPASS(m->valid == VM_PAGE_BITS_ALL);
 			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
 				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
 				if (m == NULL) {
@@ -957,7 +958,6 @@ retry:
 			}
 			if (m != NULL) {
 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
-				MPASS(m->valid == VM_PAGE_BITS_ALL);
 				vm_page_dirty(m);
 				vm_pager_page_unswapped(m);
 			}
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 0abd3c7..f0dfe36 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -437,18 +437,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
     vm_offset_t offset, size_t tlen, struct uio *uio)
 {
 	vm_page_t m;
-	int error;
+	int error, rv;
 
 	VM_OBJECT_LOCK(tobj);
-	vm_object_pip_add(tobj, 1);
 	m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
 	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
-			error = vm_pager_get_pages(tobj, &m, 1, 0);
-			if (error != 0) {
-				printf("tmpfs get pages from pager error [read]\n");
-				goto out;
+			rv = vm_pager_get_pages(tobj, &m, 1, 0);
+			if (rv != VM_PAGER_OK) {
+				vm_page_lock(m);
+				vm_page_free(m);
+				vm_page_unlock(m);
+				VM_OBJECT_UNLOCK(tobj);
+				return (EIO);
 			}
 		} else
 			vm_page_zero_invalid(m, TRUE);
@@ -456,12 +458,10 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	VM_OBJECT_UNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	VM_OBJECT_LOCK(tobj);
-out:
 	vm_page_lock(m);
 	vm_page_unwire(m, TRUE);
 	vm_page_unlock(m);
 	vm_page_wakeup(m);
-	vm_object_pip_subtract(tobj, 1);
 	VM_OBJECT_UNLOCK(tobj);
 
 	return (error);
@@ -624,7 +624,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
 	vm_offset_t offset;
 	off_t addr;
 	size_t tlen;
-	int error;
+	int error, rv;
 
 	error = 0;
 
@@ -664,14 +664,16 @@ lookupvpg:
 	}
 nocache:
 	VM_OBJECT_LOCK(tobj);
-	vm_object_pip_add(tobj, 1);
 	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
 	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
-			error = vm_pager_get_pages(tobj, &tpg, 1, 0);
-			if (error != 0) {
-				printf("tmpfs get pages from pager error [write]\n");
+			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
+			if (rv != VM_PAGER_OK) {
+				vm_page_lock(tpg);
+				vm_page_free(tpg);
+				vm_page_unlock(tpg);
+				error = EIO;
 				goto out;
 			}
 		} else
@@ -685,9 +687,6 @@ nocache:
 		pmap_copy_page(vpg, tpg);
 	}
 	VM_OBJECT_LOCK(tobj);
-out:
-	if (vobj != NULL)
-		VM_OBJECT_LOCK(vobj);
 	if (error == 0) {
 		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
 		    ("parts of tpg invalid"));
@@ -697,12 +696,13 @@ out:
 	vm_page_unwire(tpg, TRUE);
 	vm_page_unlock(tpg);
 	vm_page_wakeup(tpg);
-	if (vpg != NULL)
+out:
+	VM_OBJECT_UNLOCK(tobj);
+	if (vpg != NULL) {
+		VM_OBJECT_LOCK(vobj);
 		vm_page_wakeup(vpg);
-	if (vobj != NULL)
 		VM_OBJECT_UNLOCK(vobj);
-	vm_object_pip_subtract(tobj, 1);
-	VM_OBJECT_UNLOCK(tobj);
+	}
 
 	return (error);
 }