author     kib <kib@FreeBSD.org>    2017-04-12 09:20:02 +0000
committer  kib <kib@FreeBSD.org>    2017-04-12 09:20:02 +0000
commit     b4e0ebca416aa114600baa89a05bc5cb65c7c897 (patch)
tree       bb0619832f5c8d66cb2aa90eb8bad288ba94cc24
parent     05b5efe749e72bf21930479402237e4bfd2120d1 (diff)
MFC r316526:
Extract calculation of ioflags from the vm_pager_putpages flags into a helper.
-rw-r--r--    sys/vm/vnode_pager.c    46
-rw-r--r--    sys/vm/vnode_pager.h     2
2 files changed, 30 insertions, 18 deletions
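
Exporting the translation through sys/vm/vnode_pager.h lets a filesystem that implements its own VOP_PUTPAGES reuse it rather than duplicating the IO_* computation. Below is a minimal sketch of such a caller, not part of this commit: myfs_putpages() and myfs_write_pages() are hypothetical names, and the vop_putpages_args field names are assumed to follow the stock vnode_if definition (a_vp, a_m, a_count, a_sync, a_rtvals).

/*
 * Hypothetical example, not part of this commit: a filesystem-specific
 * putpages routine that reuses the new helper for the pager-flag to
 * IO_* flag translation.
 */
#include <sys/param.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

/* Hypothetical filesystem writer; declared only, not defined here. */
static int myfs_write_pages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int ioflags, int *rtvals);

static int
myfs_putpages(struct vop_putpages_args *ap)
{
	int ioflags;

	/* Map VM_PAGER_PUT_* / VM_PAGER_CLUSTER_OK into IO_* write flags. */
	ioflags = vnode_pager_putpages_ioflags(ap->a_sync);

	/* Hand the dirty pages to the filesystem's own writer. */
	return (myfs_write_pages(ap->a_vp, ap->a_m, ap->a_count, ioflags,
	    ap->a_rtvals));
}
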
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index a9d43b9..f9dfbf0 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1164,7 +1164,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
vm_ooffset_t poffset;
struct uio auio;
struct iovec aiov;
- int count, error, i, ioflags, maxsize, ncount, ppscheck;
+ int count, error, i, maxsize, ncount, ppscheck;
static struct timeval lastfail;
static int curfail;
@@ -1231,21 +1231,6 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
}
VM_OBJECT_WUNLOCK(object);
- /*
- * pageouts are already clustered, use IO_ASYNC to force a bawrite()
- * rather then a bdwrite() to prevent paging I/O from saturating
- * the buffer cache. Dummy-up the sequential heuristic to cause
- * large ranges to cluster. If neither IO_SYNC or IO_ASYNC is set,
- * the system decides how to cluster.
- */
- ioflags = IO_VMIO;
- if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
- ioflags |= IO_SYNC;
- else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
- ioflags |= IO_ASYNC;
- ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
- ioflags |= IO_SEQMAX << IO_SEQSHIFT;
-
aiov.iov_base = (caddr_t) 0;
aiov.iov_len = maxsize;
auio.uio_iov = &aiov;
@@ -1255,7 +1240,8 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
auio.uio_rw = UIO_WRITE;
auio.uio_resid = maxsize;
auio.uio_td = (struct thread *) 0;
- error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
+ error = VOP_WRITE(vp, &auio, vnode_pager_putpages_ioflags(flags),
+ curthread->td_ucred);
PCPU_INC(cnt.v_vnodeout);
PCPU_ADD(cnt.v_vnodepgsout, ncount);
@@ -1275,6 +1261,32 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
return rtvals[0];
}
+int
+vnode_pager_putpages_ioflags(int pager_flags)
+{
+ int ioflags;
+
+ /*
+ * Pageouts are already clustered, use IO_ASYNC to force a
+ * bawrite() rather then a bdwrite() to prevent paging I/O
+ * from saturating the buffer cache. Dummy-up the sequential
+ * heuristic to cause large ranges to cluster. If neither
+ * IO_SYNC or IO_ASYNC is set, the system decides how to
+ * cluster.
+ */
+ ioflags = IO_VMIO;
+ if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
+ ioflags |= IO_SYNC;
+ else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
+ ioflags |= IO_ASYNC;
+ ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0;
+#ifdef notyet
+ ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
+#endif
+ ioflags |= IO_SEQMAX << IO_SEQSHIFT;
+ return (ioflags);
+}
+
void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
{
diff --git a/sys/vm/vnode_pager.h b/sys/vm/vnode_pager.h
index 9fca464..bfcb98a 100644
--- a/sys/vm/vnode_pager.h
+++ b/sys/vm/vnode_pager.h
@@ -47,7 +47,7 @@ int vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m,
int count, int flags, int *rtvals);
int vnode_pager_local_getpages(struct vop_getpages_args *ap);
int vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap);
-
+int vnode_pager_putpages_ioflags(int pager_flags);
void vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
vm_offset_t end);
void vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written);
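
As a quick reference for the mapping the helper performs: VM_PAGER_PUT_SYNC or VM_PAGER_PUT_INVAL forces IO_SYNC, a missing VM_PAGER_CLUSTER_OK (when neither of those is set) forces IO_ASYNC, and IO_VMIO plus the maximal sequential hint are always set. The hypothetical self-check below, not part of the commit, spells that out for two representative flag combinations (KASSERT is only compiled in with INVARIANTS).

/*
 * Hypothetical self-check, not in the commit: sketches the expected
 * result of vnode_pager_putpages_ioflags() for two flag combinations.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static void
putpages_ioflags_selftest(void)
{
	int io;

	/* Synchronous, invalidating pageout: IO_SYNC and IO_INVAL both set. */
	io = vnode_pager_putpages_ioflags(VM_PAGER_PUT_SYNC |
	    VM_PAGER_PUT_INVAL);
	KASSERT((io & (IO_VMIO | IO_SYNC | IO_INVAL)) ==
	    (IO_VMIO | IO_SYNC | IO_INVAL),
	    ("unexpected ioflags %#x", io));

	/* Clustering allowed: neither IO_SYNC nor IO_ASYNC is forced. */
	io = vnode_pager_putpages_ioflags(VM_PAGER_CLUSTER_OK);
	KASSERT((io & (IO_SYNC | IO_ASYNC)) == 0,
	    ("unexpected ioflags %#x", io));
}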