author		dillon <dillon@FreeBSD.org>	2003-01-20 17:46:48 +0000
committer	dillon <dillon@FreeBSD.org>	2003-01-20 17:46:48 +0000
commit		e7be7a0432de3e374a6d4cfedc0ef5c8b264a021 (patch)
tree		9f8f2a306dcef88eb5da009ebff53701aaaeee2b /sys/kern
parent		a752ec7b60312f295643dc7eb37ec1318d8c7412 (diff)
Close the remaining user address mapping races for physical
I/O, CAM, and AIO.  Still TODO: streamline useracc() checks.

Reviewed by:	alc, tegge
MFC after:	7 days
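The heart of the change is that vmapbuf() now reports failure instead of silently mapping a stale or undersized user buffer. As an illustrative caller-side sketch only (not part of the commit), using the physio-style names from the diff below (bp, uio, error, doerror), the contract every consumer must now follow looks roughly like this:

	/*
	 * Illustrative sketch: useracc() is only an advisory pre-check.
	 * The user mapping can still shrink or be torn down before
	 * vmapbuf() wires the pages, so the vmapbuf() return value is
	 * the authoritative test.
	 */
	if (uio->uio_segflg == UIO_USERSPACE) {
		if (!useracc(bp->b_data, bp->b_bufsize,
		    bp->b_iocmd == BIO_READ ? VM_PROT_WRITE : VM_PROT_READ) ||
		    vmapbuf(bp) < 0) {
			error = EFAULT;
			goto doerror;
		}
	}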
Diffstat (limited to 'sys/kern')
 sys/kern/kern_physio.c | 12
 sys/kern/vfs_aio.c     | 15
 sys/kern/vfs_bio.c     | 32
 3 files changed, 50 insertions(+), 9 deletions(-)
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index f61b55c..01e3750 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -95,13 +95,23 @@ physio(dev_t dev, struct uio *uio, int ioflag)
bp->b_blkno = btodb(bp->b_offset);
if (uio->uio_segflg == UIO_USERSPACE) {
+ /*
+ * Note that useracc() alone is not a
+ * sufficient test. vmapbuf() can still fail
+ * due to a smaller file mapped into a larger
+ * area of VM, or if userland races against
+ * vmapbuf() after the useracc() check.
+ */
if (!useracc(bp->b_data, bp->b_bufsize,
bp->b_iocmd == BIO_READ ?
VM_PROT_WRITE : VM_PROT_READ)) {
error = EFAULT;
goto doerror;
}
- vmapbuf(bp);
+ if (vmapbuf(bp) < 0) {
+ error = EFAULT;
+ goto doerror;
+ }
}
DEV_STRATEGY(bp);
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index bffdf71..9532ff2 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -1124,8 +1124,19 @@ aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
}
}
- /* Bring buffer into kernel space. */
- vmapbuf(bp);
+ /*
+ * Bring buffer into kernel space.
+ *
+ * Note that useracc() alone is not a
+ * sufficient test. vmapbuf() can still fail
+ * due to a smaller file mapped into a larger
+ * area of VM, or if userland races against
+ * vmapbuf() after the useracc() check.
+ */
+ if (vmapbuf(bp) < 0) {
+ error = EFAULT;
+ goto doerror;
+ }
s = splbio();
aiocbe->bp = bp;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index b8a156c..9de4fd1 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3547,13 +3547,18 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
* All requests are (re)mapped into kernel VA space.
* Notice that we use b_bufsize for the size of the buffer
* to be mapped. b_bcount might be modified by the driver.
+ *
+ * Note that even if the caller determines that the address space should
+ * be valid, a race or a smaller-file mapped into a larger space may
+ * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
+ * check the return value.
*/
-void
+int
vmapbuf(struct buf *bp)
{
caddr_t addr, kva;
vm_offset_t pa;
- int pidx;
+ int pidx, i;
struct vm_page *m;
struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
@@ -3573,11 +3578,25 @@ vmapbuf(struct buf *bp)
* the userland address space, and kextract is only guarenteed
* to work for the kernland address space (see: sparc64 port).
*/
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+retry:
+ i = vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
+ (bp->b_iocmd == BIO_READ) ?
+ (VM_PROT_READ|VM_PROT_WRITE) : VM_PROT_READ);
+ if (i < 0) {
+ printf("vmapbuf: warning, bad user address during I/O\n");
+ vm_page_lock_queues();
+ for (i = 0; i < pidx; ++i) {
+ vm_page_unhold(bp->b_pages[i]);
+ bp->b_pages[i] = NULL;
+ }
+ vm_page_unlock_queues();
+ return(-1);
+ }
pa = trunc_page(pmap_extract(pmap, (vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
+ if (pa == 0) {
+ printf("vmapbuf: warning, race against user address during I/O");
+ goto retry;
+ }
m = PHYS_TO_VM_PAGE(pa);
vm_page_lock_queues();
vm_page_hold(m);
@@ -3592,6 +3611,7 @@ vmapbuf(struct buf *bp)
bp->b_npages = pidx;
bp->b_saveaddr = bp->b_data;
bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
+ return(0);
}
/*
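Taken together, the vfs_bio.c hunks above amount to the following condensed per-page loop inside vmapbuf(). This is a sketch assembled from the diff, not the committed code verbatim: declarations, the buffer-size clamp, and the final pmap_qenter() of the kernel VA are elided, so it only shows how the mapping race is closed.

	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, ++pidx) {
retry:
		/* Fault the user page in with the protection the I/O needs. */
		i = vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
		    (bp->b_iocmd == BIO_READ) ?
		    (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ);
		if (i < 0) {
			/* Hard failure: release pages already held and bail. */
			vm_page_lock_queues();
			for (i = 0; i < pidx; ++i)
				vm_page_unhold(bp->b_pages[i]);
			vm_page_unlock_queues();
			return (-1);	/* callers turn this into EFAULT */
		}
		/*
		 * Userland may unmap the page between the fault and the
		 * extract; if pmap_extract() finds no mapping, redo the
		 * fault rather than panicking as the old code did.
		 */
		pa = trunc_page(pmap_extract(pmap, (vm_offset_t)addr));
		if (pa == 0)
			goto retry;
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
		bp->b_pages[pidx] = m;
	}
	return (0);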