author     kib <kib@FreeBSD.org>    2015-01-19 11:02:23 +0000
committer  kib <kib@FreeBSD.org>    2015-01-19 11:02:23 +0000
commit     762486d18fccdd071845bf27b076d5384e24e5fd (patch)
tree       093733f3a81ecb779612f61073043ee3a432496b
parent     4337378f015b471c6cbc4c347ba7422bb720fd16 (diff)
MFC r277051:
Fix several issues with /dev/mem and /dev/kmem devices on amd64.
-rw-r--r--  sys/amd64/amd64/mem.c | 102
1 file changed, 55 insertions(+), 47 deletions(-)
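
The rewritten memrw() below folds the /dev/mem and /dev/kmem code paths into a single switch, clamps each transfer so it never crosses a page boundary, validates /dev/kmem addresses with kernacc() and pmap_extract() before falling through to the physical-memory case, maps physical addresses beyond the direct map with pmap_mapdev()/pmap_unmapdev(), rejects addresses at or above 1 << cpu_maxphyaddr, and no longer returns an error once part of the request has been transferred. Two illustrative sketches follow the diff.
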
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index 2b6b112..3a1f4a4 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/systm.h>
 #include <sys/uio.h>
+#include <machine/md_var.h>
 #include <machine/specialreg.h>
 #include <machine/vmparam.h>
@@ -77,13 +78,15 @@ int
 memrw(struct cdev *dev, struct uio *uio, int flags)
 {
         struct iovec *iov;
-        u_long c, v, vd;
-        int error, o, sflags;
-        vm_offset_t addr, eaddr;
+        void *p;
+        ssize_t orig_resid;
+        u_long v, vd;
+        u_int c;
+        int error, sflags;
         error = 0;
-        c = 0;
         sflags = curthread_pflags_set(TDP_DEVMEMIO);
+        orig_resid = uio->uio_resid;
         while (uio->uio_resid > 0 && error == 0) {
                 iov = uio->uio_iov;
                 if (iov->iov_len == 0) {
@@ -93,63 +96,68 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
                         panic("memrw");
                         continue;
                 }
-                if (dev2unit(dev) == CDEV_MINOR_MEM) {
-                        v = uio->uio_offset;
-kmemphys:
-                        o = v & PAGE_MASK;
-                        c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
-                        vd = PHYS_TO_DMAP(v);
-                        if (vd < DMAP_MIN_ADDRESS ||
-                            (vd > DMAP_MIN_ADDRESS + dmaplimit &&
-                            vd <= DMAP_MAX_ADDRESS) ||
-                            (pmap_kextract(vd) == 0 && (v & PG_FRAME) != 0)) {
-                                error = EFAULT;
-                                goto ret;
-                        }
-                        error = uiomove((void *)vd, (int)c, uio);
-                        continue;
-                } else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
-                        v = uio->uio_offset;
+                v = uio->uio_offset;
+                c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));
-                        if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
-                                v = DMAP_TO_PHYS(v);
-                                goto kmemphys;
+                switch (dev2unit(dev)) {
+                case CDEV_MINOR_KMEM:
+                        /*
+                         * Since c is clamped to be less or equal than
+                         * PAGE_SIZE, the uiomove() call does not
+                         * access past the end of the direct map.
+                         */
+                        if (v >= DMAP_MIN_ADDRESS &&
+                            v < DMAP_MIN_ADDRESS + dmaplimit) {
+                                error = uiomove((void *)v, c, uio);
+                                break;
                         }
-                        c = iov->iov_len;
+                        if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
+                            VM_PROT_READ : VM_PROT_WRITE)) {
+                                error = EFAULT;
+                                break;
+                        }
                         /*
-                         * Make sure that all of the pages are currently
-                         * resident so that we don't create any zero-fill
-                         * pages.
+                         * If the extracted address is not accessible
+                         * through the direct map, then we make a
+                         * private (uncached) mapping because we can't
+                         * depend on the existing kernel mapping
+                         * remaining valid until the completion of
+                         * uiomove().
+                         *
+                         * XXX We cannot provide access to the
+                         * physical page 0 mapped into KVA.
                          */
-                        addr = trunc_page(v);
-                        eaddr = round_page(v + c);
-
-                        if (addr < VM_MIN_KERNEL_ADDRESS) {
+                        v = pmap_extract(kernel_pmap, v);
+                        if (v == 0) {
                                 error = EFAULT;
-                                goto ret;
+                                break;
                         }
-                        for (; addr < eaddr; addr += PAGE_SIZE) {
-                                if (pmap_extract(kernel_pmap, addr) == 0) {
-                                        error = EFAULT;
-                                        goto ret;
-                                }
+                        /* FALLTHROUGH */
+                case CDEV_MINOR_MEM:
+                        if (v < dmaplimit) {
+                                vd = PHYS_TO_DMAP(v);
+                                error = uiomove((void *)vd, c, uio);
+                                break;
                         }
-                        if (!kernacc((caddr_t)(long)v, c,
-                            uio->uio_rw == UIO_READ ?
-                            VM_PROT_READ : VM_PROT_WRITE)) {
+                        if (v >= (1ULL << cpu_maxphyaddr)) {
                                 error = EFAULT;
-                                goto ret;
+                                break;
                         }
-
-                        error = uiomove((caddr_t)(long)v, (int)c, uio);
-                        continue;
+                        p = pmap_mapdev(v, PAGE_SIZE);
+                        error = uiomove(p, c, uio);
+                        pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
+                        break;
                 }
-                /* else panic! */
         }
-ret:
         curthread_pflags_restore(sflags);
+        /*
+         * Don't return error if any byte was written. Read and write
+         * can return error only if no i/o was performed.
+         */
+        if (uio->uio_resid != orig_resid)
+                error = 0;
         return (error);
 }
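
A minimal standalone sketch of the per-page clamping added above (c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK))), assuming 4 KB pages; the PAGE_SIZE/PAGE_MASK constants and the clamp_to_page() helper here are illustrative stand-ins, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (PAGE_SIZE - 1)

/* Mirror of the ulmin()-based clamp: never cross a page boundary. */
static unsigned long
clamp_to_page(uint64_t v, unsigned long len)
{
        unsigned long room;

        /* Bytes remaining in the page that contains offset v. */
        room = PAGE_SIZE - (unsigned long)(v & PAGE_MASK);
        return (len < room ? len : room);
}

int
main(void)
{
        /* 8 bytes before a page boundary: a 64-byte request is cut to 8. */
        printf("%lu\n", clamp_to_page(0x1ff8, 64));     /* prints 8 */
        /* Page-aligned offset: a large request is cut to one page. */
        printf("%lu\n", clamp_to_page(0x2000, 10000));  /* prints 4096 */
        return (0);
}

Because each uiomove() call is limited to the remainder of a single page, the check that precedes it (direct-map bounds, kernacc(), or pmap_extract()) covers every byte that call can touch.
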
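The last hunk changes the error semantics: once any bytes have been moved, a fault ends the transfer with a short count instead of an error. A hedged userland sketch of coping with that, assuming a hypothetical read of one page of physical memory at 1 MiB (device path, offset, and buffer size are illustrative only, and /dev/mem access normally requires root):

#include <sys/types.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        char buf[4096];
        ssize_t n, total;
        int fd;

        fd = open("/dev/mem", O_RDONLY);
        if (fd == -1)
                err(1, "open(/dev/mem)");

        total = 0;
        while (total < (ssize_t)sizeof(buf)) {
                /*
                 * With the new memrw(), a fault in mid-request yields a
                 * short read rather than an error, so loop on short reads.
                 */
                n = pread(fd, buf + total, sizeof(buf) - total,
                    (off_t)0x100000 + total);
                if (n == -1) {
                        if (errno == EFAULT)
                                break;  /* nothing mappable at this offset */
                        err(1, "pread");
                }
                if (n == 0)
                        break;
                total += n;
        }
        printf("read %zd bytes\n", total);
        close(fd);
        return (0);
}

Under the previous code the same fault failed the whole request with EFAULT even when earlier pages had already been copied to the user buffer.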