-rw-r--r--  sys/kern/kern_sysctl.c |  6
-rw-r--r--  sys/vm/vm_extern.h     |  4
-rw-r--r--  sys/vm/vm_glue.c       | 78
-rw-r--r--  sys/vm/vm_mmap.c       | 78
4 files changed, 113 insertions, 53 deletions
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index e608b64..f129df8 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -1000,7 +1000,7 @@ kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
error = sysctl_root(0, name, namelen, &req);
if (req.lock == REQ_WIRED)
- kern_munlock(req.td, (vm_offset_t)req.oldptr,
+ vsunlock(req.td, (vm_offset_t)req.oldptr,
(vm_size_t)req.wiredlen);
SYSCTL_UNLOCK();
@@ -1103,7 +1103,7 @@ sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
ret = 0;
if (req->lock == REQ_LOCKED && req->oldptr &&
req->oldfunc == sysctl_old_user) {
- ret = kern_mlock(req->td, (vm_offset_t)req->oldptr,
+ ret = vslock(req->td, (vm_offset_t)req->oldptr,
(vm_size_t)wiredlen);
if (ret == 0) {
req->lock = REQ_WIRED;
@@ -1320,7 +1320,7 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
req = req2;
if (req.lock == REQ_WIRED)
- kern_munlock(req.td, (vm_offset_t)req.oldptr,
+ vsunlock(req.td, (vm_offset_t)req.oldptr,
(vm_size_t)req.wiredlen);
SYSCTL_UNLOCK();
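The three kern_sysctl.c call sites above all follow one bracket: wire the caller's "old" buffer before the handler runs, so that copyout() cannot fault and sleep while sysctl-internal locks are held, then drop the transient wiring on the way out. A minimal sketch of the pattern the rename preserves; do_sysctl_copyout() is a hypothetical stand-in for the real handler path, not a kernel function:

/*
 * Sketch of the wire/copy/unwire bracket around a sysctl "old" buffer.
 * do_sysctl_copyout() is a placeholder; the vslock()/vsunlock() pairing
 * is the point.
 */
static int
sysctl_wired_copyout(struct thread *td, void *oldp, size_t oldlen)
{
	int error;

	error = vslock(td, (vm_offset_t)oldp, (vm_size_t)oldlen);
	if (error != 0)
		return (error);		/* buffer could not be wired */
	error = do_sysctl_copyout(td, oldp, oldlen);
	vsunlock(td, (vm_offset_t)oldp, (vm_size_t)oldlen);
	return (error);
}

This matches the flow visible above: sysctl_wire_old_buffer() calls vslock() and flips req->lock to REQ_WIRED, and both sysctl entry points call vsunlock() before SYSCTL_UNLOCK() when that flag is set.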
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index d8581e6..9795cc1 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -59,8 +59,6 @@ int sstk(struct thread *, void *, int *);
int swapon(struct thread *, void *, int *);
#endif /* TYPEDEF_FOR_UAP */
-int kern_mlock(struct thread *, vm_offset_t, vm_size_t);
-int kern_munlock(struct thread *, vm_offset_t, vm_size_t);
int kernacc(void *, int, int);
vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
@@ -88,6 +86,8 @@ void vmspace_unshare(struct proc *);
void vmspace_free(struct vmspace *);
void vmspace_exitfree(struct proc *);
void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
+int vslock(struct thread *, vm_offset_t, vm_size_t);
+int vsunlock(struct thread *, vm_offset_t, vm_size_t);
void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
char *);
int vm_fault_quick(caddr_t v, int prot);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 84cf879..89cf4c7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -184,6 +184,84 @@ useracc(addr, len, rw)
}
/*
+ * MPSAFE
+ */
+int
+vslock(td, addr, size)
+ struct thread *td;
+ vm_offset_t addr;
+ vm_size_t size;
+{
+ vm_offset_t start, end;
+ struct proc *proc = td->td_proc;
+ int error, npages;
+
+ start = trunc_page(addr);
+ end = round_page(addr + size);
+
+ /* disable wrap around */
+ if (end <= start)
+ return (EINVAL);
+
+ npages = atop(end - start);
+
+ if (npages > vm_page_max_wired)
+ return (ENOMEM);
+
+ PROC_LOCK(proc);
+ if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
+ atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
+ PROC_UNLOCK(proc);
+ return (ENOMEM);
+ }
+ PROC_UNLOCK(proc);
+
+#if 0
+ /*
+ * XXX - not yet
+ *
+ * The limit for transient usage of wired pages should be
+ * larger than for "permanent" wired pages (mlock()).
+ *
+ * Also, the sysctl code, which is the only present user
+ * of vslock(), does a hard loop on EAGAIN.
+ */
+ if (npages + cnt.v_wire_count > vm_page_max_wired)
+ return (EAGAIN);
+#endif
+
+ error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
+ VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
+
+ /* EINVAL is probably a better error to return than ENOMEM */
+ return (error == KERN_SUCCESS ? 0 : EINVAL);
+}
+
+/*
+ * MPSAFE
+ */
+int
+vsunlock(td, addr, size)
+ struct thread *td;
+ vm_offset_t addr;
+ vm_size_t size;
+{
+ vm_offset_t start, end;
+ int error;
+
+ start = trunc_page(addr);
+ end = round_page(addr + size);
+
+ /* disable wrap around */
+ if (end <= start)
+ return (EINVAL);
+
+ error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
+ VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
+ return (error == KERN_SUCCESS ? 0 : EINVAL);
+}
+
+/*
* Create the U area for a new process.
* This routine directly affects the fork perf for a process.
*/
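Both vslock() and vsunlock() above boil down to the same range arithmetic: truncate the start down to a page boundary, round the end up, reject ranges that are empty or wrap the address space, and hand the page-aligned span to vm_map_wire()/vm_map_unwire() with matching VM_MAP_WIRE_SYSTEM flags (transient kernel-initiated wiring, as opposed to the VM_MAP_WIRE_USER wiring mlock() performs below). A standalone userland sketch of the rounding and wrap check, assuming 4 KiB pages; the macros are local stand-ins for the kernel's trunc_page(), round_page() and atop():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed page size */
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)
#define atop(x)		((x) / PAGE_SIZE)	/* bytes to pages */

int
main(void)
{
	uintptr_t addr = 0x10010, size = 0x2000;
	uintptr_t start = trunc_page(addr);
	uintptr_t end = round_page(addr + size);

	if (end <= start) {			/* empty or wrapped range */
		puts("EINVAL");
		return (1);
	}
	printf("start=%#jx end=%#jx npages=%ju\n",
	    (uintmax_t)start, (uintmax_t)end, (uintmax_t)atop(end - start));
	return (0);
}

The single end <= start test covers both degenerate cases: a zero-length request rounds to an empty span, and an addr + size that wraps makes round_page() produce a value at or below start.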
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 3782ff2..26d5399 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -893,49 +893,42 @@ mlock(td, uap)
struct thread *td;
struct mlock_args *uap;
{
- int error;
+ struct proc *proc = td->td_proc;
+ vm_offset_t addr, start, end;
+ vm_size_t size;
+ int error, npages;
error = suser(td);
if (error)
return (error);
- return (kern_mlock(td, (vm_offset_t)uap->addr, (vm_size_t)uap->len));
-}
-/*
- * MPSAFE
- */
-int
-kern_mlock(td, addr, size)
- struct thread *td;
- vm_offset_t addr;
- vm_size_t size;
-{
- vm_size_t pageoff;
- struct proc *proc = td->td_proc;
- int error;
-
- pageoff = (addr & PAGE_MASK);
- addr -= pageoff;
- size += pageoff;
- size = (vm_size_t) round_page(size);
+ addr = (vm_offset_t)uap->addr;
+ size = uap->len;
+ start = trunc_page(addr);
+ end = round_page(addr + size);
/* disable wrap around */
- if (addr + size < addr)
+ if (end <= start)
return (EINVAL);
- if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
- return (EAGAIN);
+ npages = atop(end - start);
+
+ if (npages > vm_page_max_wired)
+ return (ENOMEM);
PROC_LOCK(proc);
- if (size + ptoa(pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
- lim_cur(proc, RLIMIT_MEMLOCK)) {
+ if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
+ atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
PROC_UNLOCK(proc);
return (ENOMEM);
}
PROC_UNLOCK(proc);
- error = vm_map_wire(&proc->p_vmspace->vm_map, addr,
- addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+ if (npages + cnt.v_wire_count > vm_page_max_wired)
+ return (EAGAIN);
+
+ error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
+ VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
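The rewritten limit check also changes units. The old code compared bytes, size + ptoa(pmap_wired_count(...)) against lim_cur(); the new code compares pages, npages + pmap_wired_count(...) against atop(lim_cur()). The difference appears to be overflow: with a 32-bit vm_size_t, an oversized request can wrap the byte-domain sum and slip under the limit, while the page-domain sum cannot realistically overflow. A standalone sketch of the failure mode, with local stand-ins for atop()/ptoa() and assumed 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define atop(x)		((x) >> PAGE_SHIFT)	/* bytes to pages */
#define ptoa(x)		((x) << PAGE_SHIFT)	/* pages to bytes */

int
main(void)
{
	/* Model a 32-bit vm_size_t with an oversized mlock() request. */
	uint32_t size = 0xFFFFF000;		/* ~4 GiB request */
	uint32_t wired = 1000;			/* pages already wired */
	uint32_t limit = 64U << 20;		/* RLIMIT_MEMLOCK: 64 MiB */

	/* Byte domain: size + ptoa(wired) wraps past 2^32, so the huge
	 * request appears to fit under the limit. */
	printf("bytes: %s\n",
	    size + ptoa(wired) > limit ? "ENOMEM" : "passes (overflow)");

	/* Page domain: no intermediate value can wrap here. */
	printf("pages: %s\n",
	    atop(size) + wired > atop(limit) ? "ENOMEM" : "passes");
	return (0);
}

The global cnt.v_wire_count check was already expressed in pages; it simply moves after the per-process limit check.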
@@ -1050,37 +1043,26 @@ munlock(td, uap)
struct thread *td;
struct munlock_args *uap;
{
+ vm_offset_t addr, start, end;
+ vm_size_t size;
int error;
error = suser(td);
if (error)
return (error);
- return (kern_munlock(td, (vm_offset_t)uap->addr, (vm_size_t)uap->len));
-}
-/*
- * MPSAFE
- */
-int
-kern_munlock(td, addr, size)
- struct thread *td;
- vm_offset_t addr;
- vm_size_t size;
-{
- vm_size_t pageoff;
- int error;
-
- pageoff = (addr & PAGE_MASK);
- addr -= pageoff;
- size += pageoff;
- size = (vm_size_t) round_page(size);
+ addr = (vm_offset_t)uap->addr;
+ size = uap->len;
+ start = trunc_page(addr);
+ end = round_page(addr + size);
/* disable wrap around */
- if (addr + size < addr)
+ if (end <= start)
return (EINVAL);
- error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
- addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+
+ error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
+ VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
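For reference, the user-visible contract these two syscalls implement — a minimal userland caller of mlock()/munlock(). Note that at the time of this diff both paths demand superuser privilege (the suser() checks above), so the sketch only succeeds when run as root:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	size_t len = (size_t)pagesz * 4;
	void *buf;

	if (posix_memalign(&buf, (size_t)pagesz, len) != 0)
		return (1);
	memset(buf, 0, len);			/* touch every page */

	if (mlock(buf, len) == -1) {		/* EPERM without privilege */
		perror("mlock");
		free(buf);
		return (1);
	}
	/* ... buf is wired: no page faults, no swapping ... */
	if (munlock(buf, len) == -1)
		perror("munlock");
	free(buf);
	return (0);
}

Both syscalls report ENOMEM for page-wiring failures and limit violations, matching the error mapping at the end of each handler above.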