path: root/sys/vm
author    attilio <attilio@FreeBSD.org>    2013-02-03 20:13:33 +0000
committer attilio <attilio@FreeBSD.org>    2013-02-03 20:13:33 +0000
commit    0d3b58aee00948d85d75a9d3d222deb454afc98e (patch)
tree      865d112b57519913a8de64b2d9ca8787633c95a2 /sys/vm
parent    561dd1163dbb481d204da7a526739ac6e43d08f2 (diff)
parent    2d2c37fb592dfc24f15e4bf14c2f109b5d4b5a83 (diff)
MFC
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/device_pager.c    3
-rw-r--r--  sys/vm/uma_core.c        2
-rw-r--r--  sys/vm/vm_fault.c        5
-rw-r--r--  sys/vm/vm_map.c         16
-rw-r--r--  sys/vm/vm_map.h          1
-rw-r--r--  sys/vm/vm_meter.c        1
-rw-r--r--  sys/vm/vm_mmap.c        17
-rw-r--r--  sys/vm/vm_object.h       1
-rw-r--r--  sys/vm/vm_pageout.c     45
-rw-r--r--  sys/vm/vm_unix.c        44
10 files changed, 70 insertions, 65 deletions
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 2a18b0d..30aaac0 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -160,6 +160,7 @@ cdev_pager_allocate(void *handle, enum obj_type tp, struct cdev_pager_ops *ops,
object1->pg_color = color;
object1->handle = handle;
object1->un_pager.devp.ops = ops;
+ object1->un_pager.devp.dev = handle;
TAILQ_INIT(&object1->un_pager.devp.devp_pglist);
mtx_lock(&dev_pager_mtx);
object = vm_pager_object_lookup(&dev_pager_object_list, handle);
@@ -237,7 +238,7 @@ dev_pager_dealloc(object)
vm_page_t m;
VM_OBJECT_UNLOCK(object);
- object->un_pager.devp.ops->cdev_pg_dtor(object->handle);
+ object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);
mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
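This hunk makes the pager destructor receive the cdev recorded at allocation time (un_pager.devp.dev) rather than object->handle. A minimal sketch of what a cdev_pg_dtor callback sees after this change — my_pg_dtor and its dev_rel() cleanup are hypothetical, shown only to illustrate the callback contract:

	static void
	my_pg_dtor(void *handle)
	{
		/* handle is now the cdev saved by cdev_pager_allocate(). */
		struct cdev *dev = handle;

		dev_rel(dev);	/* e.g., drop the allocation-time reference */
	}
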
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 36a149e..2d5b555 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1702,7 +1702,7 @@ uma_startup(void *bootmem, int boot_pages)
#ifdef UMA_DEBUG
printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
- printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
+ printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
uma_max_ipers_ref);
#endif
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 2274464..9883dcf 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -937,9 +937,10 @@ vnode_locked:
* Unlock everything, and return
*/
unlock_and_deallocate(&fs);
- if (hardfault)
+ if (hardfault) {
+ PCPU_INC(cnt.v_io_faults);
curthread->td_ru.ru_majflt++;
- else
+ } else
curthread->td_ru.ru_minflt++;
return (KERN_SUCCESS);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index f87e5b9..26de826 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -726,12 +726,6 @@ vmspace_resident_count(struct vmspace *vmspace)
return pmap_resident_count(vmspace_pmap(vmspace));
}
-long
-vmspace_wired_count(struct vmspace *vmspace)
-{
- return pmap_wired_count(vmspace_pmap(vmspace));
-}
-
/*
* vm_map_create:
*
@@ -3281,8 +3275,7 @@ vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
}
if (!old_mlock && map->flags & MAP_WIREFUTURE) {
- if (ptoa(vmspace_wired_count(curproc->p_vmspace)) +
- init_ssize > lmemlim) {
+ if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
vm_map_unlock(map);
return (KERN_NO_SPACE);
}
@@ -3505,8 +3498,7 @@ Retry:
grow_amount = limit - ctob(vm->vm_ssize);
#endif
if (!old_mlock && map->flags & MAP_WIREFUTURE) {
- if (ptoa(vmspace_wired_count(p->p_vmspace)) + grow_amount >
- lmemlim) {
+ if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
vm_map_unlock_read(map);
rv = KERN_NO_SPACE;
goto out;
@@ -3514,7 +3506,7 @@ Retry:
#ifdef RACCT
PROC_LOCK(p);
if (racct_set(p, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(p->p_vmspace)) + grow_amount)) {
+ ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
PROC_UNLOCK(p);
vm_map_unlock_read(map);
rv = KERN_NO_SPACE;
@@ -3645,7 +3637,7 @@ out:
KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
if (!old_mlock) {
error = racct_set(p, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(p->p_vmspace)));
+ ptoa(pmap_wired_count(map->pmap)));
KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
}
error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index b3b1ad4..135b555 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -298,7 +298,6 @@ void vm_map_wait_busy(vm_map_t map);
_vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)
long vmspace_resident_count(struct vmspace *vmspace);
-long vmspace_wired_count(struct vmspace *vmspace);
#endif /* _KERNEL */
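With the vmspace_wired_count() wrapper removed, every caller in this commit queries the pmap layer directly. A minimal sketch of the recurring RLIMIT_MEMLOCK check as it now reads, assuming map is a locked vm_map, td the current thread, and grow_amount a byte count:

	/* Wired bytes come straight from the pmap. */
	if (ptoa(pmap_wired_count(map->pmap)) + grow_amount >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK))
		return (ENOMEM);	/* the map code returns KERN_NO_SPACE instead */
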
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index b5bb0fa..05174e9 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -283,6 +283,7 @@ VM_STATS_SYS(v_syscall, "System calls");
VM_STATS_SYS(v_intr, "Device interrupts");
VM_STATS_SYS(v_soft, "Software interrupts");
VM_STATS_VM(v_vm_faults, "Address memory faults");
+VM_STATS_VM(v_io_faults, "Page faults requiring I/O");
VM_STATS_VM(v_cow_faults, "Copy-on-write faults");
VM_STATS_VM(v_cow_optim, "Optimized COW faults");
VM_STATS_VM(v_zfod, "Pages zero-filled on demand");
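The counter added above is bumped in vm_fault() on each hard fault and exported as vm.stats.vm.v_io_faults. A minimal userland sketch for reading it, assuming the field is a u_int like the other struct vmmeter statistics:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		u_int v;
		size_t len = sizeof(v);

		if (sysctlbyname("vm.stats.vm.v_io_faults", &v, &len,
		    NULL, 0) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		printf("page faults requiring I/O: %u\n", v);
		return (0);
	}
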
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 3ad7c37..4ae1f90 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1044,6 +1044,7 @@ sys_mlock(td, uap)
struct proc *proc;
vm_offset_t addr, end, last, start;
vm_size_t npages, size;
+ vm_map_t map;
unsigned long nsize;
int error;
@@ -1061,8 +1062,9 @@ sys_mlock(td, uap)
if (npages > vm_page_max_wired)
return (ENOMEM);
proc = td->td_proc;
+ map = &proc->p_vmspace->vm_map;
PROC_LOCK(proc);
- nsize = ptoa(npages + vmspace_wired_count(proc->p_vmspace));
+ nsize = ptoa(npages + pmap_wired_count(map->pmap));
if (nsize > lim_cur(proc, RLIMIT_MEMLOCK)) {
PROC_UNLOCK(proc);
return (ENOMEM);
@@ -1077,13 +1079,13 @@ sys_mlock(td, uap)
if (error != 0)
return (ENOMEM);
#endif
- error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
+ error = vm_map_wire(map, start, end,
VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
if (error != KERN_SUCCESS) {
PROC_LOCK(proc);
racct_set(proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(proc->p_vmspace)));
+ ptoa(pmap_wired_count(map->pmap)));
PROC_UNLOCK(proc);
}
#endif
@@ -1157,7 +1159,7 @@ sys_mlockall(td, uap)
if (error != KERN_SUCCESS) {
PROC_LOCK(td->td_proc);
racct_set(td->td_proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(td->td_proc->p_vmspace)));
+ ptoa(pmap_wired_count(map->pmap)));
PROC_UNLOCK(td->td_proc);
}
#endif
@@ -1491,16 +1493,15 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
return (ENOMEM);
}
if (!old_mlock && map->flags & MAP_WIREFUTURE) {
- if (ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
- size > lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
+ if (ptoa(pmap_wired_count(map->pmap)) + size >
+ lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
racct_set_force(td->td_proc, RACCT_VMEM,
map->size);
PROC_UNLOCK(td->td_proc);
return (ENOMEM);
}
error = racct_set(td->td_proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
- size);
+ ptoa(pmap_wired_count(map->pmap)) + size);
if (error != 0) {
racct_set_force(td->td_proc, RACCT_VMEM,
map->size);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 6aee2bc..7beee59 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -138,6 +138,7 @@ struct vm_object {
struct {
TAILQ_HEAD(, vm_page) devp_pglist;
struct cdev_pager_ops *ops;
+ struct cdev *dev;
} devp;
/*
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b5e9747..ac593a4 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -151,18 +151,21 @@ static struct mtx vm_daemon_mtx;
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
-static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
-static int vm_pageout_full_stats_interval = 0;
-static int vm_pageout_algorithm=0;
-static int defer_swap_pageouts=0;
-static int disable_swap_pageouts=0;
+static int vm_pageout_stats_max;
+static int vm_pageout_stats;
+static int vm_pageout_stats_interval;
+static int vm_pageout_full_stats;
+static int vm_pageout_full_stats_interval;
+static int vm_pageout_algorithm;
+static int defer_swap_pageouts;
+static int disable_swap_pageouts;
#if defined(NO_SWAPPING)
-static int vm_swap_enabled=0;
-static int vm_swap_idle_enabled=0;
+static int vm_swap_enabled = 0;
+static int vm_swap_idle_enabled = 0;
#else
-static int vm_swap_enabled=1;
-static int vm_swap_idle_enabled=0;
+static int vm_swap_enabled = 1;
+static int vm_swap_idle_enabled = 0;
#endif
SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
@@ -174,12 +177,18 @@ SYSCTL_INT(_vm, OID_AUTO, max_launder,
SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
-SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
- CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
+SYSCTL_INT(_vm, OID_AUTO, pageout_stats,
+ CTLFLAG_RD, &vm_pageout_stats, 0, "Number of partial stats scans");
SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
+SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats,
+ CTLFLAG_RD, &vm_pageout_full_stats, 0, "Number of full stats scans");
+
+SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
+ CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
+
#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
@@ -1512,12 +1521,12 @@ vm_pageout_oom(int shortage)
* helps the situation where paging just starts to occur.
*/
static void
-vm_pageout_page_stats()
+vm_pageout_page_stats(void)
{
struct vm_pagequeue *pq;
vm_object_t object;
- vm_page_t m,next;
- int pcount,tpcount; /* Number of pages to check */
+ vm_page_t m, next;
+ int pcount, tpcount; /* Number of pages to check */
static int fullintervalcount = 0;
int page_shortage;
@@ -1531,11 +1540,13 @@ vm_pageout_page_stats()
pcount = cnt.v_active_count;
fullintervalcount += vm_pageout_stats_interval;
if (fullintervalcount < vm_pageout_full_stats_interval) {
+ vm_pageout_stats++;
tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
cnt.v_page_count;
if (pcount > tpcount)
pcount = tpcount;
} else {
+ vm_pageout_full_stats++;
fullintervalcount = 0;
}
@@ -1624,7 +1635,7 @@ vm_pageout_page_stats()
* vm_pageout is the high level pageout daemon.
*/
static void
-vm_pageout()
+vm_pageout(void)
{
int error, pass;
@@ -1757,7 +1768,7 @@ vm_pageout()
* the free page queue lock is held until the msleep() is performed.
*/
void
-pagedaemon_wakeup()
+pagedaemon_wakeup(void)
{
if (!vm_pages_needed && curthread->td_proc != pageproc) {
@@ -1782,7 +1793,7 @@ vm_req_vmdaemon(int req)
}
static void
-vm_daemon()
+vm_daemon(void)
{
struct rlimit rsslim;
struct proc *p;
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index d6da08b..edb6ecc 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -76,6 +76,7 @@ sys_obreak(td, uap)
struct obreak_args *uap;
{
struct vmspace *vm = td->td_proc->p_vmspace;
+ vm_map_t map = &vm->vm_map;
vm_offset_t new, old, base;
rlim_t datalim, lmemlim, vmemlim;
int prot, rv;
@@ -90,7 +91,7 @@ sys_obreak(td, uap)
do_map_wirefuture = FALSE;
new = round_page((vm_offset_t)uap->nsize);
- vm_map_lock(&vm->vm_map);
+ vm_map_lock(map);
base = round_page((vm_offset_t) vm->vm_daddr);
old = base + ctob(vm->vm_dsize);
@@ -103,7 +104,7 @@ sys_obreak(td, uap)
error = ENOMEM;
goto done;
}
- if (new > vm_map_max(&vm->vm_map)) {
+ if (new > vm_map_max(map)) {
error = ENOMEM;
goto done;
}
@@ -117,14 +118,14 @@ sys_obreak(td, uap)
goto done;
}
if (new > old) {
- if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
- if (ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
+ if (!old_mlock && map->flags & MAP_WIREFUTURE) {
+ if (ptoa(pmap_wired_count(map->pmap)) +
(new - old) > lmemlim) {
error = ENOMEM;
goto done;
}
}
- if (vm->vm_map.size + (new - old) > vmemlim) {
+ if (map->size + (new - old) > vmemlim) {
error = ENOMEM;
goto done;
}
@@ -137,22 +138,21 @@ sys_obreak(td, uap)
goto done;
}
error = racct_set(td->td_proc, RACCT_VMEM,
- vm->vm_map.size + (new - old));
+ map->size + (new - old));
if (error != 0) {
racct_set_force(td->td_proc, RACCT_DATA, old - base);
PROC_UNLOCK(td->td_proc);
error = ENOMEM;
goto done;
}
- if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
+ if (!old_mlock && map->flags & MAP_WIREFUTURE) {
error = racct_set(td->td_proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(td->td_proc->p_vmspace)) +
- (new - old));
+ ptoa(pmap_wired_count(map->pmap)) + (new - old));
if (error != 0) {
racct_set_force(td->td_proc, RACCT_DATA,
old - base);
racct_set_force(td->td_proc, RACCT_VMEM,
- vm->vm_map.size);
+ map->size);
PROC_UNLOCK(td->td_proc);
error = ENOMEM;
goto done;
@@ -167,17 +167,15 @@ sys_obreak(td, uap)
prot |= VM_PROT_EXECUTE;
#endif
#endif
- rv = vm_map_insert(&vm->vm_map, NULL, 0, old, new,
- prot, VM_PROT_ALL, 0);
+ rv = vm_map_insert(map, NULL, 0, old, new, prot, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, old - base);
- racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
- if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
+ racct_set_force(td->td_proc, RACCT_VMEM, map->size);
+ if (!old_mlock && map->flags & MAP_WIREFUTURE) {
racct_set_force(td->td_proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(
- td->td_proc->p_vmspace)));
+ ptoa(pmap_wired_count(map->pmap)));
}
PROC_UNLOCK(td->td_proc);
#endif
@@ -194,13 +192,13 @@ sys_obreak(td, uap)
*
* XXX If the pages cannot be wired, no error is returned.
*/
- if ((vm->vm_map.flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
+ if ((map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
if (bootverbose)
printf("obreak: MAP_WIREFUTURE set\n");
do_map_wirefuture = TRUE;
}
} else if (new < old) {
- rv = vm_map_delete(&vm->vm_map, new, old);
+ rv = vm_map_delete(map, new, old);
if (rv != KERN_SUCCESS) {
error = ENOMEM;
goto done;
@@ -209,19 +207,19 @@ sys_obreak(td, uap)
#ifdef RACCT
PROC_LOCK(td->td_proc);
racct_set_force(td->td_proc, RACCT_DATA, new - base);
- racct_set_force(td->td_proc, RACCT_VMEM, vm->vm_map.size);
- if (!old_mlock && vm->vm_map.flags & MAP_WIREFUTURE) {
+ racct_set_force(td->td_proc, RACCT_VMEM, map->size);
+ if (!old_mlock && map->flags & MAP_WIREFUTURE) {
racct_set_force(td->td_proc, RACCT_MEMLOCK,
- ptoa(vmspace_wired_count(td->td_proc->p_vmspace)));
+ ptoa(pmap_wired_count(map->pmap)));
}
PROC_UNLOCK(td->td_proc);
#endif
}
done:
- vm_map_unlock(&vm->vm_map);
+ vm_map_unlock(map);
if (do_map_wirefuture)
- (void) vm_map_wire(&vm->vm_map, old, new,
+ (void) vm_map_wire(map, old, new,
VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
return (error);