author     dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
committer  dillon <dillon@FreeBSD.org>    2001-07-04 16:20:28 +0000
commit     e028603b7e3e4fb35cdf00aab533f3965f4a13cc (patch)
tree       7420cce169451a74c5b87963467a4aeff668ed12 /sys
parent     0b028660051eb7abf4306d34e7fec0e7fde86a28 (diff)
With Alfred's permission, remove vm_mtx in favor of a fine-grained locking approach
(this commit is just the first stage). Also add various GIANT_ macros to formalize the removal of Giant, making it easy to test in a more piecemeal fashion. These macros will allow us to test fine-grained locks to a degree both before and after removing Giant, and to remove Giant piecemeal, via sysctls, on those subsystems whose authors believe they can operate without it.
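
Editor's note: the hunks below lean on a GIANT_REQUIRED assertion wherever a function previously took vm_mtx itself. Its definition lives in sys/sys/mutex.h, whose hunk is not reproduced in this excerpt; judging from the call sites, where it replaces mtx_assert(&vm_mtx, MA_OWNED) and mtx_assert(&Giant, MA_OWNED) one-for-one, a minimal sketch would be an assertion that the caller already holds Giant. This is an inference, not the literal definition from this commit.

    /*
     * Minimal sketch, assuming GIANT_REQUIRED is built on mtx_assert().
     * The real definition (and the sysctl-controlled knobs the commit
     * message alludes to) is in sys/sys/mutex.h and is not shown here.
     */
    #define GIANT_REQUIRED  mtx_assert(&Giant, MA_OWNED)

Read this way, every "+ GIANT_REQUIRED;" hunk below converts an explicit vm_mtx critical section into a documented precondition on the caller.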
Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/machdep.c | 2
-rw-r--r--  sys/alpha/alpha/mem.c | 6
-rw-r--r--  sys/alpha/alpha/trap.c | 9
-rw-r--r--  sys/alpha/alpha/vm_machdep.c | 21
-rw-r--r--  sys/alpha/osf1/imgact_osf1.c | 7
-rw-r--r--  sys/alpha/osf1/osf1_misc.c | 8
-rw-r--r--  sys/amd64/amd64/busdma_machdep.c | 1
-rw-r--r--  sys/amd64/amd64/machdep.c | 6
-rw-r--r--  sys/amd64/amd64/mem.c | 10
-rw-r--r--  sys/amd64/amd64/pmap.c | 8
-rw-r--r--  sys/amd64/amd64/vm_machdep.c | 26
-rw-r--r--  sys/amd64/isa/isa_dma.c | 12
-rw-r--r--  sys/dev/md/md.c | 9
-rw-r--r--  sys/fs/procfs/procfs_map.c | 8
-rw-r--r--  sys/fs/procfs/procfs_mem.c | 11
-rw-r--r--  sys/fs/specfs/spec_vnops.c | 5
-rw-r--r--  sys/i386/i386/busdma_machdep.c | 1
-rw-r--r--  sys/i386/i386/machdep.c | 6
-rw-r--r--  sys/i386/i386/mem.c | 10
-rw-r--r--  sys/i386/i386/pmap.c | 8
-rw-r--r--  sys/i386/i386/vm_machdep.c | 26
-rw-r--r--  sys/i386/isa/isa_dma.c | 12
-rw-r--r--  sys/ia64/ia64/vm_machdep.c | 21
-rw-r--r--  sys/kern/imgact_aout.c | 10
-rw-r--r--  sys/kern/imgact_elf.c | 17
-rw-r--r--  sys/kern/init_main.c | 8
-rw-r--r--  sys/kern/kern_exec.c | 10
-rw-r--r--  sys/kern/kern_exit.c | 4
-rw-r--r--  sys/kern/kern_fork.c | 5
-rw-r--r--  sys/kern/kern_resource.c | 4
-rw-r--r--  sys/kern/link_elf.c | 10
-rw-r--r--  sys/kern/link_elf_obj.c | 10
-rw-r--r--  sys/kern/subr_blist.c | 1
-rw-r--r--  sys/kern/sys_pipe.c | 18
-rw-r--r--  sys/kern/sysv_shm.c | 24
-rw-r--r--  sys/kern/uipc_syscalls.c | 17
-rw-r--r--  sys/kern/vfs_bio.c | 112
-rw-r--r--  sys/kern/vfs_cluster.c | 16
-rw-r--r--  sys/kern/vfs_default.c | 17
-rw-r--r--  sys/kern/vfs_extattr.c | 4
-rw-r--r--  sys/kern/vfs_subr.c | 16
-rw-r--r--  sys/kern/vfs_syscalls.c | 4
-rw-r--r--  sys/nfs/nfs_bio.c | 22
-rw-r--r--  sys/nfs/nfs_common.c | 4
-rw-r--r--  sys/nfs/nfs_subs.c | 4
-rw-r--r--  sys/nfsclient/nfs_bio.c | 22
-rw-r--r--  sys/nfsclient/nfs_subs.c | 4
-rw-r--r--  sys/nfsserver/nfs_srvsubs.c | 4
-rw-r--r--  sys/pc98/cbus/cbus_dma.c | 11
-rw-r--r--  sys/pc98/i386/machdep.c | 6
-rw-r--r--  sys/pc98/pc98/isa_dma.c | 11
-rw-r--r--  sys/pc98/pc98/machdep.c | 6
-rw-r--r--  sys/powerpc/aim/vm_machdep.c | 21
-rw-r--r--  sys/powerpc/powerpc/vm_machdep.c | 21
-rw-r--r--  sys/sys/mutex.h | 26
-rw-r--r--  sys/sys/proc.h | 1
-rw-r--r--  sys/ufs/ufs/ufs_readwrite.c | 35
-rw-r--r--  sys/vm/default_pager.c | 1
-rw-r--r--  sys/vm/device_pager.c | 1
-rw-r--r--  sys/vm/phys_pager.c | 5
-rw-r--r--  sys/vm/swap_pager.c | 100
-rw-r--r--  sys/vm/vm.h | 4
-rw-r--r--  sys/vm/vm_fault.c | 49
-rw-r--r--  sys/vm/vm_glue.c | 45
-rw-r--r--  sys/vm/vm_init.c | 6
-rw-r--r--  sys/vm/vm_kern.c | 70
-rw-r--r--  sys/vm/vm_map.c | 113
-rw-r--r--  sys/vm/vm_map.h | 16
-rw-r--r--  sys/vm/vm_meter.c | 7
-rw-r--r--  sys/vm/vm_mmap.c | 86
-rw-r--r--  sys/vm/vm_object.c | 71
-rw-r--r--  sys/vm/vm_object.h | 48
-rw-r--r--  sys/vm/vm_page.c | 77
-rw-r--r--  sys/vm/vm_page.h | 36
-rw-r--r--  sys/vm/vm_pageout.c | 46
-rw-r--r--  sys/vm/vm_pager.c | 20
-rw-r--r--  sys/vm/vm_pager.h | 15
-rw-r--r--  sys/vm/vm_unix.c | 46
-rw-r--r--  sys/vm/vm_zone.c | 23
-rw-r--r--  sys/vm/vnode_pager.c | 67
80 files changed, 566 insertions, 1124 deletions
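
Editor's note: the per-file hunks repeat one conversion pattern: code that used to bracket VM work with mtx_lock(&vm_mtx)/mtx_unlock(&vm_mtx) now runs with Giant already held and merely asserts that fact. The before/after sketch below is condensed from the cpu_wait() hunks in the vm_machdep.c files, rewritten with ANSI prototypes for brevity, so it is an illustration of the pattern rather than a literal excerpt.

    /* Old: the function serialized itself on the global VM mutex. */
    void
    cpu_wait(struct proc *p)
    {
            mtx_lock(&vm_mtx);
            pmap_dispose_proc(p);           /* drop per-process resources */
            vmspace_free(p->p_vmspace);     /* and clean out the vmspace */
            mtx_unlock(&vm_mtx);
    }

    /* New: the caller must already hold Giant; the function only asserts it. */
    void
    cpu_wait(struct proc *p)
    {
            GIANT_REQUIRED;

            pmap_dispose_proc(p);           /* drop per-process resources */
            vmspace_free(p->p_vmspace);     /* and clean out the vmspace */
    }

The same shape recurs in vmapbuf()/vunmapbuf(), pmap_mapdev(), the image activators, and the vfs_bio.c helpers; vm_page_zero_idle() is the main exception, where an opportunistic mtx_trylock(&vm_mtx) becomes mtx_trylock(&Giant) around the page-zeroing work.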
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index 16e31cc..856334f 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -257,7 +257,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
identifycpu();
/* startrtclock(); */
@@ -367,7 +366,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
diff --git a/sys/alpha/alpha/mem.c b/sys/alpha/alpha/mem.c
index f9db347..6af53c4 100644
--- a/sys/alpha/alpha/mem.c
+++ b/sys/alpha/alpha/mem.c
@@ -145,6 +145,8 @@ mmrw(dev_t dev, struct uio *uio, int flags)
int error = 0, rw;
vm_offset_t addr, eaddr;
+ GIANT_REQUIRED;
+
while (uio->uio_resid > 0 && !error) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
@@ -190,19 +192,15 @@ kmemphys:
*/
addr = trunc_page(v);
eaddr = round_page(v + c);
- mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0) {
- mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE)) {
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
- mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)v, c, uio);
continue;
}
diff --git a/sys/alpha/alpha/trap.c b/sys/alpha/alpha/trap.c
index 8294d2b..fe15204 100644
--- a/sys/alpha/alpha/trap.c
+++ b/sys/alpha/alpha/trap.c
@@ -278,6 +278,8 @@ trap(a0, a1, a2, entry, framep)
critical_exit(s);
#endif
+ GIANT_REQUIRED;
+
cnt.v_trap++;
ucode = 0;
user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
@@ -432,15 +434,8 @@ trap(a0, a1, a2, entry, framep)
case ALPHA_MMCSR_FOE:
case ALPHA_MMCSR_FOW:
{
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (hadvmlock == 0)
- mtx_lock(&vm_mtx);
pmap_emulate_reference(p, a0, user,
a1 == ALPHA_MMCSR_FOW);
- if (hadvmlock == 0)
- mtx_unlock(&vm_mtx);
goto out;
}
case ALPHA_MMCSR_INVALTRANS:
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 216e5a4..5336c51 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -274,14 +274,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -330,10 +329,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -349,7 +349,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -367,10 +366,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -378,7 +378,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -436,17 +435,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -469,8 +464,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/alpha/osf1/imgact_osf1.c b/sys/alpha/osf1/imgact_osf1.c
index 25f26a3..18d8baa 100644
--- a/sys/alpha/osf1/imgact_osf1.c
+++ b/sys/alpha/osf1/imgact_osf1.c
@@ -97,6 +97,8 @@ exec_osf1_imgact(struct image_params *imgp)
struct nameidata *ndp;
Osf_Auxargs *osf_auxargs;
+ GIANT_REQUIRED;
+
execp = (const struct ecoff_exechdr*)imgp->image_header;
eap = &execp->a;
ndp = NULL;
@@ -175,14 +177,12 @@ exec_osf1_imgact(struct image_params *imgp)
/*
* Destroy old process VM and create a new one (with a new stack).
*/
- mtx_lock(&vm_mtx);
exec_new_vmspace(imgp);
/*
* The vm space can now be changed.
*/
vmspace = imgp->proc->p_vmspace;
- mtx_unlock(&vm_mtx);
imgp->interpreted = 0;
imgp->proc->p_sysent = &osf1_sysvec;
@@ -214,17 +214,14 @@ exec_osf1_imgact(struct image_params *imgp)
/* .. bss .. */
if (round_page(bsize)) {
baddr = bss_start;
- mtx_lock(&vm_mtx);
if ((error = vm_map_find(&vmspace->vm_map, NULL,
(vm_offset_t) 0, &baddr, round_page(bsize), FALSE,
VM_PROT_ALL, VM_PROT_ALL, FALSE))) {
- mtx_unlock(&vm_mtx);
DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__,
error));
goto bail;
}
- mtx_unlock(&vm_mtx);
}
diff --git a/sys/alpha/osf1/osf1_misc.c b/sys/alpha/osf1/osf1_misc.c
index 41c34f3..8f02905 100644
--- a/sys/alpha/osf1/osf1_misc.c
+++ b/sys/alpha/osf1/osf1_misc.c
@@ -472,6 +472,8 @@ osf1_mmap(p, uap)
vm_map_t map;
vm_offset_t addr, len, newaddr;
+ GIANT_REQUIRED;
+
SCARG(&a, addr) = SCARG(uap, addr);
SCARG(&a, len) = SCARG(uap, len);
SCARG(&a, prot) = SCARG(uap, prot);
@@ -500,7 +502,6 @@ osf1_mmap(p, uap)
addr = round_page((vm_offset_t)0x10000UL);
len = (vm_offset_t)SCARG(&a, len);
map = &p->p_vmspace->vm_map;
- mtx_lock(&vm_mtx);
if (!vm_map_findspace(map, addr, len, &newaddr)) {
SCARG(&a,addr) = (caddr_t) newaddr;
SCARG(&a, flags) |= (MAP_FIXED);
@@ -510,7 +511,6 @@ osf1_mmap(p, uap)
uprintf("osf1_mmap:vm_map_findspace failed for: %p 0x%lx\n",
(caddr_t)addr, len);
#endif
- mtx_unlock(&vm_mtx);
if (SCARG(uap, flags) & OSF1_MAP_SHARED)
SCARG(&a, flags) |= MAP_SHARED;
if (SCARG(uap, flags) & OSF1_MAP_PRIVATE)
@@ -1643,15 +1643,15 @@ osf1_uswitch(p, uap)
vm_map_entry_t entry;
vm_offset_t zero;
+ GIANT_REQUIRED;
+
zero = 0;
if (uap->cmd == OSF1_USC_GET) {
- mtx_lock(&vm_mtx);
if (vm_map_lookup_entry(&(p->p_vmspace->vm_map),0, &entry))
p->p_retval[0] = OSF1_USW_NULLP;
else
p->p_retval[0] = 0;
- mtx_unlock(&vm_mtx);
return(KERN_SUCCESS);
} else if (uap->cmd == OSF1_USC_SET)
if (uap->mask & OSF1_USW_NULLP) {
diff --git a/sys/amd64/amd64/busdma_machdep.c b/sys/amd64/amd64/busdma_machdep.c
index 3dc9e76..d99e22a 100644
--- a/sys/amd64/amd64/busdma_machdep.c
+++ b/sys/amd64/amd64/busdma_machdep.c
@@ -32,6 +32,7 @@
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 73b8052..9ee7bfc 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -218,7 +218,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -352,7 +351,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2007,6 +2005,8 @@ f00f_hack(void *unused) {
if (!has_f00f_bug)
return;
+ GIANT_REQUIRED;
+
printf("Intel Pentium detected, installing workaround for F00F bug\n");
r_idt.rd_limit = sizeof(idt0) - 1;
@@ -2022,11 +2022,9 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
- mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
- mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/amd64/amd64/mem.c b/sys/amd64/amd64/mem.c
index dfb034e..5eded32 100644
--- a/sys/amd64/amd64/mem.c
+++ b/sys/amd64/amd64/mem.c
@@ -140,6 +140,8 @@ mmrw(dev_t dev, struct uio *uio, int flags)
int error = 0;
vm_offset_t addr, eaddr;
+ GIANT_REQUIRED;
+
while (uio->uio_resid > 0 && error == 0) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
@@ -155,17 +157,13 @@ mmrw(dev_t dev, struct uio *uio, int flags)
case 0:
v = uio->uio_offset;
v &= ~PAGE_MASK;
- mtx_lock(&vm_mtx);
pmap_kenter((vm_offset_t)ptvmmap, v);
- mtx_unlock(&vm_mtx);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
- mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t)ptvmmap);
- mtx_unlock(&vm_mtx);
continue;
/* minor device 1 is kernel memory */
@@ -183,20 +181,16 @@ mmrw(dev_t dev, struct uio *uio, int flags)
return EFAULT;
if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
return EFAULT;
- mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0) {
- mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE)) {
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
- mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
}
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c6db00b..a00bcab 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3135,14 +3135,11 @@ pmap_mapdev(pa, size)
{
vm_offset_t va, tmpva, offset;
unsigned *pte;
- int hadvmlock;
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
@@ -3158,9 +3155,6 @@ pmap_mapdev(pa, size)
}
invltlb();
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
-
return ((void *)(va + offset));
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index e26d211..ef84bf9 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -292,14 +292,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -378,10 +377,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -397,7 +397,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -415,10 +414,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -426,7 +426,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -582,17 +581,12 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
return(0);
- }
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- }
+ if (mtx_trylock(&Giant)) {
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
if (m != NULL && (m->flags & PG_ZERO) == 0) {
@@ -611,8 +605,10 @@ vm_page_zero_idle()
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/amd64/isa/isa_dma.c b/sys/amd64/isa/isa_dma.c
index 6acc527..d9f1233 100644
--- a/sys/amd64/isa/isa_dma.c
+++ b/sys/amd64/isa/isa_dma.c
@@ -53,6 +53,7 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <vm/vm.h>
@@ -216,6 +217,8 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
int waport;
caddr_t newaddr;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (chan & ~VALID_DMA_MASK)
panic("isa_dmastart: channel out of range");
@@ -254,12 +257,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
- mtx_lock(&vm_mtx); /*
- * XXX: need to hold for longer period to
- * ensure that mappings don't change
- */
phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- mtx_unlock(&vm_mtx);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@@ -378,11 +376,11 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+ GIANT_REQUIRED;
+
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
- mtx_lock(&vm_mtx);
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
- mtx_unlock(&vm_mtx);
#define ISARAM_END RAM_END
if (phys == 0)
panic("isa_dmacheck: no physical page present");
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index a7de5c2..f8e6a18 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -692,6 +692,8 @@ mddestroy(struct md_s *sc, struct md_ioctl *mdio, struct proc *p)
{
unsigned u;
+ GIANT_REQUIRED;
+
if (sc->dev != NULL) {
devstat_remove_entry(&sc->stats);
disk_destroy(sc->dev);
@@ -701,9 +703,7 @@ mddestroy(struct md_s *sc, struct md_ioctl *mdio, struct proc *p)
if (sc->cred != NULL)
crfree(sc->cred);
if (sc->object != NULL) {
- mtx_lock(&vm_mtx);
vm_pager_deallocate(sc->object);
- mtx_unlock(&vm_mtx);
}
if (sc->secp != NULL) {
for (u = 0; u < sc->nsect; u++)
@@ -725,6 +725,8 @@ mdcreate_swap(struct md_ioctl *mdio, struct proc *p)
int error;
struct md_s *sc;
+ GIANT_REQUIRED;
+
if (mdio->md_options & MD_AUTOUNIT) {
sc = mdnew(-1);
mdio->md_unit = sc->unit;
@@ -757,18 +759,15 @@ mdcreate_swap(struct md_ioctl *mdio, struct proc *p)
sc->secsize = PAGE_SIZE;
sc->nsect = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
- mtx_lock(&vm_mtx);
sc->object = vm_pager_allocate(OBJT_SWAP, NULL, sc->secsize * (vm_offset_t)sc->nsect, VM_PROT_DEFAULT, 0);
if (mdio->md_options & MD_RESERVE) {
if (swap_pager_reserve(sc->object, 0, sc->nsect) < 0) {
vm_pager_deallocate(sc->object);
- mtx_unlock(&vm_mtx);
sc->object = NULL;
mddestroy(sc, mdio, p);
return(EDOM);
}
}
- mtx_unlock(&vm_mtx);
error = mdsetcred(sc, p->p_ucred);
if (error)
mddestroy(sc, mdio, p);
diff --git a/sys/fs/procfs/procfs_map.c b/sys/fs/procfs/procfs_map.c
index 047a67f..692594d 100644
--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -81,14 +81,14 @@ procfs_domap(curp, p, pfs, uio)
vm_map_entry_t entry;
char mebuffer[MEBUFFERSIZE];
+ GIANT_REQUIRED;
+
if (uio->uio_rw != UIO_READ)
return (EOPNOTSUPP);
if (uio->uio_offset != 0)
return (0);
- mtx_lock(&vm_mtx);
-
error = 0;
if (map != &curproc->p_vmspace->vm_map)
vm_map_lock_read(map);
@@ -171,17 +171,13 @@ case OBJT_DEVICE:
error = EFBIG;
break;
}
- mtx_unlock(&vm_mtx);
error = uiomove(mebuffer, len, uio);
- mtx_lock(&vm_mtx);
if (error)
break;
}
if (map != &curproc->p_vmspace->vm_map)
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
-
return error;
}
diff --git a/sys/fs/procfs/procfs_mem.c b/sys/fs/procfs/procfs_mem.c
index b8c60d6..e109146 100644
--- a/sys/fs/procfs/procfs_mem.c
+++ b/sys/fs/procfs/procfs_mem.c
@@ -83,6 +83,8 @@ procfs_rwmem(curp, p, uio)
vm_prot_t reqprot;
vm_offset_t kva;
+ GIANT_REQUIRED;
+
/*
* if the vmspace is in the midst of being deallocated or the
* process is exiting, don't try to grab anything. The page table
@@ -91,12 +93,8 @@ procfs_rwmem(curp, p, uio)
vm = p->p_vmspace;
if ((p->p_flag & P_WEXIT))
return EFAULT;
-
- mtx_lock(&vm_mtx);
- if (vm->vm_refcnt < 1) {
- mtx_unlock(&vm_mtx);
+ if (vm->vm_refcnt < 1)
return EFAULT;
- }
++vm->vm_refcnt;
/*
* The map we want...
@@ -214,9 +212,7 @@ procfs_rwmem(curp, p, uio)
/*
* Now do the i/o move.
*/
- mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)(kva + page_offset), len, uio);
- mtx_lock(&vm_mtx);
pmap_kremove(kva);
@@ -235,7 +231,6 @@ procfs_rwmem(curp, p, uio)
kmem_free(kernel_map, kva, PAGE_SIZE);
vmspace_free(vm);
- mtx_unlock(&vm_mtx);
return (error);
}
diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c
index 5ed07c0..f2b1c34 100644
--- a/sys/fs/specfs/spec_vnops.c
+++ b/sys/fs/specfs/spec_vnops.c
@@ -660,7 +660,8 @@ spec_getpages(ap)
int blksiz;
int gotreqpage;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
error = 0;
pcount = round_page(ap->a_count) / PAGE_SIZE;
@@ -732,7 +733,6 @@ spec_getpages(ap)
cnt.v_vnodein++;
cnt.v_vnodepgsin += pcount;
- mtx_unlock(&vm_mtx);
/* Do the input. */
BUF_STRATEGY(bp);
@@ -743,7 +743,6 @@ spec_getpages(ap)
tsleep(bp, PVM, "spread", 0);
splx(s);
- mtx_lock(&vm_mtx);
if ((bp->b_ioflags & BIO_ERROR) != 0) {
if (bp->b_error)
diff --git a/sys/i386/i386/busdma_machdep.c b/sys/i386/i386/busdma_machdep.c
index 3dc9e76..d99e22a 100644
--- a/sys/i386/i386/busdma_machdep.c
+++ b/sys/i386/i386/busdma_machdep.c
@@ -32,6 +32,7 @@
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 73b8052..9ee7bfc 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -218,7 +218,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -352,7 +351,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2007,6 +2005,8 @@ f00f_hack(void *unused) {
if (!has_f00f_bug)
return;
+ GIANT_REQUIRED;
+
printf("Intel Pentium detected, installing workaround for F00F bug\n");
r_idt.rd_limit = sizeof(idt0) - 1;
@@ -2022,11 +2022,9 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
- mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
- mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/i386/i386/mem.c b/sys/i386/i386/mem.c
index dfb034e..5eded32 100644
--- a/sys/i386/i386/mem.c
+++ b/sys/i386/i386/mem.c
@@ -140,6 +140,8 @@ mmrw(dev_t dev, struct uio *uio, int flags)
int error = 0;
vm_offset_t addr, eaddr;
+ GIANT_REQUIRED;
+
while (uio->uio_resid > 0 && error == 0) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
@@ -155,17 +157,13 @@ mmrw(dev_t dev, struct uio *uio, int flags)
case 0:
v = uio->uio_offset;
v &= ~PAGE_MASK;
- mtx_lock(&vm_mtx);
pmap_kenter((vm_offset_t)ptvmmap, v);
- mtx_unlock(&vm_mtx);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
- mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t)ptvmmap);
- mtx_unlock(&vm_mtx);
continue;
/* minor device 1 is kernel memory */
@@ -183,20 +181,16 @@ mmrw(dev_t dev, struct uio *uio, int flags)
return EFAULT;
if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
return EFAULT;
- mtx_lock(&vm_mtx);
for (; addr < eaddr; addr += PAGE_SIZE)
if (pmap_extract(kernel_pmap, addr) == 0) {
- mtx_unlock(&vm_mtx);
return EFAULT;
}
if (!kernacc((caddr_t)(int)uio->uio_offset, c,
uio->uio_rw == UIO_READ ?
VM_PROT_READ : VM_PROT_WRITE)) {
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
- mtx_unlock(&vm_mtx);
error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
continue;
}
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index c6db00b..a00bcab 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3135,14 +3135,11 @@ pmap_mapdev(pa, size)
{
vm_offset_t va, tmpva, offset;
unsigned *pte;
- int hadvmlock;
offset = pa & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
va = kmem_alloc_pageable(kernel_map, size);
if (!va)
@@ -3158,9 +3155,6 @@ pmap_mapdev(pa, size)
}
invltlb();
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
-
return ((void *)(va + offset));
}
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index e26d211..ef84bf9 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -292,14 +292,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -378,10 +377,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -397,7 +397,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -415,10 +414,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -426,7 +426,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -582,17 +581,12 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
- if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
return(0);
- }
- if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
+ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
return(0);
- }
+ if (mtx_trylock(&Giant)) {
zero_state = 0;
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
if (m != NULL && (m->flags & PG_ZERO) == 0) {
@@ -611,8 +605,10 @@ vm_page_zero_idle()
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/i386/isa/isa_dma.c b/sys/i386/isa/isa_dma.c
index 6acc527..d9f1233 100644
--- a/sys/i386/isa/isa_dma.c
+++ b/sys/i386/isa/isa_dma.c
@@ -53,6 +53,7 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <vm/vm.h>
@@ -216,6 +217,8 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
int waport;
caddr_t newaddr;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (chan & ~VALID_DMA_MASK)
panic("isa_dmastart: channel out of range");
@@ -254,12 +257,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
- mtx_lock(&vm_mtx); /*
- * XXX: need to hold for longer period to
- * ensure that mappings don't change
- */
phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- mtx_unlock(&vm_mtx);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@@ -378,11 +376,11 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+ GIANT_REQUIRED;
+
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
- mtx_lock(&vm_mtx);
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
- mtx_unlock(&vm_mtx);
#define ISARAM_END RAM_END
if (phys == 0)
panic("isa_dmacheck: no physical page present");
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index 4f50ba0..ee1d2db 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -313,14 +313,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -369,10 +368,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -388,7 +388,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -406,10 +405,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -417,7 +417,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -475,17 +474,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -508,8 +503,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/kern/imgact_aout.c b/sys/kern/imgact_aout.c
index 8becda3..856d4ec 100644
--- a/sys/kern/imgact_aout.c
+++ b/sys/kern/imgact_aout.c
@@ -91,6 +91,8 @@ exec_aout_imgact(imgp)
unsigned long bss_size;
int error;
+ GIANT_REQUIRED;
+
/*
* Linux and *BSD binaries look very much alike,
* only the machine id is different:
@@ -171,7 +173,6 @@ exec_aout_imgact(imgp)
if (error)
return (error);
- mtx_lock(&vm_mtx);
/*
* Destroy old process VM and create a new one (with a new stack)
*/
@@ -185,9 +186,7 @@ exec_aout_imgact(imgp)
vp = imgp->vp;
map = &vmspace->vm_map;
vm_map_lock(map);
- mtx_unlock(&vm_mtx);
VOP_GETVOBJECT(vp, &object);
- mtx_lock(&vm_mtx);
vm_object_reference(object);
text_end = virtual_offset + a_out->a_text;
@@ -198,7 +197,6 @@ exec_aout_imgact(imgp)
MAP_COPY_ON_WRITE | MAP_PREFAULT);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
data_end = text_end + a_out->a_data;
@@ -211,7 +209,6 @@ exec_aout_imgact(imgp)
MAP_COPY_ON_WRITE | MAP_PREFAULT);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -222,7 +219,6 @@ exec_aout_imgact(imgp)
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_map_unlock(map);
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -235,8 +231,6 @@ exec_aout_imgact(imgp)
vmspace->vm_daddr = (caddr_t) (uintptr_t)
(virtual_offset + a_out->a_text);
- mtx_unlock(&vm_mtx);
-
/* Fill in image_params */
imgp->interpreted = 0;
imgp->entry_addr = a_out->a_entry;
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index fbb2a71..8cf2e24 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -198,6 +198,8 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_offset_t file_addr;
vm_offset_t data_buf = 0;
+ GIANT_REQUIRED;
+
VOP_GETVOBJECT(vp, &object);
error = 0;
@@ -230,7 +232,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
else
map_len = round_page(offset+filsz) - file_addr;
- mtx_lock(&vm_mtx);
if (map_len != 0) {
vm_object_reference(object);
vm_map_lock(&vmspace->vm_map);
@@ -245,13 +246,11 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_map_unlock(&vmspace->vm_map);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return EINVAL;
}
/* we can stop now if we've covered it all */
if (memsz == filsz) {
- mtx_unlock(&vm_mtx);
return 0;
}
}
@@ -275,7 +274,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(&vmspace->vm_map);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
return EINVAL;
}
}
@@ -293,17 +291,13 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return EINVAL;
}
/* send the page fragment to user space */
- mtx_unlock(&vm_mtx);
error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len);
- mtx_lock(&vm_mtx);
vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
if (error) {
- mtx_unlock(&vm_mtx);
return (error);
}
}
@@ -314,7 +308,6 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, vm_o
vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
FALSE);
- mtx_unlock(&vm_mtx);
return error;
}
@@ -475,6 +468,8 @@ exec_elf_imgact(struct image_params *imgp)
Elf_Brandinfo *brand_info;
char path[MAXPATHLEN];
+ GIANT_REQUIRED;
+
/*
* Do we have a valid ELF header ?
*/
@@ -510,11 +505,9 @@ exec_elf_imgact(struct image_params *imgp)
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
- mtx_lock(&vm_mtx);
exec_new_vmspace(imgp);
vmspace = imgp->proc->p_vmspace;
- mtx_unlock(&vm_mtx);
for (i = 0; i < hdr->e_phnum; i++) {
switch(phdr[i].p_type) {
@@ -571,12 +564,10 @@ exec_elf_imgact(struct image_params *imgp)
}
}
- mtx_lock(&vm_mtx);
vmspace->vm_tsize = text_size >> PAGE_SHIFT;
vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
vmspace->vm_dsize = data_size >> PAGE_SHIFT;
vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
- mtx_unlock(&vm_mtx);
addr = ELF_RTLD_ADDR(vmspace);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index aa1fa74..3859c5b 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -274,6 +274,8 @@ proc0_init(void *dummy __unused)
register struct filedesc0 *fdp;
register unsigned i;
+ GIANT_REQUIRED;
+
p = &proc0;
/*
@@ -373,14 +375,12 @@ proc0_init(void *dummy __unused)
limit0.p_refcnt = 1;
/* Allocate a prototype map so we have something to fork. */
- mtx_lock(&vm_mtx);
pmap_pinit0(vmspace_pmap(&vmspace0));
p->p_vmspace = &vmspace0;
vmspace0.vm_refcnt = 1;
vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_ADDRESS),
trunc_page(VM_MAXUSER_ADDRESS));
vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);
- mtx_unlock(&vm_mtx);
p->p_addr = proc0paddr; /* XXX */
/*
@@ -471,6 +471,8 @@ start_init(void *dummy)
mtx_lock(&Giant);
+ GIANT_REQUIRED;
+
p = curproc;
/* Get the vnode for '/'. Set p->p_fd->fd_cdir to reference it. */
@@ -486,13 +488,11 @@ start_init(void *dummy)
* Need just enough stack to hold the faked-up "execve()" arguments.
*/
addr = trunc_page(USRSTACK - PAGE_SIZE);
- mtx_lock(&vm_mtx);
if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
panic("init: couldn't allocate argument space");
p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
p->p_vmspace->vm_ssize = 1;
- mtx_unlock(&vm_mtx);
if ((var = getenv("init_path")) != NULL) {
strncpy(init_path, var, sizeof init_path);
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index ecadfed..2e0b60c 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -445,13 +445,13 @@ exec_map_first_page(imgp)
vm_page_t ma[VM_INITIAL_PAGEIN];
vm_object_t object;
+ GIANT_REQUIRED;
if (imgp->firstpage) {
exec_unmap_first_page(imgp);
}
VOP_GETVOBJECT(imgp->vp, &object);
- mtx_lock(&vm_mtx);
ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
@@ -482,7 +482,6 @@ exec_map_first_page(imgp)
vm_page_protect(ma[0], VM_PROT_NONE);
vm_page_free(ma[0]);
}
- mtx_unlock(&vm_mtx);
return EIO;
}
}
@@ -493,7 +492,6 @@ exec_map_first_page(imgp)
pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
imgp->firstpage = ma[0];
- mtx_unlock(&vm_mtx);
return 0;
}
@@ -501,12 +499,11 @@ void
exec_unmap_first_page(imgp)
struct image_params *imgp;
{
+ GIANT_REQUIRED;
if (imgp->firstpage) {
- mtx_lock(&vm_mtx);
pmap_kremove((vm_offset_t) imgp->image_header);
vm_page_unwire(imgp->firstpage, 1);
- mtx_unlock(&vm_mtx);
imgp->firstpage = NULL;
}
}
@@ -525,7 +522,8 @@ exec_new_vmspace(imgp)
caddr_t stack_addr = (caddr_t) (USRSTACK - MAXSSIZ);
vm_map_t map = &vmspace->vm_map;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
imgp->vmspace_destroyed = 1;
/*
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index b1a2ee5..d73c70b 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -120,6 +120,8 @@ exit1(p, rv)
register struct vmspace *vm;
struct exitlist *ep;
+ GIANT_REQUIRED;
+
if (p->p_pid == 1) {
printf("init died (signal %d, exit %d)\n",
WTERMSIG(rv), WEXITSTATUS(rv));
@@ -213,7 +215,6 @@ exit1(p, rv)
* Can't free the entire vmspace as the kernel stack
* may be mapped within that space also.
*/
- mtx_lock(&vm_mtx);
if (vm->vm_refcnt == 1) {
if (vm->vm_shm)
shmexit(p);
@@ -222,7 +223,6 @@ exit1(p, rv)
(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
}
- mtx_unlock(&vm_mtx);
PROC_LOCK(p);
if (SESS_LEADER(p)) {
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index dd3eb04..9eecbc2 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -209,6 +209,8 @@ fork1(p1, flags, procp)
struct forklist *ep;
struct filedesc *fd;
+ GIANT_REQUIRED;
+
/* Can't copy and clear */
if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
return (EINVAL);
@@ -218,9 +220,7 @@ fork1(p1, flags, procp)
* certain parts of a process from itself.
*/
if ((flags & RFPROC) == 0) {
-
vm_fork(p1, 0, flags);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
/*
* Close all file descriptors.
@@ -561,7 +561,6 @@ again:
* execution path later. (ie: directly into user mode)
*/
vm_fork(p1, p2, flags);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
if (flags == (RFFDG | RFPROC)) {
cnt.v_forks++;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index f46313c..f7503db 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -431,6 +431,8 @@ dosetrlimit(p, which, limp)
register struct rlimit *alimp;
int error;
+ GIANT_REQUIRED;
+
if (which >= RLIM_NLIMITS)
return (EINVAL);
alimp = &p->p_rlimit[which];
@@ -498,10 +500,8 @@ dosetrlimit(p, which, limp)
}
addr = trunc_page(addr);
size = round_page(size);
- mtx_lock(&vm_mtx);
(void) vm_map_protect(&p->p_vmspace->vm_map,
addr, addr+size, prot, FALSE);
- mtx_unlock(&vm_mtx);
}
break;
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index da7462a..439b9de 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -531,6 +531,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
int symcnt;
int strcnt;
+ GIANT_REQUIRED;
+
shdr = NULL;
lf = NULL;
@@ -657,10 +659,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
ef = (elf_file_t) lf;
#ifdef SPARSE_MAPPING
- mtx_lock(&vm_mtx);
ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT);
if (ef->object == NULL) {
- mtx_unlock(&vm_mtx);
free(ef, M_LINKER);
error = ENOMEM;
goto out;
@@ -673,11 +673,9 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
ef->object = 0;
goto out;
}
- mtx_unlock(&vm_mtx);
#else
ef->address = malloc(mapsize, M_LINKER, M_WAITOK);
if (!ef->address) {
@@ -705,12 +703,10 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
/*
* Wire down the pages
*/
- mtx_lock(&vm_mtx);
vm_map_pageable(kernel_map,
(vm_offset_t) segbase,
(vm_offset_t) segbase + segs[i]->p_memsz,
FALSE);
- mtx_unlock(&vm_mtx);
#endif
}
@@ -834,12 +830,10 @@ link_elf_unload_file(linker_file_t file)
}
#ifdef SPARSE_MAPPING
if (ef->object) {
- mtx_lock(&vm_mtx);
vm_map_remove(kernel_map, (vm_offset_t) ef->address,
(vm_offset_t) ef->address
+ (ef->object->size << PAGE_SHIFT));
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
}
#else
if (ef->address)
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index da7462a..439b9de 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -531,6 +531,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
int symcnt;
int strcnt;
+ GIANT_REQUIRED;
+
shdr = NULL;
lf = NULL;
@@ -657,10 +659,8 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
ef = (elf_file_t) lf;
#ifdef SPARSE_MAPPING
- mtx_lock(&vm_mtx);
ef->object = vm_object_allocate(OBJT_DEFAULT, mapsize >> PAGE_SHIFT);
if (ef->object == NULL) {
- mtx_unlock(&vm_mtx);
free(ef, M_LINKER);
error = ENOMEM;
goto out;
@@ -673,11 +673,9 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
VM_PROT_ALL, VM_PROT_ALL, 0);
if (error) {
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
ef->object = 0;
goto out;
}
- mtx_unlock(&vm_mtx);
#else
ef->address = malloc(mapsize, M_LINKER, M_WAITOK);
if (!ef->address) {
@@ -705,12 +703,10 @@ link_elf_load_file(linker_class_t cls, const char* filename, linker_file_t* resu
/*
* Wire down the pages
*/
- mtx_lock(&vm_mtx);
vm_map_pageable(kernel_map,
(vm_offset_t) segbase,
(vm_offset_t) segbase + segs[i]->p_memsz,
FALSE);
- mtx_unlock(&vm_mtx);
#endif
}
@@ -834,12 +830,10 @@ link_elf_unload_file(linker_file_t file)
}
#ifdef SPARSE_MAPPING
if (ef->object) {
- mtx_lock(&vm_mtx);
vm_map_remove(kernel_map, (vm_offset_t) ef->address,
(vm_offset_t) ef->address
+ (ef->object->size << PAGE_SHIFT));
vm_object_deallocate(ef->object);
- mtx_unlock(&vm_mtx);
}
#else
if (ef->address)
diff --git a/sys/kern/subr_blist.c b/sys/kern/subr_blist.c
index 061d151..6bb7ae7 100644
--- a/sys/kern/subr_blist.c
+++ b/sys/kern/subr_blist.c
@@ -71,6 +71,7 @@
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index 4136532..5ee4f5c 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -248,13 +248,14 @@ pipespace(cpipe, size)
caddr_t buffer;
int npages, error;
+ GIANT_REQUIRED;
+
npages = round_page(size)/PAGE_SIZE;
/*
* Create an object, I don't like the idea of paging to/from
* kernel_object.
* XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
*/
- mtx_lock(&vm_mtx);
object = vm_object_allocate(OBJT_DEFAULT, npages);
buffer = (caddr_t) vm_map_min(kernel_map);
@@ -269,13 +270,11 @@ pipespace(cpipe, size)
if (error != KERN_SUCCESS) {
vm_object_deallocate(object);
- mtx_unlock(&vm_mtx);
return (ENOMEM);
}
/* free old resources if we're resizing */
pipe_free_kmem(cpipe);
- mtx_unlock(&vm_mtx);
cpipe->pipe_buffer.object = object;
cpipe->pipe_buffer.buffer = buffer;
cpipe->pipe_buffer.size = size;
@@ -551,12 +550,13 @@ pipe_build_write_buffer(wpipe, uio)
int i;
vm_offset_t addr, endaddr, paddr;
+ GIANT_REQUIRED;
+
size = (u_int) uio->uio_iov->iov_len;
if (size > wpipe->pipe_buffer.size)
size = wpipe->pipe_buffer.size;
endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
- mtx_lock(&vm_mtx);
addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
vm_page_t m;
@@ -567,7 +567,6 @@ pipe_build_write_buffer(wpipe, uio)
for (j = 0; j < i; j++)
vm_page_unwire(wpipe->pipe_map.ms[j], 1);
- mtx_unlock(&vm_mtx);
return (EFAULT);
}
@@ -599,7 +598,6 @@ pipe_build_write_buffer(wpipe, uio)
pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
wpipe->pipe_map.npages);
- mtx_unlock(&vm_mtx);
/*
* and update the uio data
*/
@@ -622,7 +620,8 @@ pipe_destroy_write_buffer(wpipe)
{
int i;
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
if (wpipe->pipe_map.kva) {
pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
@@ -636,7 +635,6 @@ pipe_destroy_write_buffer(wpipe)
}
for (i = 0; i < wpipe->pipe_map.npages; i++)
vm_page_unwire(wpipe->pipe_map.ms[i], 1);
- mtx_unlock(&vm_mtx);
}
/*
@@ -1167,8 +1165,8 @@ static void
pipe_free_kmem(cpipe)
struct pipe *cpipe;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (cpipe->pipe_buffer.buffer != NULL) {
if (cpipe->pipe_buffer.size > PIPE_SIZE)
--nbigpipe;
@@ -1228,13 +1226,11 @@ pipeclose(cpipe)
/*
* free resources
*/
- mtx_lock(&vm_mtx);
pipe_free_kmem(cpipe);
/* XXX: erm, doesn't zalloc already have its own locks and
* not need the giant vm lock?
*/
zfree(pipe_zone, cpipe);
- mtx_unlock(&vm_mtx);
}
}
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 1d96cff..96a4541 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -187,7 +187,8 @@ shm_deallocate_segment(shmseg)
struct shm_handle *shm_handle;
size_t size;
- mtx_assert(&vm_mtx, MA_OWNED); /* For vm_object_deallocate. */
+ GIANT_REQUIRED;
+
shm_handle = shmseg->shm_internal;
vm_object_deallocate(shm_handle->shm_object);
free((caddr_t)shm_handle, M_SHM);
@@ -207,8 +208,7 @@ shm_delete_mapping(p, shmmap_s)
int segnum, result;
size_t size;
- /* For vm_map_remove and shm_deallocate_segment. */
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
segnum = IPCID_TO_IX(shmmap_s->shmid);
shmseg = &shmsegs[segnum];
@@ -254,9 +254,7 @@ shmdt(p, uap)
break;
if (i == shminfo.shmseg)
return EINVAL;
- mtx_lock(&vm_mtx);
error = shm_delete_mapping(p, shmmap_s);
- mtx_unlock(&vm_mtx);
return error;
}
@@ -282,6 +280,8 @@ shmat(p, uap)
vm_size_t size;
int rv;
+ GIANT_REQUIRED;
+
if (!jail_sysvipc_allowed && jailed(p->p_ucred))
return (ENOSYS);
@@ -334,17 +334,14 @@ shmat(p, uap)
}
shm_handle = shmseg->shm_internal;
- mtx_lock(&vm_mtx);
vm_object_reference(shm_handle->shm_object);
rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
return ENOMEM;
}
vm_map_inherit(&p->p_vmspace->vm_map,
attach_va, attach_va + size, VM_INHERIT_SHARE);
- mtx_unlock(&vm_mtx);
shmmap_s->va = attach_va;
shmmap_s->shmid = uap->shmid;
@@ -434,6 +431,8 @@ shmctl(p, uap)
struct shmid_ds inbuf;
struct shmid_ds *shmseg;
+ GIANT_REQUIRED;
+
if (!jail_sysvipc_allowed && jailed(p->p_ucred))
return (ENOSYS);
@@ -470,9 +469,7 @@ shmctl(p, uap)
shmseg->shm_perm.key = IPC_PRIVATE;
shmseg->shm_perm.mode |= SHMSEG_REMOVED;
if (shmseg->shm_nattch <= 0) {
- mtx_lock(&vm_mtx);
shm_deallocate_segment(shmseg);
- mtx_unlock(&vm_mtx);
shm_last_free = IPCID_TO_IX(uap->shmid);
}
break;
@@ -539,6 +536,8 @@ shmget_allocate_segment(p, uap, mode)
struct shmid_ds *shmseg;
struct shm_handle *shm_handle;
+ GIANT_REQUIRED;
+
if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
return EINVAL;
if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
@@ -574,7 +573,6 @@ shmget_allocate_segment(p, uap, mode)
* We make sure that we have allocated a pager before we need
* to.
*/
- mtx_lock(&vm_mtx);
if (shm_use_phys) {
shm_handle->shm_object =
vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
@@ -584,7 +582,6 @@ shmget_allocate_segment(p, uap, mode)
}
vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
- mtx_unlock(&vm_mtx);
shmseg->shm_internal = shm_handle;
shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
@@ -680,7 +677,8 @@ shmexit_myhook(p)
struct shmmap_state *shmmap_s;
int i;
- mtx_assert(&vm_mtx, MA_OWNED); /* For shm_delete_mapping. */
+ GIANT_REQUIRED;
+
shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
if (shmmap_s->shmid != -1)
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index 2718591..b86d17f 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -1489,8 +1489,9 @@ sf_buf_free(caddr_t addr, void *args)
struct sf_buf *sf;
struct vm_page *m;
+ GIANT_REQUIRED;
+
sf = dtosf(addr);
- mtx_lock(&vm_mtx);
pmap_qremove((vm_offset_t)addr, 1);
m = sf->m;
vm_page_unwire(m, 0);
@@ -1501,7 +1502,6 @@ sf_buf_free(caddr_t addr, void *args)
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
- mtx_unlock(&vm_mtx);
sf->m = NULL;
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
@@ -1536,6 +1536,8 @@ sendfile(struct proc *p, struct sendfile_args *uap)
off_t off, xfsize, sbytes = 0;
int error = 0, s;
+ GIANT_REQUIRED;
+
vp = NULL;
/*
* Do argument checking. Must be a regular file in, stream
@@ -1646,19 +1648,16 @@ retry_lookup:
*
* Wait and loop if busy.
*/
- mtx_lock(&vm_mtx);
pg = vm_page_lookup(obj, pindex);
if (pg == NULL) {
pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
if (pg == NULL) {
VM_WAIT;
- mtx_unlock(&vm_mtx);
goto retry_lookup;
}
vm_page_wakeup(pg);
} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
- mtx_unlock(&vm_mtx);
goto retry_lookup;
}
@@ -1683,7 +1682,6 @@ retry_lookup:
* completes.
*/
vm_page_io_start(pg);
- mtx_unlock(&vm_mtx);
/*
* Get the page from backing store.
@@ -1702,7 +1700,6 @@ retry_lookup:
error = VOP_READ(vp, &auio, IO_VMIO | ((MAXBSIZE / bsize) << 16),
p->p_ucred);
VOP_UNLOCK(vp, 0, p);
- mtx_lock(&vm_mtx);
vm_page_flag_clear(pg, PG_ZERO);
vm_page_io_finish(pg);
if (error) {
@@ -1717,7 +1714,6 @@ retry_lookup:
vm_page_busy(pg);
vm_page_free(pg);
}
- mtx_unlock(&vm_mtx);
sbunlock(&so->so_snd);
goto done;
}
@@ -1728,13 +1724,10 @@ retry_lookup:
* Get a sendfile buf. We usually wait as long as necessary,
* but this wait can be interrupted.
*/
- mtx_unlock(&vm_mtx);
if ((sf = sf_buf_alloc()) == NULL) {
- mtx_lock(&vm_mtx);
vm_page_unwire(pg, 0);
if (pg->wire_count == 0 && pg->object == NULL)
vm_page_free(pg);
- mtx_unlock(&vm_mtx);
sbunlock(&so->so_snd);
error = EINTR;
goto done;
@@ -1744,10 +1737,8 @@ retry_lookup:
* Allocate a kernel virtual page and insert the physical page
* into it.
*/
- mtx_lock(&vm_mtx);
sf->m = pg;
pmap_qenter(sf->kva, &pg, 1);
- mtx_unlock(&vm_mtx);
/*
* Get an mbuf header and set it up as having external storage.
*/
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 246fc4c..94baa5a 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -281,8 +281,6 @@ waitrunningbufspace(void)
* Called when a buffer is extended. This function clears the B_CACHE
* bit if the newly extended portion of the buffer does not contain
* valid data.
- *
- * must be called with vm_mtx held
*/
static __inline__
void
@@ -290,6 +288,8 @@ vfs_buf_test_cache(struct buf *bp,
vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
vm_page_t m)
{
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_CACHE) {
int base = (foff + off) & PAGE_MASK;
if (vm_page_is_valid(m, base, size) == 0)
@@ -340,6 +340,8 @@ bufinit(void)
struct buf *bp;
int i;
+ GIANT_REQUIRED;
+
TAILQ_INIT(&bswlist);
LIST_INIT(&invalhash);
mtx_init(&buftimelock, "buftime lock", MTX_DEF);
@@ -428,14 +430,11 @@ bufinit(void)
* from buf_daemon.
*/
- mtx_lock(&vm_mtx);
bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
bogus_page = vm_page_alloc(kernel_object,
((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
VM_ALLOC_NORMAL);
cnt.v_wire_count++;
- mtx_unlock(&vm_mtx);
-
}
/*
@@ -445,23 +444,19 @@ bufinit(void)
* buffer_map.
*
* Since this call frees up buffer space, we call bufspacewakeup().
- *
- * Must be called without the vm_mtx.
*/
static void
bfreekva(struct buf * bp)
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
if (bp->b_kvasize) {
++buffreekvacnt;
bufspace -= bp->b_kvasize;
- mtx_lock(&vm_mtx);
vm_map_delete(buffer_map,
(vm_offset_t) bp->b_kvabase,
(vm_offset_t) bp->b_kvabase + bp->b_kvasize
);
- mtx_unlock(&vm_mtx);
bp->b_kvasize = 0;
bufspacewakeup();
}
@@ -478,6 +473,8 @@ bremfree(struct buf * bp)
int s = splbio();
int old_qindex = bp->b_qindex;
+ GIANT_REQUIRED;
+
if (bp->b_qindex != QUEUE_NONE) {
KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
@@ -789,6 +786,8 @@ vfs_backgroundwritedone(bp)
void
bdwrite(struct buf * bp)
{
+ GIANT_REQUIRED;
+
if (BUF_REFCNT(bp) == 0)
panic("bdwrite: buffer is not busy");
@@ -817,7 +816,6 @@ bdwrite(struct buf * bp)
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
}
- mtx_lock(&vm_mtx);
/*
* Set the *dirty* buffer range based upon the VM system dirty pages.
*/
@@ -831,7 +829,6 @@ bdwrite(struct buf * bp)
* out on the next sync, or perhaps the cluster will be completed.
*/
vfs_clean_pages(bp);
- mtx_unlock(&vm_mtx);
bqrelse(bp);
/*
@@ -985,15 +982,14 @@ buf_dirty_count_severe(void)
* Release a busy buffer and, if requested, free its resources. The
* buffer will be stashed in the appropriate bufqueue[] allowing it
* to be accessed later as a cache entity or reused for other purposes.
- *
- * vm_mtx must be not be held.
*/
void
brelse(struct buf * bp)
{
int s;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
s = splbio();
@@ -1103,7 +1099,6 @@ brelse(struct buf * bp)
resid = bp->b_bufsize;
foff = bp->b_offset;
- mtx_lock(&vm_mtx);
for (i = 0; i < bp->b_npages; i++) {
int had_bogus = 0;
@@ -1115,12 +1110,10 @@ brelse(struct buf * bp)
* now.
*/
if (m == bogus_page) {
- mtx_unlock(&vm_mtx);
VOP_GETVOBJECT(vp, &obj);
poff = OFF_TO_IDX(bp->b_offset);
had_bogus = 1;
- mtx_lock(&vm_mtx);
for (j = i; j < bp->b_npages; j++) {
vm_page_t mtmp;
mtmp = bp->b_pages[j];
@@ -1154,14 +1147,11 @@ brelse(struct buf * bp)
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
} else if (bp->b_flags & B_VMIO) {
if (bp->b_flags & (B_INVAL | B_RELBUF)) {
- mtx_lock(&vm_mtx);
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
}
}
@@ -1326,9 +1316,6 @@ bqrelse(struct buf * bp)
splx(s);
}
-/*
- * Must be called with vm_mtx held.
- */
static void
vfs_vmio_release(bp)
struct buf *bp;
@@ -1336,7 +1323,8 @@ vfs_vmio_release(bp)
int i;
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
@@ -1372,8 +1360,6 @@ vfs_vmio_release(bp)
}
}
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
-
- /* could drop vm_mtx here */
if (bp->b_bufsize) {
bufspacewakeup();
@@ -1527,6 +1513,8 @@ getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
int nqindex;
static int flushingbufs;
+ GIANT_REQUIRED;
+
/*
* We can't afford to block since we might be holding a vnode lock,
* which may prevent system daemons from running. We deal with
@@ -1646,9 +1634,7 @@ restart:
if (qindex == QUEUE_CLEAN) {
if (bp->b_flags & B_VMIO) {
bp->b_flags &= ~B_ASYNC;
- mtx_lock(&vm_mtx);
vfs_vmio_release(bp);
- mtx_unlock(&vm_mtx);
}
if (bp->b_vp)
brelvp(bp);
@@ -1771,14 +1757,12 @@ restart:
bfreekva(bp);
- mtx_lock(&vm_mtx);
if (vm_map_findspace(buffer_map,
vm_map_min(buffer_map), maxsize, &addr)) {
/*
* Uh oh. Buffer map is to fragmented. We
* must defragment the map.
*/
- mtx_unlock(&vm_mtx);
++bufdefragcnt;
defrag = 1;
bp->b_flags |= B_INVAL;
@@ -1795,7 +1779,6 @@ restart:
bufspace += bp->b_kvasize;
++bufreusecnt;
}
- mtx_unlock(&vm_mtx);
}
bp->b_data = bp->b_kvabase;
}
@@ -1961,6 +1944,8 @@ inmem(struct vnode * vp, daddr_t blkno)
vm_page_t m;
vm_ooffset_t off;
+ GIANT_REQUIRED;
+
if (incore(vp, blkno))
return 1;
if (vp->v_mount == NULL)
@@ -1973,7 +1958,6 @@ inmem(struct vnode * vp, daddr_t blkno)
size = vp->v_mount->mnt_stat.f_iosize;
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
- mtx_lock(&vm_mtx);
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
@@ -1985,11 +1969,9 @@ inmem(struct vnode * vp, daddr_t blkno)
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
goto notinmem;
}
- mtx_unlock(&vm_mtx);
return 1;
notinmem:
- mtx_unlock(&vm_mtx);
return (0);
}
@@ -2003,8 +1985,6 @@ notinmem:
*
* This routine is primarily used by NFS, but is generalized for the
* B_VMIO case.
- *
- * Must be called with vm_mtx
*/
static void
vfs_setdirty(struct buf *bp)
@@ -2012,7 +1992,7 @@ vfs_setdirty(struct buf *bp)
int i;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Degenerate case - empty buffer
*/
@@ -2365,6 +2345,8 @@ allocbuf(struct buf *bp, int size)
int newbsize, mbsize;
int i;
+ GIANT_REQUIRED;
+
if (BUF_REFCNT(bp) == 0)
panic("allocbuf: buffer not busy");
@@ -2487,7 +2469,6 @@ allocbuf(struct buf *bp, int size)
* DEV_BSIZE aligned existing buffer size. Figure out
* if we have to remove any pages.
*/
- mtx_lock(&vm_mtx);
if (desiredpages < bp->b_npages) {
for (i = desiredpages; i < bp->b_npages; i++) {
/*
@@ -2508,7 +2489,6 @@ allocbuf(struct buf *bp, int size)
(desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
bp->b_npages = desiredpages;
}
- mtx_unlock(&vm_mtx);
} else if (size > bp->b_bcount) {
/*
* We are growing the buffer, possibly in a
@@ -2529,7 +2509,6 @@ allocbuf(struct buf *bp, int size)
vp = bp->b_vp;
VOP_GETVOBJECT(vp, &obj);
- mtx_lock(&vm_mtx);
while (bp->b_npages < desiredpages) {
vm_page_t m;
vm_pindex_t pi;
@@ -2639,8 +2618,6 @@ allocbuf(struct buf *bp, int size)
bp->b_npages
);
- mtx_unlock(&vm_mtx);
-
bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
(vm_offset_t)(bp->b_offset & PAGE_MASK));
}
@@ -2718,6 +2695,8 @@ bufdone(struct buf *bp)
int s, error;
void (*biodone) __P((struct buf *));
+ GIANT_REQUIRED;
+
s = splbio();
KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
@@ -2778,7 +2757,6 @@ bufdone(struct buf *bp)
if (error) {
panic("biodone: no object");
}
- mtx_lock(&vm_mtx);
#if defined(VFS_BIO_DEBUG)
if (obj->paging_in_progress < bp->b_npages) {
printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
@@ -2867,7 +2845,6 @@ bufdone(struct buf *bp)
}
if (obj)
vm_object_pip_wakeupn(obj, 0);
- mtx_unlock(&vm_mtx);
}
/*
@@ -2891,15 +2868,14 @@ bufdone(struct buf *bp)
* This routine is called in lieu of iodone in the case of
* incomplete I/O. This keeps the busy status for pages
* consistant.
- *
- * vm_mtx should not be held
*/
void
vfs_unbusy_pages(struct buf * bp)
{
int i;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
runningbufwakeup(bp);
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
@@ -2907,7 +2883,6 @@ vfs_unbusy_pages(struct buf * bp)
VOP_GETVOBJECT(vp, &obj);
- mtx_lock(&vm_mtx);
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
@@ -2924,7 +2899,6 @@ vfs_unbusy_pages(struct buf * bp)
vm_page_io_finish(m);
}
vm_object_pip_wakeupn(obj, 0);
- mtx_unlock(&vm_mtx);
}
}
@@ -2935,15 +2909,13 @@ vfs_unbusy_pages(struct buf * bp)
* range is restricted to the buffer's size.
*
* This routine is typically called after a read completes.
- *
- * vm_mtx should be held
*/
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
vm_ooffset_t soff, eoff;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Start and end offsets in buffer. eoff - soff may not cross a
* page boundry or cross the end of the buffer. The end of the
@@ -2979,15 +2951,14 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
* Since I/O has not been initiated yet, certain buffer flags
* such as BIO_ERROR or B_INVAL may be in an inconsistant state
* and should be ignored.
- *
- * vm_mtx should not be held
*/
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
int i, bogus;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
vm_object_t obj;
@@ -2997,7 +2968,6 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
- mtx_lock(&vm_mtx);
vfs_setdirty(bp);
retry:
@@ -3045,7 +3015,6 @@ retry:
}
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
}
}
@@ -3056,15 +3025,14 @@ retry:
*
* Note that while we only really need to clean through to b_bcount, we
* just go ahead and clean through to b_bufsize.
- *
- * should be called with vm_mtx held
*/
static void
vfs_clean_pages(struct buf * bp)
{
int i;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (bp->b_flags & B_VMIO) {
vm_ooffset_t foff;
@@ -3132,9 +3100,6 @@ vfs_bio_set_validclean(struct buf *bp, int base, int size)
*
* Note that while we only theoretically need to clear through b_bcount,
* we go ahead and clear through b_bufsize.
- *
- * We'll get vm_mtx here for safety if processing a VMIO buffer.
- * I don't think vm_mtx is needed, but we're twiddling vm_page flags.
*/
void
@@ -3142,8 +3107,9 @@ vfs_bio_clrbuf(struct buf *bp) {
int i, mask = 0;
caddr_t sa, ea;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
- mtx_lock(&vm_mtx);
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
@@ -3155,7 +3121,6 @@ vfs_bio_clrbuf(struct buf *bp) {
}
bp->b_pages[0]->valid |= mask;
bp->b_resid = 0;
- mtx_unlock(&vm_mtx);
return;
}
ea = sa = bp->b_data;
@@ -3183,7 +3148,6 @@ vfs_bio_clrbuf(struct buf *bp) {
vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
}
bp->b_resid = 0;
- mtx_unlock(&vm_mtx);
} else {
clrbuf(bp);
}
@@ -3193,8 +3157,6 @@ vfs_bio_clrbuf(struct buf *bp) {
* vm_hold_load_pages and vm_hold_free_pages get pages into
* a buffers address space. The pages are anonymous and are
* not associated with a file object.
- *
- * vm_mtx should not be held
*/
static void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
@@ -3203,16 +3165,14 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
vm_page_t p;
int index;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
to = round_page(to);
from = round_page(from);
index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
- mtx_lock(&vm_mtx);
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
-
tryagain:
-
/*
* note: must allocate system pages since blocking here
* could intefere with paging I/O, no matter which
@@ -3234,7 +3194,6 @@ tryagain:
vm_page_wakeup(p);
}
bp->b_npages = index;
- mtx_unlock(&vm_mtx);
}
void
@@ -3244,12 +3203,12 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
vm_page_t p;
int index, newnpages;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
from = round_page(from);
to = round_page(to);
newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
- mtx_lock(&vm_mtx);
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
p = bp->b_pages[index];
if (p && (index < bp->b_npages)) {
@@ -3265,7 +3224,6 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
}
}
bp->b_npages = newnpages;
- mtx_unlock(&vm_mtx);
}
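
The vfs_bio.c hunks above all apply the same conversion: paired mtx_lock(&vm_mtx)/mtx_unlock(&vm_mtx) regions around page and buffer manipulation are deleted, and each affected function instead asserts at entry that the caller already holds Giant. A minimal before/after sketch of that shape, with an illustrative function name and an elided body, assuming the caller is already running under Giant:

    /* Before: the function took and dropped the global VM mutex itself. */
    static void
    vfs_example_release(struct buf *bp)
    {
            int i;

            mtx_lock(&vm_mtx);
            for (i = 0; i < bp->b_npages; i++) {
                    /* ... hand bp->b_pages[i] back to the VM system ... */
            }
            mtx_unlock(&vm_mtx);
    }

    /* After: vm_mtx is gone; Giant must already be held, so just assert it. */
    static void
    vfs_example_release(struct buf *bp)
    {
            int i;

            GIANT_REQUIRED;

            for (i = 0; i < bp->b_npages; i++) {
                    /* ... hand bp->b_pages[i] back to the VM system ... */
            }
    }

The assertion compiles away in non-INVARIANTS kernels and documents exactly which functions still depend on Giant, which is what makes the later piecemeal removal testable.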
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index c9c09cb..b685740 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -320,6 +320,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
daddr_t bn;
int i, inc, j;
+ GIANT_REQUIRED;
+
KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
("cluster_rbuild: size %ld != filesize %ld\n",
size, vp->v_mount->mnt_stat.f_iosize));
@@ -433,7 +435,6 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
- mtx_lock(&vm_mtx);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@@ -447,12 +448,10 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
- mtx_unlock(&vm_mtx);
bp->b_bcount += tbp->b_bcount;
bp->b_bufsize += tbp->b_bufsize;
}
- mtx_lock(&vm_mtx);
for(j=0;j<bp->b_npages;j++) {
if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
VM_PAGE_BITS_ALL)
@@ -465,7 +464,6 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *)bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
return (bp);
}
@@ -482,15 +480,15 @@ cluster_callback(bp)
struct buf *nbp, *tbp;
int error = 0;
+ GIANT_REQUIRED;
+
/*
* Must propogate errors to all the components.
*/
if (bp->b_ioflags & BIO_ERROR)
error = bp->b_error;
- mtx_lock(&vm_mtx);
pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
- mtx_unlock(&vm_mtx);
/*
* Move memory from the large cluster buffer into the component
* buffers and mark IO as done on these.
@@ -724,6 +722,8 @@ cluster_wbuild(vp, size, start_lbn, len)
int totalwritten = 0;
int dbsize = btodb(size);
+ GIANT_REQUIRED;
+
while (len > 0) {
s = splbio();
/*
@@ -866,7 +866,6 @@ cluster_wbuild(vp, size, start_lbn, len)
}
}
- mtx_lock(&vm_mtx);
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
vm_page_io_start(m);
@@ -877,7 +876,6 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
- mtx_unlock(&vm_mtx);
}
bp->b_bcount += size;
bp->b_bufsize += size;
@@ -896,10 +894,8 @@ cluster_wbuild(vp, size, start_lbn, len)
tbp, b_cluster.cluster_entry);
}
finishcluster:
- mtx_lock(&vm_mtx);
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
- mtx_unlock(&vm_mtx);
if (bp->b_bufsize > bp->b_kvasize)
panic(
"cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index dc5b9c9..1a1f55b 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -527,6 +527,8 @@ vop_stdcreatevobject(ap)
vm_object_t object;
int error = 0;
+ GIANT_REQUIRED;
+
if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
return (0);
@@ -535,7 +537,6 @@ retry:
if (vp->v_type == VREG || vp->v_type == VDIR) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
- mtx_lock(&vm_mtx);
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (devsw(vp->v_rdev) != NULL) {
/*
@@ -543,7 +544,6 @@ retry:
* for a disk vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
- mtx_lock(&vm_mtx);
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
} else {
goto retn;
@@ -553,21 +553,14 @@ retry:
* that the object is associated with the vp.
*/
object->ref_count--;
- mtx_unlock(&vm_mtx);
vp->v_usecount--;
} else {
- /*
- * XXX: safe to hold vm mutex through VOP_UNLOCK?
- */
- mtx_lock(&vm_mtx);
if (object->flags & OBJ_DEAD) {
VOP_UNLOCK(vp, 0, p);
- msleep(object, VM_OBJECT_MTX(object), PVM, "vodead", 0);
- mtx_unlock(&vm_mtx);
+ tsleep(object, PVM, "vodead", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
goto retry;
}
- mtx_unlock(&vm_mtx);
}
KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
@@ -586,10 +579,11 @@ vop_stddestroyvobject(ap)
struct vnode *vp = ap->a_vp;
vm_object_t obj = vp->v_object;
+ GIANT_REQUIRED;
+
if (vp->v_object == NULL)
return (0);
- mtx_lock(&vm_mtx);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@@ -604,7 +598,6 @@ vop_stddestroyvobject(ap)
*/
vm_pager_deallocate(obj);
}
- mtx_unlock(&vm_mtx);
return (0);
}
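
In vop_stdcreatevobject() the wait for a dying object no longer sleeps on a VM mutex: with vm_mtx removed, the msleep() on VM_OBJECT_MTX(object) becomes a plain tsleep() while the vnode lock is dropped and re-taken around the retry. Pulled out of the hunk above into one readable loop, using only the identifiers already visible in the patch:

    retry:
            /* ... look up or allocate the VM object for the vnode ... */
            if (object->flags & OBJ_DEAD) {
                    /* Let the object finish dying before retrying. */
                    VOP_UNLOCK(vp, 0, p);
                    /* tsleep() takes no mutex argument; there is no vm_mtx left to drop. */
                    tsleep(object, PVM, "vodead", 0);
                    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
                    goto retry;
            }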
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 850914c..c35a73d 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -2763,6 +2763,8 @@ fsync(p, uap)
vm_object_t obj;
int error;
+ GIANT_REQUIRED;
+
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
@@ -2770,9 +2772,7 @@ fsync(p, uap)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0, 0);
- mtx_unlock(&vm_mtx);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 04941e3..b421902 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -711,7 +711,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
int s, error;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
if (flags & V_SAVE) {
s = splbio();
@@ -799,10 +799,8 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
*/
mtx_lock(&vp->v_interlock);
if (VOP_GETVOBJECT(vp, &object) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_remove(object, 0, 0,
(flags & V_SAVE) ? TRUE : FALSE);
- mtx_unlock(&vm_mtx);
}
mtx_unlock(&vp->v_interlock);
@@ -1136,8 +1134,6 @@ speedup_syncer()
* Also sets B_PAGING flag to indicate that vnode is not fully associated
* with the buffer. i.e. the bp has not been linked into the vnode or
* ref-counted.
- *
- * Doesn't block, only vnode seems to need a lock.
*/
void
pbgetvp(vp, bp)
@@ -1560,7 +1556,8 @@ vput(vp)
{
struct proc *p = curproc; /* XXX */
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
KASSERT(vp != NULL, ("vput: null vp"));
mtx_lock(&vp->v_interlock);
/* Skip this v_writecount check if we're going to panic below. */
@@ -2363,6 +2360,8 @@ vfs_msync(struct mount *mp, int flags) {
struct vm_object *obj;
int anyio, tries;
+ GIANT_REQUIRED;
+
tries = 5;
loop:
anyio = 0;
@@ -2394,11 +2393,9 @@ loop:
if (!vget(vp,
LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
- mtx_unlock(&vm_mtx);
anyio = 1;
}
vput(vp);
@@ -2427,8 +2424,7 @@ vfs_object_create(vp, p, cred)
struct proc *p;
struct ucred *cred;
{
-
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
return (VOP_CREATEVOBJECT(vp, cred, p));
}
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 850914c..c35a73d 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -2763,6 +2763,8 @@ fsync(p, uap)
vm_object_t obj;
int error;
+ GIANT_REQUIRED;
+
if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
return (error);
vp = (struct vnode *)fp->f_data;
@@ -2770,9 +2772,7 @@ fsync(p, uap)
return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
- mtx_lock(&vm_mtx);
vm_object_page_clean(obj, 0, 0, 0);
- mtx_unlock(&vm_mtx);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
#ifdef SOFTUPDATES
diff --git a/sys/nfs/nfs_bio.c b/sys/nfs/nfs_bio.c
index 5ff95e3..cadd2fb 100644
--- a/sys/nfs/nfs_bio.c
+++ b/sys/nfs/nfs_bio.c
@@ -111,6 +111,8 @@ nfs_getpages(ap)
struct nfsmount *nmp;
vm_page_t *pages;
+ GIANT_REQUIRED;
+
vp = ap->a_vp;
p = curproc; /* XXX */
cred = curproc->p_ucred; /* XXX */
@@ -118,7 +120,6 @@ nfs_getpages(ap)
pages = ap->a_m;
count = ap->a_count;
- mtx_assert(&Giant, MA_OWNED);
if (vp->v_object == NULL) {
printf("nfs_getpages: called with non-merged cache vnode??\n");
return VM_PAGER_ERROR;
@@ -126,9 +127,7 @@ nfs_getpages(ap)
if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
(nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
- mtx_unlock(&vm_mtx);
(void)nfs_fsinfo(nmp, vp, cred, p);
- mtx_lock(&vm_mtx);
}
npages = btoc(count);
@@ -172,9 +171,7 @@ nfs_getpages(ap)
uio.uio_rw = UIO_READ;
uio.uio_procp = p;
- mtx_unlock(&vm_mtx);
error = nfs_readrpc(vp, &uio, cred);
- mtx_lock(&vm_mtx);
pmap_qremove(kva, npages);
relpbuf(bp, &nfs_pbuf_freecnt);
@@ -274,6 +271,8 @@ nfs_putpages(ap)
struct nfsnode *np;
vm_page_t *pages;
+ GIANT_REQUIRED;
+
vp = ap->a_vp;
np = VTONFS(vp);
p = curproc; /* XXX */
@@ -285,12 +284,11 @@ nfs_putpages(ap)
npages = btoc(count);
offset = IDX_TO_OFF(pages[0]->pindex);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
(nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
- mtx_unlock(&vm_mtx);
(void)nfs_fsinfo(nmp, vp, cred, p);
- mtx_lock(&vm_mtx);
}
for (i = 0; i < npages; i++) {
@@ -331,9 +329,7 @@ nfs_putpages(ap)
else
iomode = NFSV3WRITE_FILESYNC;
- mtx_unlock(&vm_mtx);
error = nfs_writerpc(vp, &uio, cred, &iomode, &must_commit);
- mtx_lock(&vm_mtx);
pmap_qremove(kva, npages);
relpbuf(bp, &nfs_pbuf_freecnt);
@@ -345,9 +341,7 @@ nfs_putpages(ap)
vm_page_undirty(pages[i]);
}
if (must_commit) {
- mtx_unlock(&vm_mtx);
nfs_clearcommit(vp->v_mount);
- mtx_lock(&vm_mtx);
}
}
return rtvals[0];
@@ -770,6 +764,8 @@ nfs_write(ap)
int n, on, error = 0, iomode, must_commit;
int haverslock = 0;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("nfs_write mode");
@@ -1091,9 +1087,7 @@ again:
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
}
- mtx_lock(&vm_mtx);
vfs_bio_set_validclean(bp, on, n);
- mtx_unlock(&vm_mtx);
}
/*
diff --git a/sys/nfs/nfs_common.c b/sys/nfs/nfs_common.c
index b9c4197..ad8891c 100644
--- a/sys/nfs/nfs_common.c
+++ b/sys/nfs/nfs_common.c
@@ -2138,9 +2138,9 @@ nfs_clearcommit(mp)
register struct buf *bp, *nbp;
int s;
+ GIANT_REQUIRED;
+
s = splbio();
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
diff --git a/sys/nfs/nfs_subs.c b/sys/nfs/nfs_subs.c
index b9c4197..ad8891c 100644
--- a/sys/nfs/nfs_subs.c
+++ b/sys/nfs/nfs_subs.c
@@ -2138,9 +2138,9 @@ nfs_clearcommit(mp)
register struct buf *bp, *nbp;
int s;
+ GIANT_REQUIRED;
+
s = splbio();
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 5ff95e3..cadd2fb 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -111,6 +111,8 @@ nfs_getpages(ap)
struct nfsmount *nmp;
vm_page_t *pages;
+ GIANT_REQUIRED;
+
vp = ap->a_vp;
p = curproc; /* XXX */
cred = curproc->p_ucred; /* XXX */
@@ -118,7 +120,6 @@ nfs_getpages(ap)
pages = ap->a_m;
count = ap->a_count;
- mtx_assert(&Giant, MA_OWNED);
if (vp->v_object == NULL) {
printf("nfs_getpages: called with non-merged cache vnode??\n");
return VM_PAGER_ERROR;
@@ -126,9 +127,7 @@ nfs_getpages(ap)
if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
(nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
- mtx_unlock(&vm_mtx);
(void)nfs_fsinfo(nmp, vp, cred, p);
- mtx_lock(&vm_mtx);
}
npages = btoc(count);
@@ -172,9 +171,7 @@ nfs_getpages(ap)
uio.uio_rw = UIO_READ;
uio.uio_procp = p;
- mtx_unlock(&vm_mtx);
error = nfs_readrpc(vp, &uio, cred);
- mtx_lock(&vm_mtx);
pmap_qremove(kva, npages);
relpbuf(bp, &nfs_pbuf_freecnt);
@@ -274,6 +271,8 @@ nfs_putpages(ap)
struct nfsnode *np;
vm_page_t *pages;
+ GIANT_REQUIRED;
+
vp = ap->a_vp;
np = VTONFS(vp);
p = curproc; /* XXX */
@@ -285,12 +284,11 @@ nfs_putpages(ap)
npages = btoc(count);
offset = IDX_TO_OFF(pages[0]->pindex);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
(nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
- mtx_unlock(&vm_mtx);
(void)nfs_fsinfo(nmp, vp, cred, p);
- mtx_lock(&vm_mtx);
}
for (i = 0; i < npages; i++) {
@@ -331,9 +329,7 @@ nfs_putpages(ap)
else
iomode = NFSV3WRITE_FILESYNC;
- mtx_unlock(&vm_mtx);
error = nfs_writerpc(vp, &uio, cred, &iomode, &must_commit);
- mtx_lock(&vm_mtx);
pmap_qremove(kva, npages);
relpbuf(bp, &nfs_pbuf_freecnt);
@@ -345,9 +341,7 @@ nfs_putpages(ap)
vm_page_undirty(pages[i]);
}
if (must_commit) {
- mtx_unlock(&vm_mtx);
nfs_clearcommit(vp->v_mount);
- mtx_lock(&vm_mtx);
}
}
return rtvals[0];
@@ -770,6 +764,8 @@ nfs_write(ap)
int n, on, error = 0, iomode, must_commit;
int haverslock = 0;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("nfs_write mode");
@@ -1091,9 +1087,7 @@ again:
bp->b_dirtyoff = on;
bp->b_dirtyend = on + n;
}
- mtx_lock(&vm_mtx);
vfs_bio_set_validclean(bp, on, n);
- mtx_unlock(&vm_mtx);
}
/*
diff --git a/sys/nfsclient/nfs_subs.c b/sys/nfsclient/nfs_subs.c
index b9c4197..ad8891c 100644
--- a/sys/nfsclient/nfs_subs.c
+++ b/sys/nfsclient/nfs_subs.c
@@ -2138,9 +2138,9 @@ nfs_clearcommit(mp)
register struct buf *bp, *nbp;
int s;
+ GIANT_REQUIRED;
+
s = splbio();
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
diff --git a/sys/nfsserver/nfs_srvsubs.c b/sys/nfsserver/nfs_srvsubs.c
index b9c4197..ad8891c 100644
--- a/sys/nfsserver/nfs_srvsubs.c
+++ b/sys/nfsserver/nfs_srvsubs.c
@@ -2138,9 +2138,9 @@ nfs_clearcommit(mp)
register struct buf *bp, *nbp;
int s;
+ GIANT_REQUIRED;
+
s = splbio();
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
diff --git a/sys/pc98/cbus/cbus_dma.c b/sys/pc98/cbus/cbus_dma.c
index 5af3005..b1379f8 100644
--- a/sys/pc98/cbus/cbus_dma.c
+++ b/sys/pc98/cbus/cbus_dma.c
@@ -249,6 +249,8 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
int waport;
caddr_t newaddr;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (chan & ~VALID_DMA_MASK)
panic("isa_dmastart: channel out of range");
@@ -287,12 +289,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
- mtx_lock(&vm_mtx); /*
- * XXX: need to hold for longer period to
- * ensure that mappings don't change
- */
phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- mtx_unlock(&vm_mtx);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@@ -438,11 +435,11 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+ GIANT_REQUIRED;
+
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
- mtx_lock(&vm_mtx);
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
- mtx_unlock(&vm_mtx);
#ifdef EPSON_BOUNCEDMA
#define ISARAM_END 0xf00000
#else
diff --git a/sys/pc98/i386/machdep.c b/sys/pc98/i386/machdep.c
index a0768b1..c1222ab 100644
--- a/sys/pc98/i386/machdep.c
+++ b/sys/pc98/i386/machdep.c
@@ -276,7 +276,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -410,7 +409,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2116,6 +2114,8 @@ f00f_hack(void *unused) {
if (!has_f00f_bug)
return;
+ GIANT_REQUIRED;
+
printf("Intel Pentium detected, installing workaround for F00F bug\n");
r_idt.rd_limit = sizeof(idt0) - 1;
@@ -2131,11 +2131,9 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
- mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
- mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/pc98/pc98/isa_dma.c b/sys/pc98/pc98/isa_dma.c
index 5af3005..b1379f8 100644
--- a/sys/pc98/pc98/isa_dma.c
+++ b/sys/pc98/pc98/isa_dma.c
@@ -249,6 +249,8 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
int waport;
caddr_t newaddr;
+ GIANT_REQUIRED;
+
#ifdef DIAGNOSTIC
if (chan & ~VALID_DMA_MASK)
panic("isa_dmastart: channel out of range");
@@ -287,12 +289,7 @@ isa_dmastart(int flags, caddr_t addr, u_int nbytes, int chan)
}
/* translate to physical */
- mtx_lock(&vm_mtx); /*
- * XXX: need to hold for longer period to
- * ensure that mappings don't change
- */
phys = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
- mtx_unlock(&vm_mtx);
if (flags & ISADMA_RAW) {
dma_auto_mode |= (1 << chan);
@@ -438,11 +435,11 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
+ GIANT_REQUIRED;
+
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
- mtx_lock(&vm_mtx);
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
- mtx_unlock(&vm_mtx);
#ifdef EPSON_BOUNCEDMA
#define ISARAM_END 0xf00000
#else
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index a0768b1..c1222ab 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -276,7 +276,6 @@ cpu_startup(dummy)
/*
* Good {morning,afternoon,evening,night}.
*/
- mtx_lock(&vm_mtx);
earlysetcpuclass();
startrtclock();
printcpuinfo();
@@ -410,7 +409,6 @@ again:
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
(16*(ARG_MAX+(PAGE_SIZE*3))));
- mtx_unlock(&vm_mtx);
/*
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
@@ -2116,6 +2114,8 @@ f00f_hack(void *unused) {
if (!has_f00f_bug)
return;
+ GIANT_REQUIRED;
+
printf("Intel Pentium detected, installing workaround for F00F bug\n");
r_idt.rd_limit = sizeof(idt0) - 1;
@@ -2131,11 +2131,9 @@ f00f_hack(void *unused) {
r_idt.rd_base = (int)new_idt;
lidt(&r_idt);
idt = new_idt;
- mtx_lock(&vm_mtx);
if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
VM_PROT_READ, FALSE) != KERN_SUCCESS)
panic("vm_map_protect failed");
- mtx_unlock(&vm_mtx);
return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index c281ef1..4651cef 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -185,14 +185,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -241,10 +240,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -260,7 +260,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -278,10 +277,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -289,7 +289,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -347,17 +346,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -386,8 +381,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index c281ef1..4651cef 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -185,14 +185,13 @@ void
cpu_wait(p)
struct proc *p;
{
+ GIANT_REQUIRED;
- mtx_lock(&vm_mtx);
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
- mtx_unlock(&vm_mtx);
}
/*
@@ -241,10 +240,11 @@ vmapbuf(bp)
register caddr_t addr, v, kva;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
- mtx_lock(&vm_mtx);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
@@ -260,7 +260,6 @@ vmapbuf(bp)
vm_page_hold(PHYS_TO_VM_PAGE(pa));
pmap_kenter((vm_offset_t) v, pa);
}
- mtx_unlock(&vm_mtx);
kva = bp->b_saveaddr;
bp->b_saveaddr = bp->b_data;
@@ -278,10 +277,11 @@ vunmapbuf(bp)
register caddr_t addr;
vm_offset_t pa;
+ GIANT_REQUIRED;
+
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- mtx_lock(&vm_mtx);
for (addr = (caddr_t)trunc_page(bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
@@ -289,7 +289,6 @@ vunmapbuf(bp)
pmap_kremove((vm_offset_t) addr);
vm_page_unhold(PHYS_TO_VM_PAGE(pa));
}
- mtx_unlock(&vm_mtx);
bp->b_data = bp->b_saveaddr;
}
@@ -347,17 +346,13 @@ vm_page_zero_idle()
* pages because doing so may flush our L1 and L2 caches too much.
*/
- if (mtx_trylock(&vm_mtx) == 0)
- return (0);
if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) {
- mtx_unlock(&vm_mtx);
return(0);
}
-
+ if (mtx_trylock(&Giant)) {
s = splvm();
m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
zero_state = 0;
@@ -386,8 +381,10 @@ vm_page_zero_idle()
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
splx(s);
- mtx_unlock(&vm_mtx);
+ mtx_unlock(&Giant);
return (1);
+ }
+ return(0);
}
/*
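
Both vm_machdep.c copies above change the idle-time page zeroing loop from opportunistically trying vm_mtx to opportunistically trying Giant: the cheap "enough pages already zeroed" checks now run without any lock, and real work happens only when mtx_trylock(&Giant) succeeds, so the idle loop never blocks. A reduced sketch of the pattern, with a placeholder function name and the page-zeroing body elided:

    static int
    example_zero_idle(void)
    {
            int s;

            /* Bail out cheaply; no lock is needed just to decide not to work. */
            if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
                    return (0);
            if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
                    return (0);

            /* Do real work only if Giant can be taken without blocking. */
            if (mtx_trylock(&Giant)) {
                    s = splvm();
                    /* ... pull a free page, zero it, update the counters ... */
                    splx(s);
                    mtx_unlock(&Giant);
                    return (1);
            }
            return (0);
    }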
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index b5418da..eeca8dc 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -372,8 +372,34 @@ do { \
#define mtx_assert(m, what) \
_mtx_assert((m), (what), __FILE__, __LINE__)
+/*
+ * GIANT_REQUIRED; - place at the beginning of a procedure
+ *
+ * Asserts that Giant is held and that it has not been marked optional
+ * for the current process (see START_GIANT_OPTIONAL below).
+ */
+
+#define GIANT_REQUIRED \
+ do { \
+ KASSERT(curproc->p_giant_optional == 0, ("Giant not optional at %s: %d", __FILE__, __LINE__)); \
+ mtx_assert(&Giant, MA_OWNED); \
+ } while(0)
+#define START_GIANT_DEPRECIATED(sysctlvar) \
+ int __gotgiant = (curproc->p_giant_optional == 0 && sysctlvar) ? \
+ (mtx_lock(&Giant), 1) : 0
+#define END_GIANT_DEPRECIATED \
+ if (__gotgiant) mtx_unlock(&Giant)
+#define START_GIANT_OPTIONAL \
+ ++curproc->p_giant_optional
+#define END_GIANT_OPTIONAL \
+ --curproc->p_giant_optional
+
#else /* INVARIANTS */
#define mtx_assert(m, what)
+#define GIANT_REQUIRED
+#define START_GIANT_DEPRECIATED(sysctl)
+#define END_GIANT_DEPRECIATED
+#define START_GIANT_OPTIONAL
+#define END_GIANT_OPTIONAL
#endif /* INVARIANTS */
#endif /* _KERNEL */
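
These macros are the mechanism for the staged removal. GIANT_REQUIRED documents and, under INVARIANTS, asserts that a function still depends on Giant; START_GIANT_DEPRECIATED/END_GIANT_DEPRECIATED let a subsystem keep taking Giant only while a tunable says it still needs it; START/END_GIANT_OPTIONAL mark regions where a process declares it can run without Giant, so that hitting a GIANT_REQUIRED path there trips the sanity check. A hypothetical subsystem would wire the deprecated form up roughly as follows; the sysctl node, variable, and function names here are examples only:

    /* 1 = this subsystem still wants Giant; flip to 0 once it is MP-safe. */
    static int foo_giant = 1;
    SYSCTL_INT(_debug, OID_AUTO, foo_giant, CTLFLAG_RW, &foo_giant, 0,
        "Acquire Giant around the foo subsystem");

    void
    foo_operation(void)
    {
            /* Locks Giant only if foo_giant is set and Giant is not optional here. */
            START_GIANT_DEPRECIATED(foo_giant);

            /* ... work that may or may not still need Giant ... */

            END_GIANT_DEPRECIATED;
    }

Note that START_GIANT_DEPRECIATED declares a local variable, so it has to sit at the top of a block, ahead of any statements.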
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 1552088..78cfe4f 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -276,6 +276,7 @@ struct proc {
struct pasleep p_asleep; /* (k) Used by asleep()/await(). */
void *p_emuldata; /* (c) Emulator state data. */
struct trapframe *p_frame; /* (k) */
+ int p_giant_optional; /* (i) Giant Lock Sanity */
};
#define p_session p_pgrp->pg_session
diff --git a/sys/ufs/ufs/ufs_readwrite.c b/sys/ufs/ufs/ufs_readwrite.c
index c40d98f..0308895 100644
--- a/sys/ufs/ufs/ufs_readwrite.c
+++ b/sys/ufs/ufs/ufs_readwrite.c
@@ -80,6 +80,8 @@ READ(ap)
int ioflag;
vm_object_t object;
+ GIANT_REQUIRED;
+
vp = ap->a_vp;
seqcount = ap->a_ioflag >> 16;
ip = VTOI(vp);
@@ -115,9 +117,7 @@ READ(ap)
}
if (object) {
- mtx_lock(&vm_mtx);
vm_object_reference(object);
- mtx_unlock(&vm_mtx);
}
#ifdef ENABLE_VFS_IOOPT
@@ -151,9 +151,7 @@ READ(ap)
ip->i_flag |= IN_ACCESS;
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return error;
}
@@ -199,9 +197,7 @@ READ(ap)
MNT_NOATIME) == 0)
ip->i_flag |= IN_ACCESS;
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return error;
}
@@ -374,9 +370,7 @@ READ(ap)
}
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
if ((error == 0 || uio->uio_resid != orig_resid) &&
(vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
@@ -408,6 +402,8 @@ WRITE(ap)
int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
vm_object_t object;
+ GIANT_REQUIRED;
+
extended = 0;
seqcount = ap->a_ioflag >> 16;
ioflag = ap->a_ioflag;
@@ -417,9 +413,7 @@ WRITE(ap)
object = vp->v_object;
if (object) {
- mtx_lock(&vm_mtx);
vm_object_reference(object);
- mtx_unlock(&vm_mtx);
}
#ifdef DIAGNOSTIC
@@ -433,9 +427,7 @@ WRITE(ap)
uio->uio_offset = ip->i_size;
if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) {
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return (EPERM);
}
@@ -456,9 +448,7 @@ WRITE(ap)
if (uio->uio_offset < 0 ||
(u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return (EFBIG);
}
@@ -474,9 +464,7 @@ WRITE(ap)
psignal(p, SIGXFSZ);
PROC_UNLOCK(p);
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return (EFBIG);
}
@@ -488,11 +476,9 @@ WRITE(ap)
flags = B_SYNC;
if (object && (object->flags & OBJ_OPT)) {
- mtx_lock(&vm_mtx);
vm_freeze_copyopts(object,
OFF_TO_IDX(uio->uio_offset),
OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
- mtx_unlock(&vm_mtx);
}
for (error = 0; uio->uio_resid > 0;) {
@@ -595,9 +581,7 @@ WRITE(ap)
error = UFS_UPDATE(vp, 1);
if (object) {
- mtx_lock(&vm_mtx);
vm_object_vndeallocate(object);
- mtx_unlock(&vm_mtx);
}
return (error);
@@ -627,6 +611,7 @@ ffs_getpages(ap)
int rtval;
int pagesperblock;
+ GIANT_REQUIRED;
pcount = round_page(ap->a_count) / PAGE_SIZE;
mreq = ap->a_m[ap->a_reqpage];
@@ -668,16 +653,9 @@ ffs_getpages(ap)
reqlblkno = foff / bsize;
poff = (foff % bsize) / PAGE_SIZE;
- mtx_unlock(&vm_mtx);
- mtx_lock(&Giant);
-
dp = VTOI(vp)->i_devvp;
if (ufs_bmaparray(vp, reqlblkno, &reqblkno, &bforwards, &bbackwards)
|| (reqblkno == -1)) {
-
- mtx_unlock(&Giant);
- mtx_lock(&vm_mtx);
-
for(i = 0; i < pcount; i++) {
if (i != ap->a_reqpage)
vm_page_free(ap->a_m[i]);
@@ -693,9 +671,6 @@ ffs_getpages(ap)
}
}
- mtx_unlock(&Giant);
- mtx_lock(&vm_mtx);
-
physoffset = (off_t)reqblkno * DEV_BSIZE + poff * PAGE_SIZE;
pagesperblock = bsize / PAGE_SIZE;
/*
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 0fb4896..21a3b7c 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -42,6 +42,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index b7c35af..af52cd9 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -43,6 +43,7 @@
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sx.h>
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index b3355c7..8ac32d0 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -31,6 +31,7 @@
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
@@ -62,6 +63,8 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
{
vm_object_t object;
+ GIANT_REQUIRED;
+
/*
* Offset should be page aligned.
*/
@@ -76,7 +79,7 @@ phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
while (phys_pager_alloc_lock) {
phys_pager_alloc_lock = -1;
- msleep(&phys_pager_alloc_lock, &vm_mtx, PVM, "swpalc", 0);
+ tsleep(&phys_pager_alloc_lock, PVM, "swpalc", 0);
}
phys_pager_alloc_lock = 1;
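
phys_pager keeps its simple flag-based interlock, but the sleep that used to hand vm_mtx to msleep() becomes a plain tsleep(); the surrounding flag handling is unchanged. For reference, the two sleep calls differ only in whether a mutex is atomically released and reacquired around the sleep:

    /* Old form: drop and re-take the given mutex across the sleep. */
    msleep(&phys_pager_alloc_lock, &vm_mtx, PVM, "swpalc", 0);

    /* New form: no mutex argument; the caller's Giant-based locking is unchanged. */
    tsleep(&phys_pager_alloc_lock, PVM, "swpalc", 0);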
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index edbfa8e..53b78de 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -80,6 +80,7 @@
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
+#include <sys/sx.h>
#include <sys/vmmeter.h>
#ifndef MAX_PAGEOUT_CLUSTER
@@ -118,12 +119,12 @@ static int nsw_wcount_sync; /* limit write buffers / synchronous */
static int nsw_wcount_async; /* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max; /* maximum VOP I/O allowed */
-static int sw_alloc_interlock; /* swap pager allocation interlock */
struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4; /* maximum in-progress async I/O's */
+static struct sx sw_alloc_sx;
/* from vm_swap.c */
extern struct vnode *swapdev_vp;
@@ -232,8 +233,8 @@ static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
static __inline void
swp_sizecheck()
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (vm_swap_size < nswap_lowat) {
if (swap_pager_almost_full == 0) {
printf("swap_pager: out of swap space\n");
@@ -383,7 +384,8 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
{
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (handle) {
/*
* Reference existing named region or allocate new one. There
@@ -391,13 +393,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* as called from vm_page_remove() in regards to the lookup
* of the handle.
*/
-
- while (sw_alloc_interlock) {
- sw_alloc_interlock = -1;
- msleep(&sw_alloc_interlock, &vm_mtx, PVM, "swpalc", 0);
- }
- sw_alloc_interlock = 1;
-
+ sx_xlock(&sw_alloc_sx);
object = vm_pager_object_lookup(NOBJLIST(handle), handle);
if (object != NULL) {
@@ -409,10 +405,7 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
}
-
- if (sw_alloc_interlock == -1)
- wakeup(&sw_alloc_interlock);
- sw_alloc_interlock = 0;
+ sx_xunlock(&sw_alloc_sx);
} else {
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(offset + PAGE_MASK + size));
@@ -442,12 +435,12 @@ swap_pager_dealloc(object)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Remove from list right away so lookups will fail if we block for
* pageout completion.
*/
-
mtx_lock(&sw_alloc_mtx);
if (object->handle == NULL) {
TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
@@ -488,7 +481,6 @@ swap_pager_dealloc(object)
*
* This routine may not block
* This routine must be called at splvm().
- * vm_mtx should be held
*/
static __inline daddr_t
@@ -497,7 +489,8 @@ swp_pager_getswapspace(npages)
{
daddr_t blk;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
if (swap_pager_full != 2) {
printf("swap_pager_getswapspace: failed\n");
@@ -526,7 +519,6 @@ swp_pager_getswapspace(npages)
*
* This routine may not block
* This routine must be called at splvm().
- * vm_mtx should be held
*/
static __inline void
@@ -534,8 +526,8 @@ swp_pager_freeswapspace(blk, npages)
daddr_t blk;
int npages;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
blist_free(swapblist, blk, npages);
vm_swap_size += npages;
/* per-swap area stats */
@@ -566,9 +558,8 @@ swap_pager_freespace(object, start, size)
vm_size_t size;
{
int s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
swp_pager_meta_free(object, start, size);
splx(s);
}
@@ -651,10 +642,9 @@ swap_pager_copy(srcobject, dstobject, offset, destroysource)
vm_pindex_t i;
int s;
- s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+ s = splvm();
/*
* If destroysource is set, we remove the source object from the
* swap_pager internal queue now.
@@ -871,8 +861,8 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
char *data;
struct buf *nbp = NULL;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
/* XXX: KASSERT instead ? */
if (bp->bio_bcount & PAGE_MASK) {
biofinish(bp, NULL, EINVAL);
@@ -903,9 +893,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
* FREE PAGE(s) - destroy underlying swap that is no longer
* needed.
*/
- mtx_lock(&vm_mtx);
swp_pager_meta_free(object, start, count);
- mtx_unlock(&vm_mtx);
splx(s);
bp->bio_resid = 0;
biodone(bp);
@@ -915,8 +903,6 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
/*
* Execute read or write
*/
-
- mtx_lock(&vm_mtx);
while (count > 0) {
daddr_t blk;
@@ -959,9 +945,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
cnt.v_swappgsout += btoc(nbp->b_bcount);
nbp->b_dirtyend = nbp->b_bcount;
}
- mtx_unlock(&vm_mtx);
flushchainbuf(nbp);
- mtx_lock(&vm_mtx);
s = splvm();
nbp = NULL;
}
@@ -981,9 +965,7 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
bp->bio_resid -= PAGE_SIZE;
} else {
if (nbp == NULL) {
- mtx_unlock(&vm_mtx);
nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
- mtx_lock(&vm_mtx);
nbp->b_blkno = blk;
nbp->b_bcount = 0;
nbp->b_data = data;
@@ -1010,11 +992,9 @@ swap_pager_strategy(vm_object_t object, struct bio *bp)
cnt.v_swappgsout += btoc(nbp->b_bcount);
nbp->b_dirtyend = nbp->b_bcount;
}
- mtx_unlock(&vm_mtx);
flushchainbuf(nbp);
/* nbp = NULL; */
- } else
- mtx_unlock(&vm_mtx);
+ }
/*
* Wait for completion.
*/
@@ -1057,7 +1037,8 @@ swap_pager_getpages(object, m, count, reqpage)
vm_offset_t kva;
vm_pindex_t lastpindex;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
mreq = m[reqpage];
if (mreq->object != object) {
@@ -1185,10 +1166,8 @@ swap_pager_getpages(object, m, count, reqpage)
*
* NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
*/
- mtx_unlock(&vm_mtx);
BUF_KERNPROC(bp);
BUF_STRATEGY(bp);
- mtx_lock(&vm_mtx);
/*
* wait for the page we want to complete. PG_SWAPINPROG is always
@@ -1201,7 +1180,7 @@ swap_pager_getpages(object, m, count, reqpage)
while ((mreq->flags & PG_SWAPINPROG) != 0) {
vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
cnt.v_intrans++;
- if (msleep(mreq, &vm_mtx, PSWP, "swread", hz*20)) {
+ if (tsleep(mreq, PSWP, "swread", hz*20)) {
printf(
"swap_pager: indefinite wait buffer: device:"
" %s, blkno: %ld, size: %ld\n",
@@ -1267,7 +1246,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
int i;
int n = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (count && m[0]->object != object) {
panic("swap_pager_getpages: object mismatch %p/%p",
object,
@@ -1432,7 +1411,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swapdev_vp->v_numoutput++;
splx(s);
- mtx_unlock(&vm_mtx);
/*
* asynchronous
@@ -1444,7 +1422,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
bp->b_iodone = swp_pager_async_iodone;
BUF_KERNPROC(bp);
BUF_STRATEGY(bp);
- mtx_lock(&vm_mtx);
for (j = 0; j < n; ++j)
rtvals[i+j] = VM_PAGER_PEND;
@@ -1482,8 +1459,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
*/
swp_pager_async_iodone(bp);
- mtx_lock(&vm_mtx);
-
splx(s);
}
}
@@ -1533,7 +1508,8 @@ swp_pager_async_iodone(bp)
int i;
vm_object_t object = NULL;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
+ GIANT_REQUIRED;
+
bp->b_flags |= B_DONE;
/*
@@ -1562,7 +1538,6 @@ swp_pager_async_iodone(bp)
/*
* remove the mapping for kernel virtual
*/
- mtx_lock(&vm_mtx);
pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
/*
@@ -1697,7 +1672,6 @@ swp_pager_async_iodone(bp)
if (object)
vm_object_pip_wakeupn(object, bp->b_npages);
- mtx_unlock(&vm_mtx);
/*
* release the physical I/O buffer
*/
@@ -1771,8 +1745,6 @@ swp_pager_hash(vm_object_t object, vm_pindex_t index)
*
* This routine must be called at splvm(), except when used to convert
* an OBJT_DEFAULT object into an OBJT_SWAP object.
- *
- * Requires vm_mtx.
*/
static void
@@ -1784,7 +1756,7 @@ swp_pager_meta_build(
struct swblock *swap;
struct swblock **pswap;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Convert default object to swap object if necessary
*/
@@ -1871,15 +1843,13 @@ retry:
* out. This routine does *NOT* operate on swap metadata associated
* with resident pages.
*
- * vm_mtx must be held
* This routine must be called at splvm()
*/
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type != OBJT_SWAP)
return;
@@ -1920,7 +1890,6 @@ swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
* an object.
*
* This routine must be called at splvm()
- * Requires vm_mtx.
*/
static void
@@ -1928,7 +1897,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type != OBJT_SWAP)
return;
@@ -1978,7 +1947,6 @@ swp_pager_meta_free_all(vm_object_t object)
* busy page.
*
* This routine must be called at splvm().
- * Requires vm_mtx.
*
* SWM_FREE remove and free swap block from metadata
* SWM_POP remove from meta data but do not free.. pop it out
@@ -1994,7 +1962,7 @@ swp_pager_meta_ctl(
struct swblock *swap;
daddr_t r1;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* The meta data only exists of the object is OBJT_SWAP
* and even then might not be allocated yet.
@@ -2082,8 +2050,6 @@ vm_pager_chain_iodone(struct buf *nbp)
* Obtain a physical buffer and chain it to its parent buffer. When
* I/O completes, the parent buffer will be B_SIGNAL'd. Errors are
* automatically propagated to the parent
- *
- * vm_mtx can't be held
*/
struct buf *
@@ -2092,8 +2058,7 @@ getchainbuf(struct bio *bp, struct vnode *vp, int flags)
struct buf *nbp;
u_int *count;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
nbp = getpbuf(NULL);
count = (u_int *)&(bp->bio_caller1);
@@ -2120,9 +2085,7 @@ getchainbuf(struct bio *bp, struct vnode *vp, int flags)
void
flushchainbuf(struct buf *nbp)
{
-
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (nbp->b_bcount) {
nbp->b_bufsize = nbp->b_bcount;
if (nbp->b_iocmd == BIO_WRITE)
@@ -2140,8 +2103,7 @@ waitchainbuf(struct bio *bp, int limit, int done)
int s;
u_int *count;
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
count = (u_int *)&(bp->bio_caller1);
s = splbio();
while (*count > limit) {
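
Two locking idioms are replaced in swap_pager.c. Sleeps that used to hand vm_mtx to msleep() become plain tsleep() calls under Giant, and the hand-rolled sw_alloc_interlock (an int flag driven by msleep()/wakeup()) becomes a real shared/exclusive lock, sw_alloc_sx, serializing named-object allocation. A sketch of the resulting sx(9) pattern follows; the initialization call is assumed to happen once at pager setup, which is not visible in the hunks above, and the lookup body is elided:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/sx.h>

    static struct sx example_alloc_sx;      /* plays the role of sw_alloc_sx */

    static void
    example_pager_init(void)
    {
            /* Set up the lock once, before the first allocation. */
            sx_init(&example_alloc_sx, "example alloc");
    }

    static void
    example_pager_alloc(void *handle)
    {
            sx_xlock(&example_alloc_sx);
            /* ... look up the named object; create and insert it if missing ... */
            sx_xunlock(&example_alloc_sx);
    }

Unlike the old flag-and-wakeup scheme, the sx lock needs no separate mutex to sleep on and handles the wakeup bookkeeping itself.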
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 5915b29..38f04ac 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -95,10 +95,6 @@ typedef struct vm_map *vm_map_t;
struct vm_object;
typedef struct vm_object *vm_object_t;
-#ifdef _KERNEL
-extern struct mtx vm_mtx;
-#endif
-
#ifndef _KERNEL
/*
* This is defined in <sys/types.h> for the kernel so that non-vm kernel
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index e263280..a25b805 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -136,9 +136,7 @@ unlock_map(struct faultstate *fs)
static void
_unlock_things(struct faultstate *fs, int dealloc)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
vm_object_pip_wakeup(fs->object);
if (fs->object != fs->first_object) {
vm_page_free(fs->first_m);
@@ -150,13 +148,8 @@ _unlock_things(struct faultstate *fs, int dealloc)
}
unlock_map(fs);
if (fs->vp != NULL) {
- struct vnode *vp;
-
- vp = fs->vp;
+ vput(fs->vp);
fs->vp = NULL;
- mtx_unlock(&vm_mtx);
- vput(vp);
- mtx_lock(&vm_mtx);
}
}
@@ -189,37 +182,20 @@ _unlock_things(struct faultstate *fs, int dealloc)
*
*
* The map in question must be referenced, and remains so.
- * Caller may hold no locks except the vm_mtx which will be
- * locked if needed.
+ * Caller may hold no locks.
*/
static int vm_fault1 __P((vm_map_t, vm_offset_t, vm_prot_t, int));
-static int vm_faults_no_vm_mtx;
-SYSCTL_INT(_vm, OID_AUTO, vm_faults_no_vm_mtx, CTLFLAG_RW,
- &vm_faults_no_vm_mtx, 0, "");
-
-static int vm_faults_no_giant;
-SYSCTL_INT(_vm, OID_AUTO, vm_faults_no_giant, CTLFLAG_RW,
- &vm_faults_no_giant, 0, "");
-
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int fault_flags)
{
- int hadvmlock, hadgiant, ret;
+ int ret;
- hadvmlock = mtx_owned(&vm_mtx);
- hadgiant = mtx_owned(&Giant);
mtx_lock(&Giant);
- if (!hadvmlock) {
- mtx_lock(&vm_mtx);
- vm_faults_no_vm_mtx++;
- if (hadgiant == 0)
- vm_faults_no_giant++;
- }
+ /* GIANT_REQUIRED */
+
ret = vm_fault1(map, vaddr, fault_type, fault_flags);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (ret);
}
@@ -238,7 +214,8 @@ vm_fault1(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
int faultcount;
struct faultstate fs;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
cnt.v_vm_faults++;
hardfault = 0;
@@ -296,9 +273,7 @@ RetryFault:;
vm_object_reference(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
- mtx_unlock(&vm_mtx);
fs.vp = vnode_pager_lock(fs.first_object);
- mtx_lock(&vm_mtx);
if ((fault_type & VM_PROT_WRITE) &&
(fs.first_object->type == OBJT_VNODE)) {
vm_freeze_copyopts(fs.first_object,
@@ -770,9 +745,7 @@ readrest:
*/
if (fs.vp != NULL) {
- mtx_unlock(&vm_mtx);
vput(fs.vp);
- mtx_lock(&vm_mtx);
fs.vp = NULL;
}
@@ -989,7 +962,8 @@ vm_fault_user_wire(map, start, end)
register pmap_t pmap;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
pmap = vm_map_pmap(map);
/*
@@ -1164,7 +1138,6 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* number of pages in marray
*
* This routine can't block.
- * vm_mtx must be held.
*/
static int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
@@ -1180,7 +1153,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
object = m->object;
pindex = m->pindex;
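
The fault path itself keeps the simplest possible discipline: the exported vm_fault() takes Giant unconditionally, the internal vm_fault1() opens with GIANT_REQUIRED, and the sysctl counters that tracked callers arriving without vm_mtx or Giant are retired along with the conditional locking. The wrapper, as the patch leaves it:

    int
    vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
        int fault_flags)
    {
            int ret;

            mtx_lock(&Giant);
            /* vm_fault1() asserts GIANT_REQUIRED and does the real work. */
            ret = vm_fault1(map, vaddr, fault_type, fault_flags);
            mtx_unlock(&Giant);
            return (ret);
    }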
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index ba14789..ce610d3 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -145,6 +145,8 @@ useracc(addr, len, rw)
vm_map_t map;
vm_map_entry_t save_hint;
+ GIANT_REQUIRED;
+
KASSERT((rw & (~VM_PROT_ALL)) == 0,
("illegal ``rw'' argument to useracc (%x)\n", rw));
prot = rw;
@@ -161,7 +163,6 @@ useracc(addr, len, rw)
|| (vm_offset_t) addr + len < (vm_offset_t) addr) {
return (FALSE);
}
- mtx_lock(&vm_mtx);
map = &curproc->p_vmspace->vm_map;
vm_map_lock_read(map);
/*
@@ -173,7 +174,6 @@ useracc(addr, len, rw)
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
map->hint = save_hint;
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
return (rv == TRUE);
}
@@ -183,12 +183,10 @@ vslock(addr, len)
caddr_t addr;
u_int len;
{
-
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
vm_map_pageable(&curproc->p_vmspace->vm_map,
trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), FALSE);
- mtx_unlock(&vm_mtx);
}
void
@@ -196,12 +194,10 @@ vsunlock(addr, len)
caddr_t addr;
u_int len;
{
-
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
vm_map_pageable(&curproc->p_vmspace->vm_map,
trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), TRUE);
- mtx_unlock(&vm_mtx);
}
/*
@@ -211,8 +207,6 @@ vsunlock(addr, len)
* machine-dependent layer to fill those in and make the new process
* ready to run. The new process is set up so that it returns directly
* to user mode to avoid stack copying and relocation problems.
- *
- * Called without vm_mtx.
*/
void
vm_fork(p1, p2, flags)
@@ -221,7 +215,8 @@ vm_fork(p1, p2, flags)
{
register struct user *up;
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
if ((flags & RFPROC) == 0) {
/*
* Divorce the memory, if it is shared, essentially
@@ -234,7 +229,6 @@ vm_fork(p1, p2, flags)
}
}
cpu_fork(p1, p2, flags);
- mtx_unlock(&vm_mtx);
return;
}
@@ -289,7 +283,6 @@ vm_fork(p1, p2, flags)
* and make the child ready to run.
*/
cpu_fork(p1, p2, flags);
- mtx_unlock(&vm_mtx);
}
/*
@@ -329,18 +322,16 @@ void
faultin(p)
struct proc *p;
{
+ GIANT_REQUIRED;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_lock_spin(&sched_lock);
if ((p->p_sflag & PS_INMEM) == 0) {
-
++p->p_lock;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_lock(&vm_mtx);
pmap_swapin_proc(p);
- mtx_unlock(&vm_mtx);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -374,15 +365,13 @@ scheduler(dummy)
int ppri;
mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
+ /* GIANT_REQUIRED */
loop:
- mtx_lock(&vm_mtx);
if (vm_page_count_min()) {
VM_WAIT;
- mtx_unlock(&vm_mtx);
goto loop;
}
- mtx_unlock(&vm_mtx);
pp = NULL;
ppri = INT_MIN;
@@ -458,9 +447,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
* If any procs have been sleeping/stopped for at least maxslp seconds,
* they are swapped. Else, we swap the longest-sleeping or stopped process,
* if any, otherwise the longest-resident process.
- *
- * Can block
- * must be called with vm_mtx
*/
void
swapout_procs(action)
@@ -471,8 +457,8 @@ int action;
int outpri, outpri2;
int didswap = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
- mtx_unlock(&vm_mtx);
+ GIANT_REQUIRED;
+
outp = outp2 = NULL;
outpri = outpri2 = INT_MIN;
retry:
@@ -480,12 +466,10 @@ retry:
LIST_FOREACH(p, &allproc, p_list) {
struct vmspace *vm;
- mtx_lock(&vm_mtx);
PROC_LOCK(p);
if (p->p_lock != 0 ||
(p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
/*
@@ -498,7 +482,6 @@ retry:
if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -506,7 +489,6 @@ retry:
default:
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
case SSLEEP:
@@ -517,7 +499,6 @@ retry:
if (PRI_IS_REALTIME(p->p_pri.pri_class)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -530,7 +511,6 @@ retry:
(p->p_slptime < swap_idle_threshold1)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
@@ -544,7 +524,6 @@ retry:
(p->p_slptime < swap_idle_threshold2))) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
mtx_unlock_spin(&sched_lock);
@@ -559,7 +538,6 @@ retry:
NULL, curproc)) {
vmspace_free(vm);
PROC_UNLOCK(p);
- mtx_unlock(&vm_mtx);
continue;
}
vm_map_unlock(&vm->vm_map);
@@ -574,12 +552,10 @@ retry:
swapout(p);
vmspace_free(vm);
didswap++;
- mtx_unlock(&vm_mtx);
goto retry;
}
PROC_UNLOCK(p);
vmspace_free(vm);
- mtx_unlock(&vm_mtx);
}
}
sx_sunlock(&allproc_lock);
@@ -587,7 +563,6 @@ retry:
* If we swapped something out, and another process needed memory,
* then wakeup the sched process.
*/
- mtx_lock(&vm_mtx);
if (didswap)
wakeup(&proc0);
}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index ce8301a..cec1997 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -97,20 +97,15 @@ SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL)
* The start and end address of physical memory is passed in.
*/
-struct mtx vm_mtx;
-
/* ARGSUSED*/
static void
vm_mem_init(dummy)
void *dummy;
{
-
/*
* Initializes resident memory structures. From here on, all physical
* memory is accounted for, and we use only virtual addresses.
*/
- mtx_init(&vm_mtx, "vm", MTX_DEF);
- mtx_lock(&vm_mtx);
vm_set_page_size();
virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
@@ -123,5 +118,4 @@ vm_mem_init(dummy)
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
vm_pager_init();
- mtx_unlock(&vm_mtx);
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 38f969e..96199cd 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -104,17 +104,13 @@ kmem_alloc_pageable(map, size)
{
vm_offset_t addr;
int result;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
size = round_page(size);
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
&addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -135,17 +131,12 @@ kmem_alloc_nofault(map, size)
vm_offset_t addr;
int result;
- int hadvmlock;
+ GIANT_REQUIRED;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
size = round_page(size);
addr = vm_map_min(map);
result = vm_map_find(map, NULL, (vm_offset_t) 0,
&addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
if (result != KERN_SUCCESS) {
return (0);
}
@@ -164,11 +155,9 @@ kmem_alloc(map, size)
vm_offset_t addr;
vm_offset_t offset;
vm_offset_t i;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
size = round_page(size);
/*
@@ -184,8 +173,6 @@ kmem_alloc(map, size)
vm_map_lock(map);
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
offset = addr - VM_MIN_KERNEL_ADDRESS;
@@ -230,8 +217,6 @@ kmem_alloc(map, size)
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
}
@@ -250,16 +235,9 @@ kmem_free(map, addr, size)
vm_offset_t addr;
vm_size_t size;
{
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
-
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
/*
@@ -282,11 +260,8 @@ kmem_suballoc(parent, min, max, size)
{
int ret;
vm_map_t result;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
size = round_page(size);
@@ -304,8 +279,6 @@ kmem_suballoc(parent, min, max, size)
panic("kmem_suballoc: cannot create submap");
if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
panic("kmem_suballoc: unable to change range to submap");
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (result);
}
@@ -343,12 +316,9 @@ kmem_malloc(map, size, flags)
vm_map_entry_t entry;
vm_offset_t addr;
vm_page_t m;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
-
+ GIANT_REQUIRED;
+
size = round_page(size);
addr = vm_map_min(map);
@@ -444,13 +414,9 @@ retry:
}
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
bad:
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
@@ -469,11 +435,8 @@ kmem_alloc_wait(map, size)
vm_size_t size;
{
vm_offset_t addr;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
size = round_page(size);
@@ -488,17 +451,13 @@ kmem_alloc_wait(map, size)
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (0);
}
vm_map_unlock(map);
- msleep(map, &vm_mtx, PVM, "kmaw", 0);
+ tsleep(map, PVM, "kmaw", 0);
}
vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (addr);
}
@@ -514,17 +473,12 @@ kmem_free_wakeup(map, addr, size)
vm_offset_t addr;
vm_size_t size;
{
- int hadvmlock;
+ GIANT_REQUIRED;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
vm_map_lock(map);
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
wakeup(map);
vm_map_unlock(map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
/*
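The pattern repeated throughout the vm_kern.c hunks above is the core of the change: each entry point used to remember whether the caller already held the global VM mutex, take it if not, and conditionally drop it on every exit path; all of that collapses into a single assertion that the caller holds Giant. The sketch below contrasts the two shapes with invented function names, and assumes GIANT_REQUIRED reduces to roughly mtx_assert(&Giant, MA_OWNED) on kernels with assertions enabled.

    /* Old shape: tolerate being entered with or without vm_mtx held. */
    vm_offset_t
    kmem_alloc_oldstyle(vm_map_t map, vm_size_t size)
    {
            vm_offset_t addr = 0;
            int hadvmlock;

            hadvmlock = mtx_owned(&vm_mtx);     /* remember the caller's state */
            if (!hadvmlock)
                    mtx_lock(&vm_mtx);
            size = round_page(size);
            /* ... real allocation work elided ... */
            if (!hadvmlock)
                    mtx_unlock(&vm_mtx);        /* undo only what we did */
            return (addr);
    }

    /* New shape: the locking decision moves entirely to the caller. */
    vm_offset_t
    kmem_alloc_newstyle(vm_map_t map, vm_size_t size)
    {
            vm_offset_t addr = 0;

            GIANT_REQUIRED;                     /* caller must already hold Giant */
            size = round_page(size);
            /* ... real allocation work elided ... */
            return (addr);
    }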
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 229a822..707f5e6 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -173,7 +173,7 @@ vmspace_alloc(min, max)
{
struct vmspace *vm;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm = zalloc(vmspace_zone);
CTR1(KTR_VM, "vmspace_alloc: %p", vm);
vm_map_init(&vm->vm_map, min, max);
@@ -201,8 +201,8 @@ void
vmspace_free(vm)
struct vmspace *vm;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
if (vm->vm_refcnt == 0)
panic("vmspace_free: attempt to free already freed vmspace");
@@ -273,7 +273,8 @@ vm_map_create(pmap, min, max)
{
vm_map_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
result = zalloc(mapzone);
CTR1(KTR_VM, "vm_map_create: %p", result);
vm_map_init(result, min, max);
@@ -291,8 +292,8 @@ vm_map_init(map, min, max)
struct vm_map *map;
vm_offset_t min, max;
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
map->header.next = map->header.prev = &map->header;
map->nentries = 0;
map->size = 0;
@@ -310,8 +311,7 @@ void
vm_map_destroy(map)
struct vm_map *map;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
lockdestroy(&map->lock);
}
@@ -400,8 +400,6 @@ vm_map_entry_unlink(vm_map_t map,
* in the "entry" parameter. The boolean
* result indicates whether the address is
* actually contained in the map.
- *
- * Doesn't block.
*/
boolean_t
vm_map_lookup_entry(map, address, entry)
@@ -412,7 +410,7 @@ vm_map_lookup_entry(map, address, entry)
vm_map_entry_t cur;
vm_map_entry_t last;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Start looking either from the head of the list, or from the hint.
*/
@@ -492,7 +490,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Check that the start and end points are not bogus.
*/
@@ -654,7 +653,7 @@ vm_map_findspace(map, start, length, addr)
vm_map_entry_t entry, next;
vm_offset_t end;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (start < map->min_offset)
start = map->min_offset;
if (start > map->max_offset)
@@ -723,7 +722,8 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start;
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
start = *addr;
if (map == kmem_map)
@@ -769,7 +769,8 @@ vm_map_simplify_entry(map, entry)
vm_map_entry_t next, prev;
vm_size_t prevsize, esize;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
return;
@@ -988,7 +989,8 @@ vm_map_submap(map, start, end, submap)
vm_map_entry_t entry;
int result = KERN_INVALID_ARGUMENT;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1027,7 +1029,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t current;
vm_map_entry_t entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1117,7 +1119,8 @@ vm_map_madvise(map, start, end, behav)
vm_map_entry_t current, entry;
int modify_map = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Some madvise calls directly modify the vm_map_entry, in which case
* we need to use an exclusive lock on the map and we need to perform
@@ -1271,7 +1274,8 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t temp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
switch (new_inheritance) {
case VM_INHERIT_NONE:
case VM_INHERIT_COPY:
@@ -1458,7 +1462,8 @@ vm_map_pageable(map, start, end, new_pageable)
vm_offset_t failed = 0;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1689,8 +1694,8 @@ vm_map_clean(map, start, end, syncio, invalidate)
vm_object_t object;
vm_ooffset_t offset;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1769,9 +1774,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
int flags;
vm_object_reference(object);
- mtx_unlock(&vm_mtx);
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
- mtx_lock(&vm_mtx);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
vm_object_page_clean(object,
@@ -1848,7 +1851,8 @@ vm_map_delete(map, start, end)
vm_map_entry_t entry;
vm_map_entry_t first_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Find the start of the region, and clip it
*/
@@ -1950,7 +1954,8 @@ vm_map_remove(map, start, end)
{
int result, s = 0;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (map == kmem_map)
s = splvm();
@@ -1979,7 +1984,8 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
vm_map_entry_t entry;
vm_map_entry_t tmp_entry;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
return (FALSE);
}
@@ -2027,7 +2033,8 @@ vm_map_split(entry)
vm_size_t size;
vm_ooffset_t offset;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
orig_object = entry->object.vm_object;
if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
return;
@@ -2194,7 +2201,8 @@ vmspace_fork(vm1)
vm_map_entry_t new_entry;
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
vm_map_lock(old_map);
old_map->infork = 1;
@@ -2304,7 +2312,8 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
vm_size_t init_ssize;
int rv;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (VM_MIN_ADDRESS > 0 && addrbos < VM_MIN_ADDRESS)
return (KERN_NO_SPACE);
@@ -2368,8 +2377,6 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
* the stack. Also returns KERN_SUCCESS if addr is outside the
* stack range (this is strange, but preserves compatibility with
* the grow function in vm_machdep.c).
- *
- * Will grab vm_mtx if needed
*/
int
vm_map_growstack (struct proc *p, vm_offset_t addr)
@@ -2383,16 +2390,8 @@ vm_map_growstack (struct proc *p, vm_offset_t addr)
int grow_amount;
int rv;
int is_procstack;
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
-#define myreturn(rval) do { \
- if (!hadvmlock) \
- mtx_unlock(&vm_mtx); \
- return (rval); \
-} while (0)
+
+ GIANT_REQUIRED;
Retry:
vm_map_lock_read(map);
@@ -2400,12 +2399,12 @@ Retry:
/* If addr is already in the entry range, no need to grow.*/
if (vm_map_lookup_entry(map, addr, &prev_entry)) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if ((stack_entry = prev_entry->next) == &map->header) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
if (prev_entry == &map->header)
end = stack_entry->start - stack_entry->avail_ssize;
@@ -2423,14 +2422,14 @@ Retry:
addr >= stack_entry->start ||
addr < stack_entry->start - stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_SUCCESS);
+ return (KERN_SUCCESS);
}
/* Find the minimum grow amount */
grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
if (grow_amount > stack_entry->avail_ssize) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* If there is no longer enough space between the entries
@@ -2449,7 +2448,7 @@ Retry:
stack_entry->avail_ssize = stack_entry->start - end;
vm_map_unlock(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
@@ -2460,7 +2459,7 @@ Retry:
if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
vm_map_unlock_read(map);
- myreturn (KERN_NO_SPACE);
+ return (KERN_NO_SPACE);
}
/* Round up the grow amount modulo SGROWSIZ */
@@ -2512,8 +2511,7 @@ Retry:
}
vm_map_unlock(map);
- myreturn (rv);
-#undef myreturn
+ return (rv);
}
/*
@@ -2527,7 +2525,7 @@ vmspace_exec(struct proc *p) {
struct vmspace *newvmspace;
vm_map_t map = &p->p_vmspace->vm_map;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
(caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
@@ -2555,7 +2553,7 @@ vmspace_unshare(struct proc *p) {
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (oldvmspace->vm_refcnt == 1)
return;
newvmspace = vmspace_fork(oldvmspace);
@@ -2588,9 +2586,6 @@ vmspace_unshare(struct proc *p) {
* specified, the map may be changed to perform virtual
* copying operations, although the data referenced will
* remain the same.
- *
- * Can block locking maps and while calling vm_object_shadow().
- * Will drop/reaquire the vm_mtx.
*/
int
vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
@@ -2607,7 +2602,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
RetryLookup:;
/*
@@ -2779,8 +2774,7 @@ vm_map_lookup_done(map, entry)
/*
* Unlock the main-level map
*/
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
vm_map_unlock_read(map);
}
@@ -2809,7 +2803,8 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
off_t ooffset;
int cnt;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (npages)
*npages = 0;
@@ -3021,8 +3016,6 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
* Performs the copy_on_write operations necessary to allow the virtual copies
* into user space to work. This has to be called for write(2) system calls
* from other processes, file unlinking, and file size shrinkage.
- *
- * Requires that the vm_mtx is held
*/
void
vm_freeze_copyopts(object, froma, toa)
@@ -3033,7 +3026,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_t robject;
vm_pindex_t idx;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if ((object == NULL) ||
((object->flags & OBJ_OPT) == 0))
return;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 5ea3ccf..5442c85 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -212,7 +212,6 @@ struct vmspace {
do { \
lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
&(map)->ref_lock, curproc); \
- mtx_lock(&vm_mtx); \
(map)->timestamp++; \
} while(0)
@@ -227,11 +226,9 @@ struct vmspace {
#define vm_map_lock(map) \
do { \
vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map); \
- mtx_assert(&vm_mtx, MA_OWNED); \
- if (lockmgr(&(map)->lock, LK_EXCLUSIVE | LK_INTERLOCK, \
- &vm_mtx, curproc) != 0) \
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE, \
+ NULL, curproc) != 0) \
panic("vm_map_lock: failed to get lock"); \
- mtx_lock(&vm_mtx); \
(map)->timestamp++; \
} while(0)
@@ -244,10 +241,8 @@ struct vmspace {
#define vm_map_lock_read(map) \
do { \
vm_map_printf("locking map LK_SHARED: %p\n", map); \
- mtx_assert(&vm_mtx, MA_OWNED); \
- lockmgr(&(map)->lock, LK_SHARED | LK_INTERLOCK, \
- &vm_mtx, curproc); \
- mtx_lock(&vm_mtx); \
+ lockmgr(&(map)->lock, LK_SHARED, \
+ NULL, curproc); \
} while (0)
#define vm_map_unlock_read(map) \
@@ -261,8 +256,7 @@ _vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
int error;
vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map);
- error = lockmgr(&map->lock, LK_EXCLUPGRADE | LK_INTERLOCK, &vm_mtx, p);
- mtx_lock(&vm_mtx);
+ error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, p);
if (error == 0)
map->timestamp++;
return error;
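In the vm_map.h macros above, lockmgr() previously received vm_mtx as an interlock (LK_INTERLOCK has lockmgr drop the passed lock before it sleeps or returns) and the macro then re-took vm_mtx afterwards; with Giant asserted in the callers instead, the map lock is acquired on its own. A compressed before/after view of the exclusive-lock macro, leaving out the vm_map_printf() debugging hook and using suffixed names so the two forms can sit side by side:

    /* Removed form: vm_mtx handed to lockmgr as an interlock. */
    #define vm_map_lock_old(map) do {                                  \
            mtx_assert(&vm_mtx, MA_OWNED);                             \
            if (lockmgr(&(map)->lock, LK_EXCLUSIVE | LK_INTERLOCK,     \
                &vm_mtx, curproc) != 0)                                \
                    panic("vm_map_lock: failed to get lock");          \
            mtx_lock(&vm_mtx);                                         \
            (map)->timestamp++;                                        \
    } while (0)

    /* New form: a plain exclusive lockmgr acquisition, no interlock. */
    #define vm_map_lock_new(map) do {                                  \
            if (lockmgr(&(map)->lock, LK_EXCLUSIVE, NULL,              \
                curproc) != 0)                                         \
                    panic("vm_map_lock: failed to get lock");          \
            (map)->timestamp++;                                        \
    } while (0)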
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 24af2ec..3ff9e68 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -145,10 +145,9 @@ vmtotal(SYSCTL_HANDLER_ARGS)
/*
* Mark all objects as inactive.
*/
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
TAILQ_FOREACH(object, &vm_object_list, object_list)
vm_object_clear_flag(object, OBJ_ACTIVE);
- mtx_unlock(&vm_mtx);
/*
* Calculate process statistics.
*/
@@ -199,7 +198,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
* Note active objects.
*/
paging = 0;
- mtx_lock(&vm_mtx);
for (map = &p->p_vmspace->vm_map, entry = map->header.next;
entry != &map->header; entry = entry->next) {
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
@@ -208,7 +206,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
vm_object_set_flag(entry->object.vm_object, OBJ_ACTIVE);
paging |= entry->object.vm_object->paging_in_progress;
}
- mtx_unlock(&vm_mtx);
if (paging)
totalp->t_pw++;
}
@@ -216,7 +213,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
/*
* Calculate object memory usage statistics.
*/
- mtx_lock(&vm_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
/*
* devices, like /dev/mem, will badly skew our totals
@@ -240,7 +236,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
}
}
totalp->t_free = cnt.v_free_count + cnt.v_cache_count;
- mtx_unlock(&vm_mtx);
return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 4b9abe3..fcc78e6 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -113,8 +113,9 @@ sbrk(p, uap)
struct proc *p;
struct sbrk_args *uap;
{
-
/* Not yet implemented */
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return (EOPNOTSUPP);
}
@@ -130,8 +131,9 @@ sstk(p, uap)
struct proc *p;
struct sstk_args *uap;
{
-
/* Not yet implemented */
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return (EOPNOTSUPP);
}
@@ -148,7 +150,7 @@ ogetpagesize(p, uap)
struct proc *p;
struct getpagesize_args *uap;
{
-
+ /* MP SAFE */
p->p_retval[0] = PAGE_SIZE;
return (0);
}
@@ -268,7 +270,7 @@ mmap(p, uap)
addr < round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ)))
addr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
- mtx_lock(&Giant);
+ mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
if (flags & MAP_ANON) {
/*
* Mapping blank space is trivial.
@@ -315,8 +317,10 @@ mmap(p, uap)
/*
* Get the proper underlying object
*/
- if (VOP_GETVOBJECT(vp, &obj) != 0)
- return (EINVAL);
+ if (VOP_GETVOBJECT(vp, &obj) != 0) {
+ error = EINVAL;
+ goto done;
+ }
vp = (struct vnode*)obj->handle;
}
/*
@@ -518,6 +522,8 @@ msync(p, uap)
if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
return (EINVAL);
+ mtx_lock(&Giant);
+
map = &p->p_vmspace->vm_map;
/*
@@ -527,10 +533,6 @@ msync(p, uap)
* the range of the map entry containing addr. This can be incorrect
* if the region splits or is coalesced with a neighbor.
*/
-#ifndef BLEED
- mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
if (size == 0) {
vm_map_entry_t entry;
@@ -538,10 +540,7 @@ msync(p, uap)
rv = vm_map_lookup_entry(map, addr, &entry);
vm_map_unlock_read(map);
if (rv == FALSE) {
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EINVAL);
}
addr = entry->start;
@@ -554,10 +553,8 @@ msync(p, uap)
rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
(flags & MS_INVALIDATE) != 0);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
+
switch (rv) {
case KERN_SUCCESS:
break;
@@ -610,20 +607,17 @@ munmap(p, uap)
if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
return (EINVAL);
#endif
+ mtx_lock(&Giant);
map = &p->p_vmspace->vm_map;
/*
* Make sure entire range is allocated.
*/
- mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (EINVAL);
}
/* returns nothing but KERN_SUCCESS anyway */
(void) vm_map_remove(map, addr, addr + size);
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
return (0);
}
@@ -674,10 +668,8 @@ mprotect(p, uap)
return(EINVAL);
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
ret = vm_map_protect(&p->p_vmspace->vm_map, addr,
addr + size, prot, FALSE);
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
switch (ret) {
case KERN_SUCCESS:
@@ -716,16 +708,10 @@ minherit(p, uap)
if (addr + size < addr)
return(EINVAL);
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
ret = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
inherit);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
switch (ret) {
case KERN_SUCCESS:
@@ -779,15 +765,9 @@ madvise(p, uap)
start = trunc_page((vm_offset_t) uap->addr);
end = round_page((vm_offset_t) uap->addr + uap->len);
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
ret = vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (ret ? EINVAL : 0);
}
@@ -833,11 +813,8 @@ mincore(p, uap)
*/
vec = uap->vec;
- map = &p->p_vmspace->vm_map;
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
+ map = &p->p_vmspace->vm_map;
pmap = vmspace_pmap(p->p_vmspace);
vm_map_lock_read(map);
@@ -917,7 +894,6 @@ RestartScan:
* the map, we release the lock.
*/
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
/*
* calculate index into user supplied byte vector
@@ -931,9 +907,7 @@ RestartScan:
while((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
++lastvecindex;
@@ -944,9 +918,7 @@ RestartScan:
*/
error = subyte( vec + vecindex, mincoreinfo);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
@@ -954,7 +926,6 @@ RestartScan:
* If the map has changed, due to the subyte, the previous
* output may be invalid.
*/
- mtx_lock(&vm_mtx);
vm_map_lock_read(map);
if (timestamp != map->timestamp)
goto RestartScan;
@@ -969,7 +940,6 @@ RestartScan:
* the map, we release the lock.
*/
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
/*
* Zero the last entries in the byte vector.
@@ -978,9 +948,7 @@ RestartScan:
while((lastvecindex + 1) < vecindex) {
error = subyte( vec + lastvecindex, 0);
if (error) {
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (EFAULT);
}
++lastvecindex;
@@ -990,15 +958,11 @@ RestartScan:
* If the map has changed, due to the subyte, the previous
* output may be invalid.
*/
- mtx_lock(&vm_mtx);
vm_map_lock_read(map);
if (timestamp != map->timestamp)
goto RestartScan;
vm_map_unlock_read(map);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (0);
}
@@ -1043,16 +1007,10 @@ mlock(p, uap)
return (error);
#endif
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
addr + size, FALSE);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1067,6 +1025,8 @@ mlockall(p, uap)
struct proc *p;
struct mlockall_args *uap;
{
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return 0;
}
@@ -1081,6 +1041,8 @@ munlockall(p, uap)
struct proc *p;
struct munlockall_args *uap;
{
+ /* mtx_lock(&Giant); */
+ /* mtx_unlock(&Giant); */
return 0;
}
@@ -1117,16 +1079,10 @@ munlock(p, uap)
return (error);
#endif
-#ifndef BLEED
mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
addr + size, TRUE);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
mtx_unlock(&Giant);
-#endif
return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
@@ -1175,9 +1131,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
return (EINVAL);
fitit = FALSE;
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
(void) vm_map_remove(map, *addr, *addr + size);
- mtx_unlock(&vm_mtx);
}
/*
@@ -1252,7 +1206,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
maxprot |= VM_PROT_EXECUTE;
#endif
- mtx_lock(&vm_mtx);
if (fitit)
*addr = pmap_addr_hint(object, *addr, size);
@@ -1279,7 +1232,6 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
if (rv != KERN_SUCCESS)
(void) vm_map_remove(map, *addr, *addr + size);
}
- mtx_unlock(&vm_mtx);
mtx_unlock(&Giant);
switch (rv) {
case KERN_SUCCESS:
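Two changes recur in the vm_mmap.c hunks above: system calls that used to juggle vm_mtx (sometimes behind #ifndef BLEED) now simply bracket their VM work with Giant, and early returns inside that bracket are rewritten to funnel through one exit so the mutex cannot be leaked, as in the VOP_GETVOBJECT() failure path of mmap(). A minimal sketch of that discipline with invented names (example_syscall, do_work and its argument struct are placeholders, not from the tree):

    int
    example_syscall(struct proc *p, struct example_args *uap)
    {
            int error = 0;

            mtx_lock(&Giant);               /* cover the whole VM section   */
            if (do_work(p, uap) != 0) {
                    error = EINVAL;         /* no bare return here ...      */
                    goto done;              /* ... or Giant would be leaked */
            }
            /* more Giant-covered work */
    done:
            mtx_unlock(&Giant);             /* single unlock on every path  */
            return (error);
    }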
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 9717325..96be4c0 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -147,7 +147,8 @@ _vm_object_allocate(type, size, object)
{
int incr;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
TAILQ_INIT(&object->memq);
TAILQ_INIT(&object->shadow_head);
@@ -192,8 +193,8 @@ _vm_object_allocate(type, size, object)
void
vm_object_init()
{
+ GIANT_REQUIRED;
- mtx_assert(&vm_mtx, MA_OWNED);
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
vm_object_count = 0;
@@ -230,7 +231,8 @@ vm_object_allocate(type, size)
{
vm_object_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
result = (vm_object_t) zalloc(obj_zone);
_vm_object_allocate(type, size, result);
@@ -247,8 +249,8 @@ void
vm_object_reference(object)
vm_object_t object;
{
+ GIANT_REQUIRED;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
if (object == NULL)
return;
@@ -257,20 +259,14 @@ vm_object_reference(object)
object->ref_count++;
if (object->type == OBJT_VNODE) {
- mtx_unlock(VM_OBJECT_MTX(object));
- mtx_assert(&Giant, MA_OWNED);
while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curproc)) {
printf("vm_object_reference: delay in getting object\n");
}
- mtx_lock(VM_OBJECT_MTX(object));
}
}
/*
* handle deallocating a object of type OBJT_VNODE
- *
- * requires vm_mtx
- * may block
*/
void
vm_object_vndeallocate(object)
@@ -278,7 +274,7 @@ vm_object_vndeallocate(object)
{
struct vnode *vp = (struct vnode *) object->handle;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -297,10 +293,7 @@ vm_object_vndeallocate(object)
/*
* vrele may need a vop lock
*/
- mtx_unlock(VM_OBJECT_MTX(object));
- mtx_assert(&Giant, MA_OWNED);
vrele(vp);
- mtx_lock(VM_OBJECT_MTX(object));
}
/*
@@ -313,7 +306,6 @@ vm_object_vndeallocate(object)
* may be relinquished.
*
* No object may be locked.
- * vm_mtx must be held
*/
void
vm_object_deallocate(object)
@@ -321,7 +313,8 @@ vm_object_deallocate(object)
{
vm_object_t temp;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
while (object != NULL) {
if (object->type == OBJT_VNODE) {
@@ -355,9 +348,6 @@ vm_object_deallocate(object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
-#ifdef objlocks
- mtx_lock(VM_OBJECT_MTX(robject));
-#endif
if ((robject->handle == NULL) &&
(robject->type == OBJT_DEFAULT ||
robject->type == OBJT_SWAP)) {
@@ -368,32 +358,16 @@ vm_object_deallocate(object)
robject->paging_in_progress ||
object->paging_in_progress
) {
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
vm_object_pip_sleep(robject, "objde1");
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(robject));
- mtx_lock(VM_OBJECT_MTX(object));
-#endif
vm_object_pip_sleep(object, "objde2");
-#ifdef objlocks
- mtx_lock(VM_OBJECT_MTX(robject));
-#endif
}
if (robject->ref_count == 1) {
robject->ref_count--;
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
object = robject;
goto doterm;
}
-#ifdef objlocks
- mtx_unlock(VM_OBJECT_MTX(object));
-#endif
object = robject;
vm_object_collapse(object);
continue;
@@ -435,8 +409,8 @@ vm_object_terminate(object)
vm_page_t p;
int s;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Make sure no one uses us.
*/
@@ -468,9 +442,7 @@ vm_object_terminate(object)
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
vp = (struct vnode *) object->handle;
- mtx_unlock(VM_OBJECT_MTX(object));
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
- mtx_lock(VM_OBJECT_MTX(object));
}
KASSERT(object->ref_count == 0,
@@ -555,7 +527,8 @@ vm_object_page_clean(object, start, end, flags)
vm_page_t ma[vm_pageout_page_count];
int curgeneration;
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
+
if (object->type != OBJT_VNODE ||
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
@@ -763,7 +736,8 @@ vm_object_pmap_copy_1(object, start, end)
vm_pindex_t idx;
vm_page_t p;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;
@@ -791,7 +765,7 @@ vm_object_pmap_remove(object, start, end)
{
vm_page_t p;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL)
return;
TAILQ_FOREACH(p, &object->memq, listq) {
@@ -834,7 +808,7 @@ vm_object_madvise(object, pindex, count, advise)
vm_object_t tobject;
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL)
return;
@@ -948,7 +922,7 @@ vm_object_shadow(object, offset, length)
vm_object_t source;
vm_object_t result;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
source = *object;
/*
@@ -1015,7 +989,7 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_pindex_t backing_offset_index;
s = splvm();
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1229,8 +1203,7 @@ void
vm_object_collapse(object)
vm_object_t object;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
while (TRUE) {
vm_object_t backing_object;
@@ -1443,7 +1416,7 @@ vm_object_page_remove(object, start, end, clean_only)
unsigned int size;
int all;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object == NULL ||
object->resident_page_count == 0)
@@ -1561,7 +1534,7 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
{
vm_pindex_t next_pindex;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (prev_object == NULL) {
return (TRUE);
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 2b29baf..c9c0920 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -169,49 +169,44 @@ extern vm_object_t kmem_object;
#ifdef _KERNEL
-/*
- * For now a global vm lock.
- */
-#define VM_OBJECT_MTX(object) (&vm_mtx)
-
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->flags |= bits;
+ GIANT_REQUIRED;
+ atomic_set_short(&object->flags, bits);
+ /* object->flags |= bits; */
}
static __inline void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->flags &= ~bits;
+ GIANT_REQUIRED;
+ atomic_clear_short(&object->flags, bits);
+ /* object->flags &= ~bits; */
}
static __inline void
vm_object_pip_add(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress += i;
+ GIANT_REQUIRED;
+ atomic_add_short(&object->paging_in_progress, i);
+ /* object->paging_in_progress += i; */
}
static __inline void
vm_object_pip_subtract(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress -= i;
+ GIANT_REQUIRED;
+ atomic_subtract_short(&object->paging_in_progress, i);
+ /* object->paging_in_progress -= i; */
}
static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
- object->paging_in_progress--;
+ GIANT_REQUIRED;
+ atomic_subtract_short(&object->paging_in_progress, 1);
+ /* object->paging_in_progress--; */
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -221,10 +216,9 @@ vm_object_pip_wakeup(vm_object_t object)
static __inline void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
if (i)
- object->paging_in_progress -= i;
+ atomic_subtract_short(&object->paging_in_progress, i);
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
@@ -234,13 +228,12 @@ vm_object_pip_wakeupn(vm_object_t object, short i)
static __inline void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
if (object->paging_in_progress) {
int s = splvm();
if (object->paging_in_progress) {
vm_object_set_flag(object, OBJ_PIPWNT);
- msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
+ tsleep(object, PVM, waitid, 0);
}
splx(s);
}
@@ -249,8 +242,7 @@ vm_object_pip_sleep(vm_object_t object, char *waitid)
static __inline void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
-
- mtx_assert(VM_OBJECT_MTX(object), MA_OWNED);
+ GIANT_REQUIRED;
while (object->paging_in_progress)
vm_object_pip_sleep(object, waitid);
}
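The vm_object.h inlines above keep their old read-modify-write bodies as trailing comments and switch the live stores to the machine atomic primitives (atomic_set_short(), atomic_add_short(), atomic_subtract_short() from machine/atomic.h) while still asserting Giant. One plausible reading, offered as interpretation rather than fact, is that Giant still provides the real exclusion for now and the atomics keep these fields coherent for any path that later stops taking it. A minimal sketch of the flag helpers in that style, with invented names but the field and types used above:

    static __inline void
    example_flag_set(vm_object_t object, u_short bits)
    {
            GIANT_REQUIRED;                           /* Giant still covers callers */
            atomic_set_short(&object->flags, bits);   /* was: object->flags |= bits */
    }

    static __inline void
    example_flag_clear(vm_object_t object, u_short bits)
    {
            GIANT_REQUIRED;
            atomic_clear_short(&object->flags, bits); /* was: object->flags &= ~bits */
    }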
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index dc391cb..e5ef9f8 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -148,7 +148,6 @@ vm_set_page_size()
*
* Add a new page to the freelist for use by the system.
* Must be called at splhigh().
- * Must be called with the vm_mtx held.
*/
vm_page_t
vm_add_new_page(pa)
@@ -156,7 +155,8 @@ vm_add_new_page(pa)
{
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
@@ -363,7 +363,8 @@ vm_page_insert(m, object, pindex)
{
register struct vm_page **bucket;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (m->object != NULL)
panic("vm_page_insert: already inserted");
@@ -423,7 +424,8 @@ vm_page_remove(m)
{
vm_object_t object;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
if (m->object == NULL)
return;
@@ -487,8 +489,6 @@ vm_page_remove(m)
* an interrupt makes a change, but the generation algorithm will not
* operate properly in an SMP environment where both cpu's are able to run
* kernel code simultaneously.
- * NOTE: under the giant vm lock we should be ok, there should be
- * no reason to check vm_page_bucket_generation
*
* The object must be locked. No side effects.
* This routine may not block.
@@ -604,7 +604,7 @@ vm_page_unqueue(m)
int queue = m->queue;
struct vpgqueues *pq;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
pq = &vm_page_queues[queue];
@@ -645,7 +645,7 @@ _vm_page_list_find(basequeue, index)
vm_page_t m = NULL;
struct vpgqueues *pq;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
pq = &vm_page_queues[basequeue];
/*
@@ -683,7 +683,7 @@ vm_page_select_cache(object, pindex)
{
vm_page_t m;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
while (TRUE) {
m = vm_page_list_find(
PQ_CACHE,
@@ -735,7 +735,6 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
* VM_ALLOC_INTERRUPT interrupt time request
* VM_ALLOC_ZERO zero page
*
- * vm_mtx must be locked.
* This routine may not block.
*
* Additional special handling is required when called from an
@@ -752,7 +751,8 @@ vm_page_alloc(object, pindex, page_req)
register vm_page_t m = NULL;
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
KASSERT(!vm_page_lookup(object, pindex),
("vm_page_alloc: page already allocated"));
@@ -885,13 +885,13 @@ vm_wait()
s = splvm();
if (curproc == pageproc) {
vm_pageout_pages_needed = 1;
- msleep(&vm_pageout_pages_needed, &vm_mtx, PSWP, "VMWait", 0);
+ tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
} else {
if (!vm_pages_needed) {
vm_pages_needed = 1;
wakeup(&vm_pages_needed);
}
- msleep(&cnt.v_free_count, &vm_mtx, PVM, "vmwait", 0);
+ tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
}
splx(s);
}
@@ -938,8 +938,9 @@ vm_page_activate(m)
{
int s;
+ GIANT_REQUIRED;
s = splvm();
- mtx_assert(&vm_mtx, MA_OWNED);
+
if (m->queue != PQ_ACTIVE) {
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
@@ -1012,9 +1013,8 @@ vm_page_free_toq(vm_page_t m)
struct vpgqueues *pq;
vm_object_t object = m->object;
+ GIANT_REQUIRED;
s = splvm();
-
- mtx_assert(&vm_mtx, MA_OWNED);
cnt.v_tfree++;
if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
@@ -1252,7 +1252,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Ignore if already inactive.
*/
@@ -1290,8 +1290,8 @@ vm_page_deactivate(vm_page_t m)
int
vm_page_try_to_cache(vm_page_t m)
{
+ GIANT_REQUIRED;
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->flags & (PG_BUSY|PG_UNMANAGED))) {
return(0);
@@ -1339,7 +1339,7 @@ vm_page_cache(m)
{
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
printf("vm_page_cache: attempting to cache busy page\n");
return;
@@ -1397,7 +1397,7 @@ vm_page_dontneed(m)
int dnw;
int head;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
dnw = ++dnweight;
/*
@@ -1438,7 +1438,6 @@ vm_page_dontneed(m)
* to be in the object. If the page doesn't exist, allocate it.
*
* This routine may block.
- * Requires vm_mtx.
*/
vm_page_t
vm_page_grab(object, pindex, allocflags)
@@ -1449,7 +1448,7 @@ vm_page_grab(object, pindex, allocflags)
vm_page_t m;
int s, generation;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
retrylookup:
if ((m = vm_page_lookup(object, pindex)) != NULL) {
if (m->busy || (m->flags & PG_BUSY)) {
@@ -1459,7 +1458,7 @@ retrylookup:
while ((object->generation == generation) &&
(m->busy || (m->flags & PG_BUSY))) {
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
- msleep(m, &vm_mtx, PVM, "pgrbwt", 0);
+ tsleep(m, PVM, "pgrbwt", 0);
if ((allocflags & VM_ALLOC_RETRY) == 0) {
splx(s);
return NULL;
@@ -1522,8 +1521,6 @@ vm_page_bits(int base, int size)
* This routine may not block.
*
* (base + size) must be less then or equal to PAGE_SIZE.
- *
- * vm_mtx needs to be held
*/
void
vm_page_set_validclean(m, base, size)
@@ -1535,7 +1532,7 @@ vm_page_set_validclean(m, base, size)
int frag;
int endoff;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (size == 0) /* handle degenerate case */
return;
@@ -1609,8 +1606,7 @@ vm_page_clear_dirty(m, base, size)
int base;
int size;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
m->dirty &= ~vm_page_bits(base, size);
}
@@ -1630,7 +1626,7 @@ vm_page_set_invalid(m, base, size)
{
int bits;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
bits = vm_page_bits(base, size);
m->valid &= ~bits;
m->dirty &= ~bits;
@@ -1918,16 +1914,10 @@ contigmalloc(size, type, flags, low, high, alignment, boundary)
unsigned long boundary;
{
void * ret;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
ret = contigmalloc1(size, type, flags, low, high, alignment, boundary,
kernel_map);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
-
return (ret);
}
@@ -1938,14 +1928,8 @@ contigfree(addr, size, type)
unsigned long size;
struct malloc_type *type;
{
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
kmem_free(kernel_map, (vm_offset_t)addr, size);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
vm_offset_t
@@ -1956,15 +1940,10 @@ vm_page_alloc_contig(size, low, high, alignment)
vm_offset_t alignment;
{
vm_offset_t ret;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
ret = ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
alignment, 0ul, kernel_map));
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (ret);
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 6bc7266..1050e8e 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -305,28 +305,23 @@ extern long first_page; /* first physical page number */
(&vm_page_array[atop(pa) - first_page ])
/*
- * For now, a global vm lock
- */
-#define VM_PAGE_MTX(m) (&vm_mtx)
-
-/*
* Functions implemented as macros
*/
static __inline void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->flags |= bits;
+ GIANT_REQUIRED;
+ atomic_set_short(&(m)->flags, bits);
+ /* m->flags |= bits; */
}
static __inline void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->flags &= ~bits;
+ GIANT_REQUIRED;
+ atomic_clear_short(&(m)->flags, bits);
+ /* m->flags &= ~bits; */
}
#if 0
@@ -386,17 +381,15 @@ vm_page_wakeup(vm_page_t m)
static __inline void
vm_page_io_start(vm_page_t m)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->busy++;
+ GIANT_REQUIRED;
+ atomic_add_char(&(m)->busy, 1);
}
static __inline void
vm_page_io_finish(vm_page_t m)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
- m->busy--;
+ GIANT_REQUIRED;
+ atomic_subtract_char(&(m)->busy, 1);
if (m->busy == 0)
vm_page_flash(m);
}
@@ -463,16 +456,14 @@ void vm_page_free_toq(vm_page_t m);
static __inline void
vm_page_hold(vm_page_t mem)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ GIANT_REQUIRED;
mem->hold_count++;
}
static __inline void
vm_page_unhold(vm_page_t mem)
{
-
- mtx_assert(VM_PAGE_MTX(m), MA_OWNED);
+ GIANT_REQUIRED;
--mem->hold_count;
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
}
@@ -578,6 +569,7 @@ vm_page_free_zero(m)
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
+ GIANT_REQUIRED;
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
int s = splvm();
if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
@@ -585,7 +577,7 @@ vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
* Page is busy. Wait and retry.
*/
vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
- msleep(m, VM_PAGE_MTX(m), PVM, msg, 0);
+ tsleep(m, PVM, msg, 0);
}
splx(s);
return(TRUE);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index f4c5670..444bfdc 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -226,7 +226,8 @@ vm_pageout_clean(m)
int ib, is, page_base;
vm_pindex_t pindex = m->pindex;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
object = m->object;
/*
@@ -366,7 +367,7 @@ vm_pageout_flush(mc, count, flags)
int numpagedout = 0;
int i;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
* mark the pages read-only.
@@ -449,8 +450,6 @@ vm_pageout_flush(mc, count, flags)
* backing_objects.
*
* The object and map must be locked.
- *
- * Requires the vm_mtx
*/
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
@@ -464,7 +463,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
int remove_mode;
int s;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
return;
@@ -553,7 +552,7 @@ vm_pageout_map_deactivate_pages(map, desired)
vm_map_entry_t tmpe;
vm_object_t obj, bigobj;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
return;
}
@@ -619,7 +618,7 @@ vm_pageout_page_free(vm_page_t m) {
vm_object_t object = m->object;
int type = object->type;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (type == OBJT_SWAP || type == OBJT_DEFAULT)
vm_object_reference(object);
vm_page_busy(m);
@@ -649,8 +648,7 @@ vm_pageout_scan(int pass)
int maxlaunder;
int s;
- mtx_assert(&Giant, MA_OWNED);
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Do whatever cleanup that the pmap code can.
*/
@@ -878,17 +876,14 @@ rescan0:
vp = object->handle;
mp = NULL;
- mtx_unlock(&vm_mtx);
if (vp->v_type == VREG)
vn_start_write(vp, &mp, V_NOWAIT);
if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
continue;
}
- mtx_lock(&vm_mtx);
/*
* The page might have been moved to another
@@ -902,10 +897,8 @@ rescan0:
object->handle != vp) {
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
@@ -916,10 +909,8 @@ rescan0:
* statistics are more correct if we don't.
*/
if (m->busy || (m->flags & PG_BUSY)) {
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
@@ -933,10 +924,8 @@ rescan0:
splx(s);
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
continue;
}
}
@@ -967,10 +956,8 @@ rescan0:
TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
splx(s);
if (vp) {
- mtx_unlock(&vm_mtx);
vput(vp);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
}
}
}
@@ -1154,11 +1141,9 @@ rescan0:
#if 0
if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
- mtx_unlock(&vm_mtx);
bigproc = NULL;
bigsize = 0;
sx_slock(&allproc_lock);
- mtx_lock(&vm_mtx);
LIST_FOREACH(p, &allproc, p_list) {
/*
* If this process is already locked, skip it.
@@ -1350,7 +1335,6 @@ vm_pageout()
int pass;
mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
/*
* Initialize some paging parameters.
@@ -1410,7 +1394,6 @@ vm_pageout()
*/
if (vm_pageout_stats_free_max == 0)
vm_pageout_stats_free_max = 5;
- mtx_unlock(&vm_mtx);
PROC_LOCK(curproc);
curproc->p_flag |= P_BUFEXHAUST;
@@ -1420,7 +1403,6 @@ vm_pageout()
/*
* The pageout daemon is never done, so loop forever.
*/
- mtx_lock(&vm_mtx);
while (TRUE) {
int error;
int s = splvm();
@@ -1444,7 +1426,7 @@ vm_pageout()
*/
++pass;
if (pass > 1)
- msleep(&vm_pages_needed, &vm_mtx, PVM,
+ tsleep(&vm_pages_needed, PVM,
"psleep", hz/2);
} else {
/*
@@ -1455,8 +1437,8 @@ vm_pageout()
pass = 1;
else
pass = 0;
- error = msleep(&vm_pages_needed, &vm_mtx,
- PVM, "psleep", vm_pageout_stats_interval * hz);
+ error = tsleep(&vm_pages_needed, PVM,
+ "psleep", vm_pageout_stats_interval * hz);
if (error && !vm_pages_needed) {
splx(s);
pass = 0;
@@ -1501,14 +1483,11 @@ vm_daemon()
mtx_lock(&Giant);
while (TRUE) {
- mtx_lock(&vm_mtx);
- msleep(&vm_daemon_needed, &vm_mtx, PPAUSE, "psleep", 0);
+ tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
if (vm_pageout_req_swapout) {
swapout_procs(vm_pageout_req_swapout);
- mtx_assert(&vm_mtx, MA_OWNED);
vm_pageout_req_swapout = 0;
}
- mtx_unlock(&vm_mtx);
/*
* scan the processes for exceeding their rlimits or if
* process is swapped out -- deactivate pages
@@ -1525,7 +1504,6 @@ vm_daemon()
if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
continue;
}
- mtx_lock(&vm_mtx);
/*
* if the process is in a non-running type state,
* don't touch it.
@@ -1533,7 +1511,6 @@ vm_daemon()
mtx_lock_spin(&sched_lock);
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
mtx_unlock_spin(&sched_lock);
- mtx_unlock(&vm_mtx);
continue;
}
/*
@@ -1557,7 +1534,6 @@ vm_daemon()
vm_pageout_map_deactivate_pages(
&p->p_vmspace->vm_map, limit);
}
- mtx_unlock(&vm_mtx);
}
sx_sunlock(&allproc_lock);
}
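The sleeps above change shape as well: msleep(chan, &vm_mtx, pri, wmesg, timo) atomically released the passed mutex for the duration of the sleep and re-acquired it before returning, so with vm_mtx gone these become plain tsleep() calls that name only the wait channel and priority. A two-line sketch of the conversion, where resource and resource_busy are invented stand-ins for a real wait channel and its condition:

    /* Before: drop and re-take the global VM mutex across the sleep. */
    while (resource_busy)
            msleep(&resource, &vm_mtx, PVM, "waitrs", 0);

    /* After: no mutex argument; the callers hold Giant and assert it. */
    while (resource_busy)
            tsleep(&resource, PVM, "waitrs", 0);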
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index e53a14c..07d0655 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -245,18 +245,14 @@ vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
{
vm_object_t ret;
struct pagerops *ops;
- int hadvmlock;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
ops = pagertab[type];
if (ops)
ret = (*ops->pgo_alloc) (handle, size, prot, off);
else
ret = NULL;
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return (ret);
}
@@ -264,8 +260,7 @@ void
vm_pager_deallocate(object)
vm_object_t object;
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
(*pagertab[object->type]->pgo_dealloc) (object);
}
@@ -385,8 +380,6 @@ initpbuf(struct buf *bp)
*
* NOTE: pfreecnt can be NULL, but this 'feature' will be removed
* relatively soon when the rest of the subsystems get smart about it. XXX
- *
- * vm_mtx can be held or unheld
*/
struct buf *
getpbuf(pfreecnt)
@@ -394,12 +387,9 @@ getpbuf(pfreecnt)
{
int s;
struct buf *bp;
- int hadvmlock;
s = splvm();
- hadvmlock = mtx_owned(&vm_mtx);
- if (hadvmlock)
- mtx_unlock(&vm_mtx);
+ GIANT_REQUIRED;
mtx_lock(&pbuf_mtx);
for (;;) {
@@ -424,8 +414,6 @@ getpbuf(pfreecnt)
splx(s);
initpbuf(bp);
- if (hadvmlock)
- mtx_lock(&vm_mtx);
return bp;
}
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index b4511ca..427d103 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,12 +124,12 @@ vm_pager_get_pages(
) {
int r;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
+
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
}
- mtx_assert(&vm_mtx, MA_OWNED);
return(r);
}
@@ -141,11 +141,9 @@ vm_pager_put_pages(
int flags,
int *rtvals
) {
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
- mtx_assert(&vm_mtx, MA_OWNED);
}
/*
@@ -168,10 +166,9 @@ vm_pager_has_page(
) {
boolean_t ret;
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
- mtx_assert(&vm_mtx, MA_OWNED);
return (ret);
}
@@ -186,11 +183,9 @@ vm_pager_has_page(
static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
-
- mtx_assert(&vm_mtx, MA_OWNED);
+ GIANT_REQUIRED;
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
- mtx_assert(&vm_mtx, MA_OWNED);
}
#endif
diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c
index 0dfb83e..dcd02e8 100644
--- a/sys/vm/vm_unix.c
+++ b/sys/vm/vm_unix.c
@@ -74,6 +74,9 @@ obreak(p, uap)
register struct vmspace *vm = p->p_vmspace;
vm_offset_t new, old, base;
int rv;
+ int error = 0;
+
+ mtx_lock(&Giant); /* syscall marked mp-safe but isn't */
base = round_page((vm_offset_t) vm->vm_daddr);
new = round_page((vm_offset_t)uap->nsize);
@@ -84,52 +87,46 @@ obreak(p, uap)
* reduce their usage, even if they remain over the limit.
*/
if (new > old &&
- (new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur)
- return ENOMEM;
- if (new >= VM_MAXUSER_ADDRESS)
- return (ENOMEM);
+ (new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur) {
+ error = ENOMEM;
+ goto done;
+ }
+ if (new >= VM_MAXUSER_ADDRESS) {
+ error = ENOMEM;
+ goto done;
+ }
} else if (new < base) {
/*
* This is simply an invalid value. If someone wants to
* do fancy address space manipulations, mmap and munmap
* can do most of what the user would want.
*/
- return EINVAL;
+ error = EINVAL;
+ goto done;
}
if (new > old) {
vm_size_t diff;
diff = new - old;
-#ifndef BLEED
- mtx_lock(&Giant);
-#endif
- mtx_lock(&vm_mtx);
rv = vm_map_find(&vm->vm_map, NULL, 0, &old, diff, FALSE,
VM_PROT_ALL, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
- return (ENOMEM);
+ error = ENOMEM;
+ goto done;
}
vm->vm_dsize += btoc(diff);
- mtx_unlock(&vm_mtx);
-#ifndef BLEED
- mtx_unlock(&Giant);
-#endif
} else if (new < old) {
- mtx_lock(&Giant);
- mtx_lock(&vm_mtx);
rv = vm_map_remove(&vm->vm_map, new, old);
if (rv != KERN_SUCCESS) {
- mtx_unlock(&vm_mtx);
- mtx_unlock(&Giant);
- return (ENOMEM);
+ error = ENOMEM;
+ goto done;
}
vm->vm_dsize -= btoc(old - new);
- mtx_unlock(&vm_mtx);
- mtx_unlock(&Giant);
}
- return (0);
+done:
+ mtx_unlock(&Giant);
+ return (error);
}
#ifndef _SYS_SYSPROTO_H_
@@ -144,6 +141,7 @@ ovadvise(p, uap)
struct proc *p;
struct ovadvise_args *uap;
{
-
+ /* START_GIANT_OPTIONAL */
+ /* END_GIANT_OPTIONAL */
return (EINVAL);
}
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index 30fadbe..5c6431b 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -19,6 +19,7 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
@@ -119,6 +120,8 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
{
int totsize, oldzflags;
+ GIANT_REQUIRED;
+
oldzflags = z->zflags;
if ((z->zflags & ZONE_BOOT) == 0) {
z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
@@ -137,8 +140,6 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
* in pages as needed.
*/
if (z->zflags & ZONE_INTERRUPT) {
- int hadvmlock;
-
totsize = round_page(z->zsize * nentries);
atomic_add_int(&zone_kmem_kvaspace, totsize);
z->zkva = kmem_alloc_pageable(kernel_map, totsize);
@@ -146,17 +147,12 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
return 0;
z->zpagemax = totsize / PAGE_SIZE;
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
if (obj == NULL) {
z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
} else {
z->zobj = obj;
_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
}
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
z->zallocflag = VM_ALLOC_INTERRUPT;
z->zmax += nentries;
} else {
@@ -364,12 +360,8 @@ void *
zalloc(vm_zone_t z)
{
void *item;
- int hadvmlock;
KASSERT(z != NULL, ("invalid zone"));
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
mtx_lock(&z->zmtx);
if (z->zfreecnt <= z->zfreemin) {
@@ -390,8 +382,6 @@ zalloc(vm_zone_t z)
out:
mtx_unlock(&z->zmtx);
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
return item;
}
@@ -401,13 +391,8 @@ out:
void
zfree(vm_zone_t z, void *item)
{
- int hadvmlock;
-
KASSERT(z != NULL, ("invalid zone"));
KASSERT(item != NULL, ("invalid item"));
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
mtx_lock(&z->zmtx);
((void **) item)[0] = z->zitems;
@@ -419,8 +404,6 @@ zfree(vm_zone_t z, void *item)
z->zitems = item;
z->zfreecnt++;
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
mtx_unlock(&z->zmtx);
}
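vm_zone.c above already shows the fine-grained destination: zalloc() and zfree() stop touching vm_mtx entirely and rely on the per-zone mutex z->zmtx alone, while the vm_object work in zinitna() simply runs under Giant. A sketch of the resulting allocation fast path, mirroring the free-list layout visible in zfree() above (the first word of each free item points to the next); treat it as illustrative, not the exact code:

    static void *
    zalloc_fastpath(vm_zone_t z)
    {
            void *item;

            mtx_lock(&z->zmtx);                     /* per-zone, not global */
            item = z->zitems;
            if (item != NULL) {
                    z->zitems = ((void **)item)[0]; /* pop the free list */
                    z->zfreecnt--;
            }
            mtx_unlock(&z->zmtx);
            return (item);
    }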
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 1bcdf9f..bc78e5a 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -110,7 +110,8 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_object_t object;
struct vnode *vp;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
/*
* Pageout to vnode, no can do yet.
*/
@@ -123,13 +124,11 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
* Prevent race condition when allocating the object. This
* can happen with NFS vnodes since the nfsnode isn't locked.
*/
- mtx_unlock(&vm_mtx);
while (vp->v_flag & VOLOCK) {
vp->v_flag |= VOWANT;
tsleep(vp, PVM, "vnpobj", 0);
}
vp->v_flag |= VOLOCK;
- mtx_lock(&vm_mtx);
/*
* If the object is being terminated, wait for it to
@@ -137,7 +136,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
while (((object = vp->v_object) != NULL) &&
(object->flags & OBJ_DEAD)) {
- msleep(object, &vm_mtx, PVM, "vadead", 0);
+ tsleep(object, PVM, "vadead", 0);
}
if (vp->v_usecount == 0)
@@ -160,13 +159,11 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vp->v_usecount++;
}
- mtx_unlock(&vm_mtx);
vp->v_flag &= ~VOLOCK;
if (vp->v_flag & VOWANT) {
vp->v_flag &= ~VOWANT;
wakeup(vp);
}
- mtx_lock(&vm_mtx);
return (object);
}
@@ -176,7 +173,7 @@ vnode_pager_dealloc(object)
{
register struct vnode *vp = object->handle;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
@@ -203,7 +200,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -228,10 +225,8 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
- mtx_unlock(&vm_mtx);
err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn,
after, before);
- mtx_lock(&vm_mtx);
if (err)
return TRUE;
if ( bn == -1)
@@ -279,6 +274,8 @@ vnode_pager_setsize(vp, nsize)
vm_pindex_t nobjsize;
vm_object_t object = vp->v_object;
+ GIANT_REQUIRED;
+
if (object == NULL)
return;
@@ -294,11 +291,6 @@ vnode_pager_setsize(vp, nsize)
* File has shrunk. Toss any cached pages beyond the new EOF.
*/
if (nsize < object->un_pager.vnp.vnp_size) {
- int hadvmlock;
-
- hadvmlock = mtx_owned(&vm_mtx);
- if (!hadvmlock)
- mtx_lock(&vm_mtx);
vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
if (nobjsize < object->size) {
vm_object_page_remove(object, nobjsize, object->size,
@@ -339,8 +331,6 @@ vnode_pager_setsize(vp, nsize)
m->dirty = VM_PAGE_BITS_ALL;
}
}
- if (!hadvmlock)
- mtx_unlock(&vm_mtx);
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
@@ -364,7 +354,7 @@ vnode_pager_addr(vp, address, run)
daddr_t vblock;
int voffset;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
if ((int) address < 0)
return -1;
@@ -374,11 +364,9 @@ vnode_pager_addr(vp, address, run)
bsize = vp->v_mount->mnt_stat.f_iosize;
vblock = address / bsize;
voffset = address % bsize;
- mtx_unlock(&vm_mtx);
err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL);
- mtx_lock(&vm_mtx);
if (err || (block == -1))
rtaddress = -1;
else {
@@ -421,17 +409,16 @@ vnode_pager_input_smlfs(object, m)
vm_offset_t bsize;
int error = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
+
vp = object->handle;
if (vp->v_mount == NULL)
return VM_PAGER_BAD;
bsize = vp->v_mount->mnt_stat.f_iosize;
- mtx_unlock(&vm_mtx);
VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);
- mtx_lock(&vm_mtx);
kva = vm_pager_map_page(m);
for (i = 0; i < PAGE_SIZE / bsize; i++) {
@@ -442,7 +429,6 @@ vnode_pager_input_smlfs(object, m)
fileaddr = vnode_pager_addr(vp,
IDX_TO_OFF(m->pindex) + i * bsize, (int *)0);
if (fileaddr != -1) {
- mtx_unlock(&vm_mtx);
bp = getpbuf(&vnode_pbuf_freecnt);
/* build a minimal buffer header */
@@ -478,7 +464,6 @@ vnode_pager_input_smlfs(object, m)
* free the buffer header back to the swap buffer pool
*/
relpbuf(bp, &vnode_pbuf_freecnt);
- mtx_lock(&vm_mtx);
if (error)
break;
@@ -514,7 +499,7 @@ vnode_pager_input_old(object, m)
vm_offset_t kva;
struct vnode *vp;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
error = 0;
/*
@@ -534,7 +519,6 @@ vnode_pager_input_old(object, m)
kva = vm_pager_map_page(m);
vp = object->handle;
- mtx_unlock(&vm_mtx);
aiov.iov_base = (caddr_t) kva;
aiov.iov_len = size;
auio.uio_iov = &aiov;
@@ -554,7 +538,6 @@ vnode_pager_input_old(object, m)
else if (count != PAGE_SIZE)
bzero((caddr_t) kva + count, PAGE_SIZE - count);
}
- mtx_lock(&vm_mtx);
vm_pager_unmap_page(kva);
}
pmap_clear_modify(m);
@@ -588,7 +571,7 @@ vnode_pager_getpages(object, m, count, reqpage)
struct vnode *vp;
int bytes = count * PAGE_SIZE;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
vp = object->handle;
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
@@ -620,7 +603,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
int count;
int error = 0;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@@ -640,9 +623,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
/*
* if we can't bmap, use old VOP code
*/
- mtx_unlock(&vm_mtx);
if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
- mtx_lock(&vm_mtx);
for (i = 0; i < count; i++) {
if (i != reqpage) {
vm_page_free(m[i]);
@@ -659,7 +640,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
- mtx_lock(&vm_mtx);
for (i = 0; i < count; i++) {
if (i != reqpage) {
vm_page_free(m[i]);
@@ -669,7 +649,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
cnt.v_vnodepgsin++;
return vnode_pager_input_smlfs(object, m[reqpage]);
}
- mtx_lock(&vm_mtx);
/*
* If we have a completely valid page available to us, we can
@@ -770,7 +749,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* and map the pages to be read into the kva
*/
pmap_qenter(kva, m, count);
- mtx_unlock(&vm_mtx);
/* build a minimal buffer header */
bp->b_iocmd = BIO_READ;
@@ -808,7 +786,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (size != count * PAGE_SIZE)
bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
}
- mtx_lock(&vm_mtx);
pmap_qremove(kva, count);
/*
@@ -899,7 +876,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
struct mount *mp;
int bytes = count * PAGE_SIZE;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
/*
* Force synchronous operation if we are extremely low on memory
* to prevent a low-memory deadlock. VOP operations often need to
@@ -920,17 +897,13 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
*/
vp = object->handle;
- mtx_unlock(&vm_mtx);
if (vp->v_type != VREG)
mp = NULL;
(void)vn_start_write(vp, &mp, V_WAIT);
- mtx_lock(&vm_mtx);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
- mtx_unlock(&vm_mtx);
vn_finished_write(mp);
- mtx_lock(&vm_mtx);
}
@@ -962,7 +935,7 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
int error;
int ioflags;
- mtx_assert(&Giant, MA_OWNED);
+ GIANT_REQUIRED;
object = vp->v_object;
count = bytecount / PAGE_SIZE;
@@ -992,7 +965,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
}
}
}
- mtx_unlock(&vm_mtx);
/*
* pageouts are already clustered, use IO_ASYNC to force a bawrite()
@@ -1013,7 +985,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
auio.uio_resid = maxsize;
auio.uio_procp = (struct proc *) 0;
error = VOP_WRITE(vp, &auio, ioflags, curproc->p_ucred);
- mtx_lock(&vm_mtx);
cnt.v_vnodeout++;
cnt.v_vnodepgsout += ncount;
@@ -1036,18 +1007,15 @@ vnode_pager_lock(object)
{
struct proc *p = curproc; /* XXX */
- mtx_assert(&vm_mtx, MA_NOTOWNED);
- mtx_assert(&Giant, MA_OWNED);
- mtx_lock(&vm_mtx);
+ GIANT_REQUIRED;
+
for (; object != NULL; object = object->backing_object) {
if (object->type != OBJT_VNODE)
continue;
if (object->flags & OBJ_DEAD) {
- mtx_unlock(&vm_mtx);
return NULL;
}
- mtx_unlock(&vm_mtx);
/* XXX; If object->handle can change, we need to cache it. */
while (vget(object->handle,
LK_NOPAUSE | LK_SHARED | LK_RETRY | LK_CANRECURSE, p)) {
@@ -1057,6 +1025,5 @@ vnode_pager_lock(object)
}
return object->handle;
}
- mtx_unlock(&vm_mtx);
return NULL;
}