author    attilio <attilio@FreeBSD.org>    2013-03-09 03:19:53 +0000
committer attilio <attilio@FreeBSD.org>    2013-03-09 03:19:53 +0000
commit    76954ad68a25c559c6a8b2911674760afd4962f6 (patch)
tree      80cdb7116c19e2e4f42aeed31a65f76a54db11df /sys
parent    993799493c64eb0b9faeab971fbe4ecfe0214278 (diff)
parent    16a80466e5837ad617b6b144297fd6069188b9b3 (diff)
Merge from vmcontention.
Diffstat (limited to 'sys')
-rw-r--r--  sys/amd64/amd64/machdep.c | 1
-rw-r--r--  sys/amd64/amd64/pmap.c | 12
-rw-r--r--  sys/arm/arm/cpufunc_asm_arm11x6.S | 2
-rw-r--r--  sys/arm/arm/machdep.c | 1
-rw-r--r--  sys/arm/arm/pmap-v6.c | 6
-rw-r--r--  sys/arm/arm/pmap.c | 6
-rw-r--r--  sys/cddl/compat/opensolaris/kern/opensolaris_vm.c | 68
-rw-r--r--  sys/cddl/compat/opensolaris/sys/freebsd_rwlock.h | 34
-rw-r--r--  sys/cddl/compat/opensolaris/sys/vm.h | 44
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h | 1
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c | 55
-rw-r--r--  sys/compat/linprocfs/linprocfs.c | 8
-rw-r--r--  sys/conf/files | 1
-rw-r--r--  sys/dev/agp/agp.c | 19
-rw-r--r--  sys/dev/agp/agp_i810.c | 9
-rw-r--r--  sys/dev/drm/drmP.h | 1
-rw-r--r--  sys/dev/drm2/drmP.h | 1
-rw-r--r--  sys/dev/drm2/drm_fb_helper.c | 5
-rw-r--r--  sys/dev/drm2/drm_global.c | 1
-rw-r--r--  sys/dev/drm2/i915/i915_gem.c | 50
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo.c | 1
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_vm.c | 8
-rw-r--r--  sys/dev/drm2/ttm/ttm_memory.c | 2
-rw-r--r--  sys/dev/drm2/ttm/ttm_tt.c | 10
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c | 21
-rw-r--r--  sys/dev/md/md.c | 17
-rw-r--r--  sys/dev/netmap/netmap.c | 1
-rw-r--r--  sys/dev/oce/oce_hw.h | 2
-rw-r--r--  sys/dev/oce/oce_sysctl.c | 2
-rw-r--r--  sys/dev/sound/pcm/dsp.c | 2
-rw-r--r--  sys/fs/fuse/fuse_io.c | 5
-rw-r--r--  sys/fs/fuse/fuse_vnops.c | 20
-rw-r--r--  sys/fs/nfsclient/nfs_clbio.c | 19
-rw-r--r--  sys/fs/nfsclient/nfs_clnode.c | 4
-rw-r--r--  sys/fs/nfsclient/nfs_clvnops.c | 4
-rw-r--r--  sys/fs/nfsserver/nfs_nfsdport.c | 8
-rw-r--r--  sys/fs/procfs/procfs_map.c | 12
-rw-r--r--  sys/fs/tmpfs/tmpfs_subr.c | 12
-rw-r--r--  sys/fs/tmpfs/tmpfs_vnops.c | 46
-rw-r--r--  sys/geom/label/g_label_ntfs.c | 2
-rw-r--r--  sys/geom/raid/g_raid.c | 22
-rw-r--r--  sys/i386/i386/machdep.c | 1
-rw-r--r--  sys/i386/i386/pmap.c | 12
-rw-r--r--  sys/i386/xen/pmap.c | 12
-rw-r--r--  sys/ia64/ia64/machdep.c | 1
-rw-r--r--  sys/ia64/ia64/pmap.c | 10
-rw-r--r--  sys/kern/imgact_elf.c | 10
-rw-r--r--  sys/kern/kern_exec.c | 7
-rw-r--r--  sys/kern/kern_proc.c | 21
-rw-r--r--  sys/kern/kern_sharedpage.c | 6
-rw-r--r--  sys/kern/kern_shutdown.c | 1
-rw-r--r--  sys/kern/kern_timeout.c | 71
-rw-r--r--  sys/kern/subr_param.c | 12
-rw-r--r--  sys/kern/subr_uio.c | 6
-rw-r--r--  sys/kern/sys_process.c | 11
-rw-r--r--  sys/kern/sysv_shm.c | 5
-rw-r--r--  sys/kern/uipc_shm.c | 35
-rw-r--r--  sys/kern/uipc_syscalls.c | 21
-rw-r--r--  sys/kern/vfs_aio.c | 5
-rw-r--r--  sys/kern/vfs_bio.c | 57
-rw-r--r--  sys/kern/vfs_cluster.c | 24
-rw-r--r--  sys/kern/vfs_default.c | 6
-rw-r--r--  sys/kern/vfs_subr.c | 21
-rw-r--r--  sys/kern/vfs_syscalls.c | 5
-rw-r--r--  sys/kern/vfs_vnops.c | 5
-rw-r--r--  sys/mips/mips/machdep.c | 1
-rw-r--r--  sys/mips/mips/pmap.c | 10
-rw-r--r--  sys/modules/ath/Makefile | 9
-rw-r--r--  sys/modules/zfs/Makefile | 1
-rw-r--r--  sys/net/if.c | 3
-rw-r--r--  sys/net/route.c | 40
-rw-r--r--  sys/net/route.h | 2
-rw-r--r--  sys/net80211/ieee80211.c | 2
-rw-r--r--  sys/net80211/ieee80211_freebsd.c | 38
-rw-r--r--  sys/net80211/ieee80211_freebsd.h | 30
-rw-r--r--  sys/net80211/ieee80211_hostap.c | 26
-rw-r--r--  sys/net80211/ieee80211_ht.c | 4
-rw-r--r--  sys/net80211/ieee80211_hwmp.c | 24
-rw-r--r--  sys/net80211/ieee80211_mesh.c | 51
-rw-r--r--  sys/net80211/ieee80211_output.c | 506
-rw-r--r--  sys/net80211/ieee80211_power.c | 5
-rw-r--r--  sys/net80211/ieee80211_proto.h | 4
-rw-r--r--  sys/net80211/ieee80211_superg.c | 14
-rw-r--r--  sys/net80211/ieee80211_var.h | 1
-rw-r--r--  sys/net80211/ieee80211_wds.c | 3
-rw-r--r--  sys/nfsclient/nfs_bio.c | 19
-rw-r--r--  sys/nfsclient/nfs_vnops.c | 4
-rw-r--r--  sys/nfsserver/nfs_serv.c | 9
-rw-r--r--  sys/ofed/drivers/infiniband/core/umem.c | 6
-rw-r--r--  sys/ofed/include/linux/linux_compat.c | 1
-rw-r--r--  sys/pc98/pc98/machdep.c | 1
-rw-r--r--  sys/powerpc/aim/machdep.c | 1
-rw-r--r--  sys/powerpc/aim/mmu_oea.c | 8
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c | 8
-rw-r--r--  sys/powerpc/booke/machdep.c | 1
-rw-r--r--  sys/powerpc/booke/pmap.c | 10
-rw-r--r--  sys/security/mac/mac_process.c | 14
-rw-r--r--  sys/sparc64/conf/GENERIC | 3
-rw-r--r--  sys/sparc64/sparc64/machdep.c | 1
-rw-r--r--  sys/sparc64/sparc64/pmap.c | 18
-rw-r--r--  sys/sys/callout.h | 2
-rw-r--r--  sys/sys/systm.h | 2
-rw-r--r--  sys/ufs/ffs/ffs_rawread.c | 5
-rw-r--r--  sys/ufs/ffs/ffs_vnops.c | 7
-rw-r--r--  sys/vm/default_pager.c | 6
-rw-r--r--  sys/vm/device_pager.c | 23
-rw-r--r--  sys/vm/phys_pager.c | 7
-rw-r--r--  sys/vm/sg_pager.c | 7
-rw-r--r--  sys/vm/swap_pager.c | 73
-rw-r--r--  sys/vm/uma_core.c | 1
-rw-r--r--  sys/vm/vm_fault.c | 84
-rw-r--r--  sys/vm/vm_glue.c | 21
-rw-r--r--  sys/vm/vm_init.c | 9
-rw-r--r--  sys/vm/vm_kern.c | 32
-rw-r--r--  sys/vm/vm_map.c | 45
-rw-r--r--  sys/vm/vm_meter.c | 9
-rw-r--r--  sys/vm/vm_mmap.c | 13
-rw-r--r--  sys/vm/vm_object.c | 207
-rw-r--r--  sys/vm/vm_object.h | 37
-rw-r--r--  sys/vm/vm_page.c | 75
-rw-r--r--  sys/vm/vm_pageout.c | 87
-rw-r--r--  sys/vm/vm_pager.c | 9
-rw-r--r--  sys/vm/vm_pager.h | 8
-rw-r--r--  sys/vm/vm_reserv.c | 7
-rw-r--r--  sys/vm/vnode_pager.c | 117
125 files changed, 1579 insertions(+), 1102 deletions(-)
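
Most of the churn below is one mechanical conversion: the VM object lock moves from the old mutex API to a read/write lock, so every consumer swaps the mutex-style macros and assertions for their write-lock equivalents. A minimal sketch of the pattern, using only names that appear in the hunks below (the statement between lock and unlock is illustrative):

	/* Before: VM object lock taken through the mutex-era macros. */
	VM_OBJECT_LOCK(obj);
	vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
	VM_OBJECT_UNLOCK(obj);
	VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);

	/* After: the same critical section with the rwlock write macros. */
	VM_OBJECT_WLOCK(obj);
	vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
	VM_OBJECT_WUNLOCK(obj);
	VM_OBJECT_ASSERT_WLOCKED(obj);
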
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 1d7178f..f5e1437 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index e0ccc8e..40754af 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3556,7 +3556,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
newpte = (pt_entry_t)(pa | PG_A | PG_V);
if ((access & VM_PROT_WRITE) != 0)
@@ -3823,7 +3823,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -4005,7 +4005,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p, pdpg;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
@@ -4619,7 +4619,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -4750,7 +4750,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -4894,7 +4894,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/arm/arm/cpufunc_asm_arm11x6.S b/sys/arm/arm/cpufunc_asm_arm11x6.S
index fc9b7ec..e223208 100644
--- a/sys/arm/arm/cpufunc_asm_arm11x6.S
+++ b/sys/arm/arm/cpufunc_asm_arm11x6.S
@@ -62,6 +62,8 @@
#include <machine/asm.h>
__FBSDID("$FreeBSD$");
+ .cpu arm1136js
+
#if 0
#define Invalidate_I_cache(Rtmp1, Rtmp2) \
mcr p15, 0, Rtmp1, c7, c5, 0 /* Invalidate Entire I cache */
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index 5262e71..408493c 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index a4f3db1..2affa3e 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -2212,7 +2212,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -3428,7 +3428,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -3475,7 +3475,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 313e6fa..7070cb2 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -3006,7 +3006,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -4461,7 +4461,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -4523,7 +4523,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);
diff --git a/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c b/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
new file mode 100644
index 0000000..871f1c6
--- /dev/null
+++ b/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2013 EMC Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/freebsd_rwlock.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+const int zfs_vm_pagerret_bad = VM_PAGER_BAD;
+const int zfs_vm_pagerret_error = VM_PAGER_ERROR;
+const int zfs_vm_pagerret_ok = VM_PAGER_OK;
+
+void
+zfs_vmobject_assert_wlocked(vm_object_t object)
+{
+
+ /*
+ * This is not ideal because FILE/LINE used by assertions will not
+ * be too helpful, but it must be a hard function for
+ * compatibility reasons.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(object);
+}
+
+void
+zfs_vmobject_wlock(vm_object_t object)
+{
+
+ VM_OBJECT_WLOCK(object);
+}
+
+void
+zfs_vmobject_wunlock(vm_object_t object)
+{
+
+ VM_OBJECT_WUNLOCK(object);
+}
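
The wrappers in this new file are deliberately hard functions rather than macros: as the comment above concedes, the assertion loses the caller's FILE/LINE, but the CDDL ZFS code can call these symbols without expanding FreeBSD's native locking macros, which would presumably drag in rwlock internals that clash with the OpenSolaris lock definitions. A hypothetical caller, assuming only the compat headers are in scope (the helper name and body are illustrative, not from the tree):

	static void
	example_clean_page(vm_object_t obj, int64_t start)
	{
		vm_page_t pp;

		zfs_vmobject_wlock(obj);	/* plain function call, no macro */
		pp = vm_page_lookup(obj, OFF_TO_IDX(start));
		if (pp != NULL)
			vm_page_undirty(pp);
		zfs_vmobject_wunlock(obj);
	}
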
diff --git a/sys/cddl/compat/opensolaris/sys/freebsd_rwlock.h b/sys/cddl/compat/opensolaris/sys/freebsd_rwlock.h
new file mode 100644
index 0000000..9e494a9
--- /dev/null
+++ b/sys/cddl/compat/opensolaris/sys/freebsd_rwlock.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2013 EMC Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _OPENSOLARIS_SYS_FREEBSD_RWLOCK_H_
+#define _OPENSOLARIS_SYS_FREEBSD_RWLOCK_H_
+
+#include_next <sys/rwlock.h>
+
+#endif
diff --git a/sys/cddl/compat/opensolaris/sys/vm.h b/sys/cddl/compat/opensolaris/sys/vm.h
new file mode 100644
index 0000000..40e4ffa
--- /dev/null
+++ b/sys/cddl/compat/opensolaris/sys/vm.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2013 EMC Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _OPENSOLARIS_SYS_VM_H_
+#define _OPENSOLARIS_SYS_VM_H_
+
+#ifdef _KERNEL
+
+extern const int zfs_vm_pagerret_bad;
+extern const int zfs_vm_pagerret_error;
+extern const int zfs_vm_pagerret_ok;
+
+void zfs_vmobject_assert_wlocked(vm_object_t object);
+void zfs_vmobject_wlock(vm_object_t object);
+void zfs_vmobject_wunlock(vm_object_t object);
+
+#endif /* _KERNEL */
+
+#endif /* _OPENSOLARIS_SYS_VM_H_ */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
index cfec75d..b76349b 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_context.h
@@ -103,7 +103,6 @@ extern "C" {
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
-#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
/* There is clash. vm_map.h defines the two below and vdev_cache.c use them. */
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
index 7b060d8..41e4e9e 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
@@ -33,6 +33,7 @@
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
+#include <sys/vm.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
@@ -329,7 +330,7 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
vm_page_t pp;
obj = vp->v_object;
- VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
+ zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
@@ -377,7 +378,7 @@ page_hold(vnode_t *vp, int64_t start)
vm_page_t pp;
obj = vp->v_object;
- VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
+ zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
@@ -450,7 +451,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
ASSERT(obj != NULL);
off = start & PAGEOFFSET;
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
@@ -467,23 +468,23 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
("zfs update_pages: unbusy page in putpages case"));
KASSERT(!pmap_page_is_write_mapped(pp),
("zfs update_pages: writable page in putpages case"));
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_write(os, oid, start, nbytes, va, tx);
zfs_unmap_page(sf);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
vm_page_undirty(pp);
} else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, oid, start+off, nbytes,
va+off, DMU_READ_PREFETCH);;
zfs_unmap_page(sf);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
page_unbusy(pp);
}
len -= nbytes;
@@ -491,7 +492,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
}
if (segflg != UIO_NOCOPY)
vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
}
/*
@@ -523,7 +524,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
ASSERT(obj != NULL);
ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
@@ -531,14 +532,14 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
if (pp->valid == 0) {
vm_page_io_start(pp);
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes);
zfs_unmap_page(sf);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
vm_page_io_finish(pp);
vm_page_lock(pp);
if (error) {
@@ -555,7 +556,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
uio->uio_offset += bytes;
len -= bytes;
}
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
return (error);
}
@@ -587,7 +588,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
start = uio->uio_loffset;
off = start & PAGEOFFSET;
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
@@ -596,23 +597,23 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
struct sf_buf *sf;
caddr_t va;
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
error = uiomove(va + off, bytes, UIO_READ, uio);
zfs_unmap_page(sf);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
page_unhold(pp);
} else {
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
error = dmu_read_uio(os, zp->z_id, uio, bytes);
- VM_OBJECT_LOCK(obj);
+ zfs_vmobject_wlock(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
- VM_OBJECT_UNLOCK(obj);
+ zfs_vmobject_wunlock(obj);
return (error);
}
@@ -5684,7 +5685,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
mfirst = m[reqstart];
mlast = m[reqstart + reqsize - 1];
- VM_OBJECT_LOCK(object);
+ zfs_vmobject_wlock(object);
for (i = 0; i < reqstart; i++) {
vm_page_lock(m[i]);
@@ -5700,9 +5701,9 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
if (mreq->valid && reqsize == 1) {
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
- VM_OBJECT_UNLOCK(object);
+ zfs_vmobject_wunlock(object);
ZFS_EXIT(zfsvfs);
- return (VM_PAGER_OK);
+ return (zfs_vm_pagerret_ok);
}
PCPU_INC(cnt.v_vnodein);
@@ -5716,16 +5717,16 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
vm_page_unlock(m[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ zfs_vmobject_wunlock(object);
ZFS_EXIT(zfsvfs);
- return (VM_PAGER_BAD);
+ return (zfs_vm_pagerret_bad);
}
lsize = PAGE_SIZE;
if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
lsize = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mlast->pindex);
- VM_OBJECT_UNLOCK(object);
+ zfs_vmobject_wunlock(object);
for (i = reqstart; i < reqstart + reqsize; i++) {
size = PAGE_SIZE;
@@ -5741,7 +5742,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
break;
}
- VM_OBJECT_LOCK(object);
+ zfs_vmobject_wlock(object);
for (i = reqstart; i < reqstart + reqsize; i++) {
if (!error)
@@ -5751,11 +5752,11 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
vm_page_readahead_finish(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ zfs_vmobject_wunlock(object);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
- return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
+ return (error ? zfs_vm_pagerret_error : zfs_vm_pagerret_ok);
}
static int
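
The return-value changes above pair with the zfs_context.h hunk earlier: <vm/vm_pager.h> is no longer visible to the CDDL code, so zfs_getpages() hands back the linked-in zfs_vm_pagerret_* constants, which opensolaris_vm.c defines as aliases of the usual VM_PAGER_* codes. A condensed sketch of the idiom (the function is hypothetical):

	/* CDDL side: only the compat <sys/vm.h> is in scope. */
	static int
	example_getpages_status(int error)
	{

		return (error ? zfs_vm_pagerret_error : zfs_vm_pagerret_ok);
	}
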
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index bb21d8a..a49a9bc 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -1031,9 +1031,9 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
e_end = entry->end;
obj = entry->object.vm_object;
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
last_timestamp = map->timestamp;
@@ -1049,11 +1049,11 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
else
vp = NULL;
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
flags = obj->flags;
ref_count = obj->ref_count;
shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp) {
vn_fullpath(td, vp, &name, &freename);
vn_lock(vp, LK_SHARED | LK_RETRY);
diff --git a/sys/conf/files b/sys/conf/files
index 79212fe..b0d772e 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -157,6 +157,7 @@ cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with
cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}"
+cddl/compat/opensolaris/kern/opensolaris_vm.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}"
cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with "${ZFS_C}"
cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}"
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index 79b2d54..2c3e4b9 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
@@ -544,7 +545,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
* because vm_page_grab() may sleep and we can't hold a mutex
* while sleeping.
*/
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
/*
* Find a page from the object and wire it
@@ -557,14 +558,14 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
}
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
mtx_lock(&sc->as_lock);
if (mem->am_is_bound) {
device_printf(dev, "memory already bound\n");
error = EINVAL;
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
i = 0;
goto bad;
}
@@ -573,7 +574,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
* Bind the individual pages and flush the chipset's
* TLB.
*/
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
@@ -601,7 +602,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
}
vm_page_wakeup(m);
}
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
/*
* Flush the cpu cache since we are providing a new mapping
@@ -622,7 +623,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
return 0;
bad:
mtx_unlock(&sc->as_lock);
- VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
if (k >= i)
@@ -631,7 +632,7 @@ bad:
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
return error;
}
@@ -658,14 +659,14 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
*/
for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
AGP_UNBIND_PAGE(dev, mem->am_offset + i);
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
vm_page_lock(m);
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
agp_flush_cache();
AGP_FLUSH_TLB(dev);
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index 9ec3992..63679e8 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <dev/agp/agppriv.h>
#include <dev/agp/agpreg.h>
@@ -1967,10 +1968,10 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
* Allocate and wire down the page now so that we can
* get its physical address.
*/
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
mem->am_physical = VM_PAGE_TO_PHYS(m);
} else {
/* Our allocation is already nicely wired down for us.
@@ -2005,12 +2006,12 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
/*
* Unwire the page which we wired in alloc_memory.
*/
- VM_OBJECT_LOCK(mem->am_obj);
+ VM_OBJECT_WLOCK(mem->am_obj);
m = vm_page_lookup(mem->am_obj, 0);
vm_page_lock(m);
vm_page_unwire(m, 0);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(mem->am_obj);
+ VM_OBJECT_WUNLOCK(mem->am_obj);
} else {
contigfree(sc->argb_cursor, mem->am_size, M_AGP);
sc->argb_cursor = NULL;
diff --git a/sys/dev/drm/drmP.h b/sys/dev/drm/drmP.h
index ffb69b4..bf5feed 100644
--- a/sys/dev/drm/drmP.h
+++ b/sys/dev/drm/drmP.h
@@ -59,6 +59,7 @@ struct drm_file;
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/queue.h>
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
index 32c710a..8a456f1 100644
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -58,6 +58,7 @@ struct drm_file;
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
+#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
diff --git a/sys/dev/drm2/drm_fb_helper.c b/sys/dev/drm2/drm_fb_helper.c
index 2f24ec4..37b50cf 100644
--- a/sys/dev/drm2/drm_fb_helper.c
+++ b/sys/dev/drm2/drm_fb_helper.c
@@ -555,8 +555,11 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
for (i = 0; i < helper->connector_count; i++)
free(helper->connector_info[i], DRM_MEM_KMS);
free(helper->connector_info, DRM_MEM_KMS);
- for (i = 0; i < helper->crtc_count; i++)
+ for (i = 0; i < helper->crtc_count; i++) {
free(helper->crtc_info[i].mode_set.connectors, DRM_MEM_KMS);
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
free(helper->crtc_info, DRM_MEM_KMS);
}
diff --git a/sys/dev/drm2/drm_global.c b/sys/dev/drm2/drm_global.c
index 992d061..6372044 100644
--- a/sys/dev/drm2/drm_global.c
+++ b/sys/dev/drm2/drm_global.c
@@ -104,6 +104,7 @@ void drm_global_item_unref(struct drm_global_reference *ref)
MPASS(ref->object == item->object);
if (--item->refcount == 0) {
ref->release(ref);
+ free(item->object, M_DRM_GLOBAL);
item->object = NULL;
}
sx_xunlock(&item->mutex);
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 73c0b53..f3f82e7 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -990,14 +990,14 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
vm_obj = obj->base.vm_obj;
ret = 0;
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
vm_object_pip_add(vm_obj, 1);
while (size > 0) {
obj_pi = OFF_TO_IDX(offset);
obj_po = offset & PAGE_MASK;
m = i915_gem_wire_page(vm_obj, obj_pi);
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
@@ -1031,7 +1031,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
}
sf_buf_free(sf);
sched_unpin();
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
if (rw == UIO_WRITE)
vm_page_dirty(m);
vm_page_reference(m);
@@ -1044,7 +1044,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
break;
}
vm_object_pip_wakeup(vm_obj);
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
return (ret);
}
@@ -1357,7 +1357,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
} else
oldm = NULL;
retry:
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
unlocked_vmobj:
cause = ret = 0;
m = NULL;
@@ -1407,7 +1407,7 @@ unlocked_vmobj:
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->fault_mappable = true;
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
offset);
if (m == NULL) {
@@ -1452,7 +1452,7 @@ out:
kern_yield(PRI_USER);
goto unlocked_vmobj;
}
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
vm_object_pip_wakeup(vm_obj);
return (VM_PAGER_ERROR);
}
@@ -2208,12 +2208,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
M_WAITOK);
vm_obj = obj->base.vm_obj;
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
for (i = 0; i < page_count; i++) {
if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
goto failed;
}
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
return (0);
@@ -2226,7 +2226,7 @@ failed:
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
free(obj->pages, DRM_I915_GEM);
obj->pages = NULL;
return (-EIO);
@@ -2272,7 +2272,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
page_count = obj->base.size / PAGE_SIZE;
- VM_OBJECT_LOCK(obj->base.vm_obj);
+ VM_OBJECT_WLOCK(obj->base.vm_obj);
#if GEM_PARANOID_CHECK_GTT
i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
#endif
@@ -2287,7 +2287,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
- VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ VM_OBJECT_WUNLOCK(obj->base.vm_obj);
obj->dirty = 0;
free(obj->pages, DRM_I915_GEM);
obj->pages = NULL;
@@ -2309,7 +2309,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (devobj != NULL) {
page_count = OFF_TO_IDX(obj->base.size);
- VM_OBJECT_LOCK(devobj);
+ VM_OBJECT_WLOCK(devobj);
retry:
for (i = 0; i < page_count; i++) {
m = vm_page_lookup(devobj, i);
@@ -2319,7 +2319,7 @@ retry:
goto retry;
cdev_pager_free_page(devobj, m);
}
- VM_OBJECT_UNLOCK(devobj);
+ VM_OBJECT_WUNLOCK(devobj);
vm_object_deallocate(devobj);
}
@@ -2437,9 +2437,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
vm_object_t vm_obj;
vm_obj = obj->base.vm_obj;
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
vm_object_page_remove(vm_obj, 0, 0, false);
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
obj->madv = I915_MADV_PURGED_INTERNAL;
}
@@ -2488,7 +2488,7 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
vm_page_t m;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(object, pindex, NULL, NULL)) {
@@ -3567,13 +3567,13 @@ i915_gem_detach_phys_object(struct drm_device *dev,
vaddr = obj->phys_obj->handle->vaddr;
page_count = obj->base.size / PAGE_SIZE;
- VM_OBJECT_LOCK(obj->base.vm_obj);
+ VM_OBJECT_WLOCK(obj->base.vm_obj);
for (i = 0; i < page_count; i++) {
m = i915_gem_wire_page(obj->base.vm_obj, i);
if (m == NULL)
continue; /* XXX */
- VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ VM_OBJECT_WUNLOCK(obj->base.vm_obj);
sf = sf_buf_alloc(m, 0);
if (sf != NULL) {
dst = (char *)sf_buf_kva(sf);
@@ -3582,7 +3582,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
}
drm_clflush_pages(&m, 1);
- VM_OBJECT_LOCK(obj->base.vm_obj);
+ VM_OBJECT_WLOCK(obj->base.vm_obj);
vm_page_reference(m);
vm_page_lock(m);
vm_page_dirty(m);
@@ -3590,7 +3590,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
- VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ VM_OBJECT_WUNLOCK(obj->base.vm_obj);
intel_gtt_chipset_flush();
obj->phys_obj->cur_obj = NULL;
@@ -3632,7 +3632,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
- VM_OBJECT_LOCK(obj->base.vm_obj);
+ VM_OBJECT_WLOCK(obj->base.vm_obj);
ret = 0;
for (i = 0; i < page_count; i++) {
m = i915_gem_wire_page(obj->base.vm_obj, i);
@@ -3640,14 +3640,14 @@ i915_gem_attach_phys_object(struct drm_device *dev,
ret = -EIO;
break;
}
- VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ VM_OBJECT_WUNLOCK(obj->base.vm_obj);
sf = sf_buf_alloc(m, 0);
src = (char *)sf_buf_kva(sf);
dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
memcpy(dst, src, PAGE_SIZE);
sf_buf_free(sf);
- VM_OBJECT_LOCK(obj->base.vm_obj);
+ VM_OBJECT_WLOCK(obj->base.vm_obj);
vm_page_reference(m);
vm_page_lock(m);
@@ -3655,7 +3655,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
- VM_OBJECT_UNLOCK(obj->base.vm_obj);
+ VM_OBJECT_WUNLOCK(obj->base.vm_obj);
return (0);
}
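
One pattern recurs through the i915 hunks above: the object write lock is dropped around the sf_buf mapping and copy, then retaken, because neither the old mutex nor the new rwlock may be held across a call that can sleep, and sf_buf_alloc() can. Condensed sketch (names from the hunks above; the copy itself is illustrative):

	VM_OBJECT_WLOCK(obj->base.vm_obj);
	m = i915_gem_wire_page(obj->base.vm_obj, i);	/* asserts the write lock */
	VM_OBJECT_WUNLOCK(obj->base.vm_obj);		/* drop: sf_buf_alloc() may sleep */

	sf = sf_buf_alloc(m, 0);
	memcpy(dst, (char *)sf_buf_kva(sf), PAGE_SIZE);
	sf_buf_free(sf);

	VM_OBJECT_WLOCK(obj->base.vm_obj);		/* retake before page bookkeeping */
	vm_page_reference(m);
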
diff --git a/sys/dev/drm2/ttm/ttm_bo.c b/sys/dev/drm2/ttm/ttm_bo.c
index 12e5131..9cb9336 100644
--- a/sys/dev/drm2/ttm/ttm_bo.c
+++ b/sys/dev/drm2/ttm/ttm_bo.c
@@ -1400,7 +1400,6 @@ static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
vm_page_free(glob->dummy_read_page);
- free(glob, M_DRM_GLOBAL);
}
void ttm_bo_global_release(struct drm_global_reference *ref)
diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c
index 03e2f2b..3eb4cc7 100644
--- a/sys/dev/drm2/ttm/ttm_bo_vm.c
+++ b/sys/dev/drm2/ttm/ttm_bo_vm.c
@@ -118,7 +118,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
} else
oldm = NULL;
retry:
- VM_OBJECT_UNLOCK(vm_obj);
+ VM_OBJECT_WUNLOCK(vm_obj);
m = NULL;
reserve:
@@ -213,7 +213,7 @@ reserve:
VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
}
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
if ((m->flags & VPO_BUSY) != 0) {
vm_page_sleep(m, "ttmpbs");
ttm_mem_io_unlock(man);
@@ -241,11 +241,11 @@ out_unlock1:
return (retval);
out_io_unlock:
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
goto out_io_unlock1;
out_unlock:
- VM_OBJECT_LOCK(vm_obj);
+ VM_OBJECT_WLOCK(vm_obj);
goto out_unlock1;
}
diff --git a/sys/dev/drm2/ttm/ttm_memory.c b/sys/dev/drm2/ttm/ttm_memory.c
index ee74d94..dc85656 100644
--- a/sys/dev/drm2/ttm/ttm_memory.c
+++ b/sys/dev/drm2/ttm/ttm_memory.c
@@ -125,8 +125,6 @@ static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{
-
- free(glob, M_TTM_ZONE);
}
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
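
Together with the ttm_bo.c hunk above, this is the other half of the drm_global.c change earlier in the diff: the TTM kobj_release callbacks stop freeing the global object themselves, and drm_global_item_unref() performs the free exactly once after release() returns, fixing the split ownership. The resulting teardown shape, condensed from the drm_global.c hunk:

	if (--item->refcount == 0) {
		ref->release(ref);			/* tears down contents only */
		free(item->object, M_DRM_GLOBAL);	/* unref now owns the free */
		item->object = NULL;
	}
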
diff --git a/sys/dev/drm2/ttm/ttm_tt.c b/sys/dev/drm2/ttm/ttm_tt.c
index 82547f1..35c22df 100644
--- a/sys/dev/drm2/ttm/ttm_tt.c
+++ b/sys/dev/drm2/ttm/ttm_tt.c
@@ -285,7 +285,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
obj = ttm->swap_storage;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
@@ -312,7 +312,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
vm_page_wakeup(from_page);
}
vm_object_pip_wakeup(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
vm_object_deallocate(obj);
@@ -322,7 +322,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
err_ret:
vm_object_pip_wakeup(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (ret);
}
@@ -346,7 +346,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
} else
obj = persistent_swap_storage;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
@@ -359,7 +359,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
vm_page_wakeup(to_page);
}
vm_object_pip_wakeup(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = obj;
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 2f2f05a..0c250bc 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
@@ -1671,7 +1672,7 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
}
obj = entry->object.vm_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
/*
* Walk the backing_object list to find the base
@@ -1679,9 +1680,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
*/
for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@@ -1691,14 +1692,14 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
if (lobj == NULL) {
PMCDBG(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
"vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
continue;
}
if (lobj->type != OBJT_VNODE || lobj->handle == NULL) {
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(obj);
continue;
}
@@ -1710,8 +1711,8 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
if (entry->start == last_end && lobj->handle == last_vp) {
last_end = entry->end;
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(obj);
continue;
}
@@ -1733,9 +1734,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
vp = lobj->handle;
vref(vp);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
freepath = NULL;
pmc_getfilename(vp, &fullpath, &freepath);
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index b72f294..18936b0 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -75,6 +75,7 @@
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
@@ -657,17 +658,17 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
rv = VM_PAGER_OK;
- VM_OBJECT_LOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->object);
vm_object_pip_add(sc->object, 1);
for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
m = vm_page_grab(sc->object, i,
VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
- VM_OBJECT_UNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->object);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
- VM_OBJECT_LOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->object);
if (bp->bio_cmd == BIO_READ) {
if (m->valid != VM_PAGE_BITS_ALL)
rv = vm_pager_get_pages(sc->object, &m, 1, 0);
@@ -732,7 +733,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
offs = 0;
}
vm_object_pip_subtract(sc->object, 1);
- VM_OBJECT_UNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->object);
return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
@@ -1068,7 +1069,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
oldpages = OFF_TO_IDX(round_page(sc->mediasize));
newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
if (newpages < oldpages) {
- VM_OBJECT_LOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->object);
vm_object_page_remove(sc->object, newpages, 0, 0);
swap_pager_freespace(sc->object, newpages,
oldpages - newpages);
@@ -1076,7 +1077,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
newpages), sc->cred);
sc->object->charge = IDX_TO_OFF(newpages);
sc->object->size = newpages;
- VM_OBJECT_UNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->object);
} else if (newpages > oldpages) {
res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
oldpages), sc->cred);
@@ -1093,10 +1094,10 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
return (EDOM);
}
}
- VM_OBJECT_LOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->object);
sc->object->charge = IDX_TO_OFF(newpages);
sc->object->size = newpages;
- VM_OBJECT_UNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->object);
}
break;
default:
diff --git a/sys/dev/netmap/netmap.c b/sys/dev/netmap/netmap.c
index 35d5303..4fec312 100644
--- a/sys/dev/netmap/netmap.c
+++ b/sys/dev/netmap/netmap.c
@@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mman.h> /* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <vm/vm.h> /* vtophys */
#include <vm/pmap.h> /* vtophys */
#include <sys/socket.h> /* sockaddrs */
diff --git a/sys/dev/oce/oce_hw.h b/sys/dev/oce/oce_hw.h
index aac0a8a..43945dc 100644
--- a/sys/dev/oce/oce_hw.h
+++ b/sys/dev/oce/oce_hw.h
@@ -38,8 +38,6 @@
/* $FreeBSD$ */
-/* $FreeBSD$ */
-
#include <sys/types.h>
#undef _BIG_ENDIAN /* TODO */
diff --git a/sys/dev/oce/oce_sysctl.c b/sys/dev/oce/oce_sysctl.c
index d8a88de..6629941 100644
--- a/sys/dev/oce/oce_sysctl.c
+++ b/sys/dev/oce/oce_sysctl.c
@@ -38,8 +38,6 @@
/* $FreeBSD$ */
-/* $FreeBSD$ */
-
#include "oce_if.h"
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index 2cfc170..e52e46e 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -32,6 +32,8 @@
#include <dev/sound/pcm/sound.h>
#include <sys/ctype.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
#include <sys/sysent.h>
#include <vm/vm.h>
diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c
index 5feec1e..89b57bb 100644
--- a/sys/fs/fuse/fuse_io.c
+++ b/sys/fs/fuse/fuse_io.c
@@ -69,6 +69,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
@@ -784,9 +785,9 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
fvdat->flag |= FN_FLUSHINPROG;
if (vp->v_bufobj.bo_object != NULL) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (error) {
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index cc9733d..69c511d 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -67,7 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/mount.h>
@@ -1758,7 +1758,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
@@ -1769,11 +1769,11 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -1803,7 +1803,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
FS_DEBUG("error %d\n", error);
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -1813,7 +1813,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return VM_PAGER_ERROR;
}
/*
@@ -1823,7 +1823,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
@@ -1886,7 +1886,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
@@ -1975,9 +1975,9 @@ fuse_vnop_putpages(struct vop_putpages_args *ap)
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
- VM_OBJECT_LOCK(pages[i]->object);
+ VM_OBJECT_WLOCK(pages[i]->object);
vm_page_undirty(pages[i]);
- VM_OBJECT_UNLOCK(pages[i]->object);
+ VM_OBJECT_WUNLOCK(pages[i]->object);
}
}
return rtvals[0];
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index dba3bc9..f0a44a4 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
+#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -134,7 +135,7 @@ ncl_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -143,10 +144,10 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -176,7 +177,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
ncl_printf("nfs_getpages: error %d\n", error);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@@ -184,7 +185,7 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@@ -195,7 +196,7 @@ ncl_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@@ -231,7 +232,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -1353,9 +1354,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the
diff --git a/sys/fs/nfsclient/nfs_clnode.c b/sys/fs/nfsclient/nfs_clnode.c
index eaaec9c..0cd503c 100644
--- a/sys/fs/nfsclient/nfs_clnode.c
+++ b/sys/fs/nfsclient/nfs_clnode.c
@@ -216,10 +216,10 @@ ncl_inactive(struct vop_inactive_args *ap)
* stateid is available for the writes.
*/
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
retv = vm_object_page_clean(vp->v_object, 0, 0,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
} else
retv = TRUE;
if (retv == TRUE) {
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index f778009..2e105f8 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -697,9 +697,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index ef98e2b..d5cc979 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -1267,9 +1267,9 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
} else {
@@ -1298,10 +1298,10 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;
diff --git a/sys/fs/procfs/procfs_map.c b/sys/fs/procfs/procfs_map.c
index 3f57add..542c8fe 100644
--- a/sys/fs/procfs/procfs_map.c
+++ b/sys/fs/procfs/procfs_map.c
@@ -43,9 +43,9 @@
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#ifdef COMPAT_FREEBSD32
#include <sys/sysent.h>
@@ -132,7 +132,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
privateresident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
privateresident = obj->resident_page_count;
}
@@ -148,9 +148,9 @@ procfs_doprocmap(PFS_FILL_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
last_timestamp = map->timestamp;
@@ -181,12 +181,12 @@ procfs_doprocmap(PFS_FILL_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
flags = obj->flags;
ref_count = obj->ref_count;
shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(td, vp, &fullpath, &freepath);
vrele(vp);
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 47ac2e6..b003b2c 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -38,9 +38,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fnv_hash.h>
+#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
@@ -1270,7 +1272,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
return (ENOSPC);
- VM_OBJECT_LOCK(uobj);
+ VM_OBJECT_WLOCK(uobj);
if (newsize < oldsize) {
/*
* Zero the truncated part of the last page.
@@ -1290,9 +1292,9 @@ retry:
} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
VM_WAIT;
- VM_OBJECT_LOCK(uobj);
+ VM_OBJECT_WLOCK(uobj);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@@ -1312,7 +1314,7 @@ retry:
if (ignerr)
m = NULL;
else {
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
return (EIO);
}
}
@@ -1334,7 +1336,7 @@ retry:
}
}
uobj->size = newpages;
- VM_OBJECT_UNLOCK(uobj);
+ VM_OBJECT_WUNLOCK(uobj);
TMPFS_LOCK(tmp);
tmp->tm_pages_used += (newpages - oldpages);
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index bddcf24..54c95ff 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -39,9 +39,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
+#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/stat.h>
@@ -445,7 +447,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_t m;
int error, rv;
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -455,20 +457,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_lock(m);
vm_page_free(m);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
return (EIO);
}
} else
vm_page_zero_invalid(m, TRUE);
}
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
vm_page_lock(m);
vm_page_unwire(m, TRUE);
vm_page_unlock(m);
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
return (error);
}
@@ -511,7 +513,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(m, offset, tlen)) {
@@ -525,11 +527,11 @@ lookupvpg:
goto lookupvpg;
}
vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
return (error);
} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
KASSERT(offset == 0,
@@ -544,7 +546,7 @@ lookupvpg:
goto lookupvpg;
}
vm_page_busy(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
ma = (char *)sf_buf_kva(sf);
@@ -557,14 +559,14 @@ lookupvpg:
}
sf_buf_free(sf);
sched_unpin();
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
if (error == 0)
m->valid = VM_PAGE_BITS_ALL;
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
return (error);
}
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
return (error);
@@ -634,7 +636,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(vpg, offset, tlen)) {
@@ -649,15 +651,15 @@ lookupvpg:
}
vm_page_busy(vpg);
vm_page_undirty(vpg);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&vpg, offset, tlen, uio);
} else {
if (vm_page_is_cached(vobj, idx))
vm_page_cache_free(vobj, idx, idx + 1);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
vpg = NULL;
}
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (tpg->valid != VM_PAGE_BITS_ALL) {
@@ -673,14 +675,14 @@ lookupvpg:
} else
vm_page_zero_invalid(tpg, TRUE);
}
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
if (vpg == NULL)
error = uiomove_fromphys(&tpg, offset, tlen, uio);
else {
KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
pmap_copy_page(vpg, tpg);
}
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (error == 0) {
KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
("parts of tpg invalid"));
@@ -691,11 +693,11 @@ lookupvpg:
vm_page_unlock(tpg);
vm_page_wakeup(tpg);
out:
- VM_OBJECT_UNLOCK(tobj);
+ VM_OBJECT_WUNLOCK(tobj);
if (vpg != NULL) {
- VM_OBJECT_LOCK(vobj);
+ VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(vpg);
- VM_OBJECT_UNLOCK(vobj);
+ VM_OBJECT_WUNLOCK(vobj);
}
return (error);
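
Throughout these tmpfs routines the object lock is dropped around uiomove_fromphys(), since the copy may fault on the user buffer and sleep; the page is wired (or busied) first so it cannot be reclaimed in that window. A sketch of the read-side idiom from tmpfs_nocacheread(), as a hypothetical helper:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/*
 * Copy "tlen" bytes at "offset" within the wired, busied page "m"
 * out to the uio.  Called with the object write-locked; returns with
 * it unlocked.  The lock is dropped across the copy and re-taken to
 * unwire the page and clear its busy state.
 */
static int
copyout_wired_page(vm_object_t tobj, vm_page_t m, vm_offset_t offset,
    int tlen, struct uio *uio)
{
	int error;

	VM_OBJECT_WUNLOCK(tobj);	/* copy may fault and sleep */
	error = uiomove_fromphys(&m, offset, tlen, uio);
	VM_OBJECT_WLOCK(tobj);
	vm_page_lock(m);
	vm_page_unwire(m, TRUE);
	vm_page_unlock(m);
	vm_page_wakeup(m);		/* clear VPO_BUSY, wake waiters */
	VM_OBJECT_WUNLOCK(tobj);
	return (error);
}
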
diff --git a/sys/geom/label/g_label_ntfs.c b/sys/geom/label/g_label_ntfs.c
index 1ed4a07..e2c84cd 100644
--- a/sys/geom/label/g_label_ntfs.c
+++ b/sys/geom/label/g_label_ntfs.c
@@ -115,7 +115,7 @@ g_label_ntfs_taste(struct g_consumer *cp, char *label, size_t size)
mftrecsz = (char)bf->bf_mftrecsz;
recsize = (mftrecsz > 0) ? (mftrecsz * bf->bf_bps * bf->bf_spc) : (1 << -mftrecsz);
- if (recsize % pp->sectorsize != 0)
+ if (recsize == 0 || recsize % pp->sectorsize != 0)
goto done;
voloff = bf->bf_mftcn * bf->bf_spc * bf->bf_bps +
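
The new recsize == 0 test guards a degenerate case: with corrupt boot-block fields (e.g. bf_bps == 0 while mftrecsz > 0) recsize computes to zero, and since 0 % sectorsize == 0 the old modulo-only test accepted it. A userland sketch of the arithmetic, with illustrative values:

#include <assert.h>

int
main(void)
{
	unsigned int sectorsize = 512;		/* typical sector size */
	unsigned int recsize = 4 * 0 * 8;	/* mftrecsz * bps * spc, corrupt bps */

	assert(recsize % sectorsize == 0);	/* old check: accepted */
	assert(recsize == 0);			/* new check: rejected */
	return (0);
}
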
diff --git a/sys/geom/raid/g_raid.c b/sys/geom/raid/g_raid.c
index 91d14c3..e3dd6ba 100644
--- a/sys/geom/raid/g_raid.c
+++ b/sys/geom/raid/g_raid.c
@@ -92,6 +92,11 @@ TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW,
&g_raid_idle_threshold, 1000000,
"Time in microseconds to consider a volume idle.");
+static u_int ar_legacy_aliases = 1;
+SYSCTL_INT(_kern_geom_raid, OID_AUTO, legacy_aliases, CTLFLAG_RW,
+ &ar_legacy_aliases, 0, "Create aliases named in the legacy ataraid style.");
+TUNABLE_INT("kern.geom_raid.legacy_aliases", &ar_legacy_aliases);
+
#define MSLEEP(rv, ident, mtx, priority, wmesg, timeout) do { \
G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
@@ -1637,6 +1642,7 @@ g_raid_launch_provider(struct g_raid_volume *vol)
struct g_raid_softc *sc;
struct g_provider *pp;
char name[G_RAID_MAX_VOLUMENAME];
+ char announce_buf[80], buf1[32];
off_t off;
sc = vol->v_softc;
@@ -1650,6 +1656,22 @@ g_raid_launch_provider(struct g_raid_volume *vol)
/* Otherwise use sequential volume number. */
snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
}
+
+ /*
+ * Create a /dev/ar%d that the old ataraid(4) stack once
+ * created as an alias for /dev/raid/r%d if requested.
+ * This eases migration from stable/7 ataraid devices to newer
+ * FreeBSD releases. sbruno 07 MAY 2013
+ */
+
+ if (ar_legacy_aliases) {
+ snprintf(announce_buf, sizeof(announce_buf),
+ "kern.devalias.%s", name);
+ snprintf(buf1, sizeof(buf1),
+ "ar%d", vol->v_global_id);
+ setenv(announce_buf, buf1);
+ }
+
pp = g_new_providerf(sc->sc_geom, "%s", name);
pp->private = vol;
pp->mediasize = vol->v_mediasize;
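
The alias travels through the kernel environment: for each volume the code stores the legacy name under a kern.devalias.* key, which the devfs alias machinery is assumed to consume when the provider's device node is created. A userland sketch of the string construction for a hypothetical volume with global id 2:

#include <stdio.h>

int
main(void)
{
	char key[80], val[32];
	int v_global_id = 2;	/* illustrative volume id */

	snprintf(key, sizeof(key), "kern.devalias.%s", "raid/r2");
	snprintf(val, sizeof(val), "ar%d", v_global_id);
	/* prints: kern.devalias.raid/r2=ar2 */
	printf("%s=%s\n", key, val);
	return (0);
}
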
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 3f15f08..226b62b 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 8a3e71f..013f259 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3520,7 +3520,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@@ -3774,7 +3774,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -3952,7 +3952,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@@ -4571,7 +4571,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -4706,7 +4706,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -4858,7 +4858,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/i386/xen/pmap.c b/sys/i386/xen/pmap.c
index 532c870..3c7c3f5 100644
--- a/sys/i386/xen/pmap.c
+++ b/sys/i386/xen/pmap.c
@@ -2668,7 +2668,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@@ -2871,7 +2871,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
multicall_entry_t *mclp = mcl;
int error, count = 0;
- VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -3111,7 +3111,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@@ -3657,7 +3657,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -3788,7 +3788,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -3889,7 +3889,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/ia64/ia64/machdep.c b/sys/ia64/ia64/machdep.c
index ac6a829..5373151 100644
--- a/sys/ia64/ia64/machdep.c
+++ b/sys/ia64/ia64/machdep.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index fcc00f4..1dff1f9 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -1802,7 +1802,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
m = m_start;
rw_wlock(&pvh_global_lock);
@@ -1893,7 +1893,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2211,7 +2211,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be dirty.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2295,7 +2295,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -2373,7 +2373,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index e6f385a..8e16ca0 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
-#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
@@ -53,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/procfs.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
@@ -1278,15 +1278,15 @@ each_writable_segment(td, func, closure)
continue;
/* Ignore memory-mapped devices and such things. */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
ignore_entry = object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP && object->type != OBJT_VNODE;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (ignore_entry)
continue;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 7c0d2d6..3890157 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
@@ -929,7 +930,7 @@ exec_map_first_page(imgp)
object = imgp->vp->v_object;
if (object == NULL)
return (EACCES);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
if ((object->flags & OBJ_COLORED) == 0) {
object->flags |= OBJ_COLORED;
@@ -964,7 +965,7 @@ exec_map_first_page(imgp)
vm_page_free(ma[0]);
vm_page_unlock(ma[0]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@@ -972,7 +973,7 @@ exec_map_first_page(imgp)
vm_page_hold(ma[0]);
vm_page_unlock(ma[0]);
vm_page_wakeup(ma[0]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 2a74a59..6c46801 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ptrace.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
@@ -1994,7 +1995,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@@ -2009,9 +2010,9 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@@ -2071,11 +2072,11 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);
@@ -2161,7 +2162,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@@ -2182,9 +2183,9 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@@ -2246,11 +2247,11 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);
diff --git a/sys/kern/kern_sharedpage.c b/sys/kern/kern_sharedpage.c
index 619304a..20b9038 100644
--- a/sys/kern/kern_sharedpage.c
+++ b/sys/kern/kern_sharedpage.c
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vdso.h>
@@ -107,11 +107,11 @@ shared_page_init(void *dummy __unused)
sx_init(&shared_page_alloc_sx, "shpsx");
shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
VM_PROT_DEFAULT, 0, NULL);
- VM_OBJECT_LOCK(shared_page_obj);
+ VM_OBJECT_WLOCK(shared_page_obj);
m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
VM_ALLOC_ZERO);
m->valid = VM_PAGE_BITS_ALL;
- VM_OBJECT_UNLOCK(shared_page_obj);
+ VM_OBJECT_WUNLOCK(shared_page_obj);
addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
pmap_qenter(addr, &m, 1);
shared_page_mapping = (char *)addr;
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index fcbae28..b120263 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index df459cd..fa82abd 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
+#include <sys/file.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
@@ -101,6 +102,11 @@ SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
0, "Average number of MP direct callouts made per callout_process call. "
"Units = 1/1000");
#endif
+
+static int ncallout;
+SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0,
+ "Number of entries in callwheel and size of timeout() preallocation");
+
/*
* TODO:
* allocate more timeout table slots when table overflows.
@@ -181,6 +187,7 @@ struct callout_cpu cc_cpu;
static int timeout_cpu;
+static void callout_cpu_init(struct callout_cpu *cc);
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
int *mpcalls, int *lockcalls, int *gcalls,
@@ -240,18 +247,21 @@ cc_cce_migrating(struct callout_cpu *cc, int direct)
}
/*
- * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
- *
- * This code is called very early in the kernel initialization sequence,
- * and may be called more then once.
+ * Kernel low-level callwheel initialization, called on cpu0
+ * during kernel startup.
*/
-caddr_t
-kern_timeout_callwheel_alloc(caddr_t v)
+static void
+callout_callwheel_init(void *dummy)
{
struct callout_cpu *cc;
- timeout_cpu = PCPU_GET(cpuid);
- cc = CC_CPU(timeout_cpu);
+ /*
+ * Calculate the size of the callout wheel and the preallocated
+ * timeout() structures.
+ */
+ ncallout = imin(16 + maxproc + maxfiles, 18508);
+ TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
+
/*
* Calculate callout wheel size, should be next power of two higher
* than 'ncallout'.
@@ -259,13 +269,23 @@ kern_timeout_callwheel_alloc(caddr_t v)
callwheelsize = 1 << fls(ncallout);
callwheelmask = callwheelsize - 1;
- cc->cc_callout = (struct callout *)v;
- v = (caddr_t)(cc->cc_callout + ncallout);
- cc->cc_callwheel = (struct callout_list *)v;
- v = (caddr_t)(cc->cc_callwheel + callwheelsize);
- return(v);
+ /*
+ * Only cpu0 handles timeout(9) and receives a preallocation.
+ *
+ * XXX: Once all timeout(9) consumers are converted this can
+ * be removed.
+ */
+ timeout_cpu = PCPU_GET(cpuid);
+ cc = CC_CPU(timeout_cpu);
+ cc->cc_callout = malloc(ncallout * sizeof(struct callout),
+ M_CALLOUT, M_WAITOK);
+ callout_cpu_init(cc);
}
+SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
+/*
+ * Initialize the per-cpu callout structures.
+ */
static void
callout_cpu_init(struct callout_cpu *cc)
{
@@ -274,13 +294,15 @@ callout_cpu_init(struct callout_cpu *cc)
mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
SLIST_INIT(&cc->cc_callfree);
+ cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize,
+ M_CALLOUT, M_WAITOK);
for (i = 0; i < callwheelsize; i++)
LIST_INIT(&cc->cc_callwheel[i]);
TAILQ_INIT(&cc->cc_expireq);
cc->cc_firstevent = INT64_MAX;
for (i = 0; i < 2; i++)
cc_cce_cleanup(cc, i);
- if (cc->cc_callout == NULL)
+ if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */
return;
for (i = 0; i < ncallout; i++) {
c = &cc->cc_callout[i];
@@ -321,19 +343,6 @@ callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
#endif
/*
- * kern_timeout_callwheel_init() - initialize previously reserved callwheel
- * space.
- *
- * This code is called just once, after the space reserved for the
- * callout wheel has been finalized.
- */
-void
-kern_timeout_callwheel_init(void)
-{
- callout_cpu_init(CC_CPU(timeout_cpu));
-}
-
-/*
* Start standard softclock thread.
*/
static void
@@ -353,18 +362,14 @@ start_softclock(void *dummy)
if (cpu == timeout_cpu)
continue;
cc = CC_CPU(cpu);
+ cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */
+ callout_cpu_init(cc);
if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
INTR_MPSAFE, &cc->cc_cookie))
panic("died while creating standard software ithreads");
- cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */
- cc->cc_callwheel = malloc(
- sizeof(struct callout_list) * callwheelsize, M_CALLOUT,
- M_WAITOK);
- callout_cpu_init(cc);
}
#endif
}
-
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
#define CC_HASH_SHIFT 8
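
The sizing math that previously lived in init_param2() (see the subr_param.c hunk just below) now runs at SI_SUB_CPU time: ncallout is clipped at 18508 and the wheel is the next power of two above it. A userland sketch using FreeBSD's fls(3), with illustrative maxproc/maxfiles values:

#include <stdio.h>
#include <strings.h>	/* fls(3) */

int
main(void)
{
	int maxproc = 12000, maxfiles = 6000;	/* illustrative */
	int ncallout, callwheelsize, callwheelmask;

	ncallout = 16 + maxproc + maxfiles;	/* 18016 */
	if (ncallout > 18508)			/* clip, as imin() does */
		ncallout = 18508;
	callwheelsize = 1 << fls(ncallout);	/* 1 << 15 == 32768 */
	callwheelmask = callwheelsize - 1;
	printf("%d %d %#x\n", ncallout, callwheelsize, callwheelmask);
	return (0);
}
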
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 0a3580b..825a3a0 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -91,7 +91,6 @@ int maxprocperuid; /* max # of procs per user */
int maxfiles; /* sys. wide open files limit */
int maxfilesperproc; /* per-proc open files limit */
int msgbufsize; /* size of kernel message buffer */
-int ncallout; /* maximum # of timer events */
int nbuf;
int ngroups_max; /* max # groups per process */
int nswbuf;
@@ -109,8 +108,6 @@ u_long sgrowsiz; /* amount to grow stack */
SYSCTL_INT(_kern, OID_AUTO, hz, CTLFLAG_RDTUN, &hz, 0,
"Number of clock ticks per second");
-SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0,
- "Number of pre-allocated timer events");
SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RDTUN, &nbuf, 0,
"Number of buffers in the buffer cache");
SYSCTL_INT(_kern, OID_AUTO, nswbuf, CTLFLAG_RDTUN, &nswbuf, 0,
@@ -327,15 +324,6 @@ init_param2(long physpages)
TUNABLE_INT_FETCH("kern.nbuf", &nbuf);
/*
- * XXX: Does the callout wheel have to be so big?
- *
- * Clip callout to result of previous function of maxusers maximum
- * 384. This is still huge, but acceptable.
- */
- ncallout = imin(16 + maxproc + maxfiles, 18508);
- TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
-
- /*
* The default for maxpipekva is min(1/64 of the kernel address space,
* max(1/64 of main memory, 512KB)). See sys_pipe.c for more details.
*/
diff --git a/sys/kern/subr_uio.c b/sys/kern/subr_uio.c
index 2b47395..1ee265c 100644
--- a/sys/kern/subr_uio.c
+++ b/sys/kern/subr_uio.c
@@ -45,9 +45,9 @@ __FBSDID("$FreeBSD$");
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
@@ -104,7 +104,7 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
&upindex, &prot, &wired)) != KERN_SUCCESS) {
return(EFAULT);
}
- VM_OBJECT_LOCK(uobject);
+ VM_OBJECT_WLOCK(uobject);
retry:
if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
@@ -124,7 +124,7 @@ retry:
}
vm_page_insert(kern_pg, uobject, upindex);
vm_page_dirty(kern_pg);
- VM_OBJECT_UNLOCK(uobject);
+ VM_OBJECT_WUNLOCK(uobject);
vm_map_lookup_done(map, entry);
return(KERN_SUCCESS);
}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index e70e60e..7dc43c4 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
@@ -381,7 +382,7 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
obj = entry->object.vm_object;
if (obj != NULL)
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
} while (0);
vm_map_unlock_read(map);
@@ -394,9 +395,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
lobj = obj;
for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
if (tobj != obj)
- VM_OBJECT_LOCK(tobj);
+ VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
pve->pve_offset += tobj->backing_object_offset;
}
@@ -404,8 +405,8 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
if (vp != NULL)
vref(vp);
if (lobj != obj)
- VM_OBJECT_UNLOCK(lobj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(lobj);
+ VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
freepath = NULL;
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 5c35514..a1c6b34 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
@@ -707,10 +708,10 @@ shmget_allocate_segment(td, uap, mode)
#endif
return (ENOMEM);
}
- VM_OBJECT_LOCK(shm_object);
+ VM_OBJECT_WLOCK(shm_object);
vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_object, OBJ_NOSPLIT);
- VM_OBJECT_UNLOCK(shm_object);
+ VM_OBJECT_WUNLOCK(shm_object);
shmseg->object = shm_object;
shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 0cbb8b3..fc33de5 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
@@ -253,9 +254,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
int base, rv;
object = shmfd->shm_object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (length == shmfd->shm_size) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
nobjsize = OFF_TO_IDX(length + PAGE_MASK);
@@ -267,7 +268,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
* object is mapped into the kernel.
*/
if (shmfd->shm_kmappings > 0) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EBUSY);
}
@@ -288,9 +289,9 @@ retry:
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@@ -308,7 +309,7 @@ retry:
} else {
vm_page_free(m);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@@ -338,7 +339,7 @@ retry:
/* Attempt to reserve the swap */
delta = ptoa(nobjsize - object->size);
if (!swap_reserve_by_cred(delta, object->cred)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (ENOMEM);
}
object->charge += delta;
@@ -349,7 +350,7 @@ retry:
shmfd->shm_mtime = shmfd->shm_ctime;
mtx_unlock(&shm_timestamp_lock);
object->size = nobjsize;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -370,10 +371,10 @@ shm_alloc(struct ucred *ucred, mode_t mode)
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
- VM_OBJECT_LOCK(shmfd->shm_object);
+ VM_OBJECT_WLOCK(shmfd->shm_object);
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
- VM_OBJECT_UNLOCK(shmfd->shm_object);
+ VM_OBJECT_WUNLOCK(shmfd->shm_object);
vfs_timestamp(&shmfd->shm_birthtime);
shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
shmfd->shm_birthtime;
@@ -761,20 +762,20 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
return (EINVAL);
shmfd = fp->f_data;
obj = shmfd->shm_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
/*
* XXXRW: This validation is probably insufficient, and subject to
* sign errors. It should be fixed.
*/
if (offset >= shmfd->shm_size ||
offset + size > round_page(shmfd->shm_size)) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (EINVAL);
}
shmfd->shm_kmappings++;
vm_object_reference_locked(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/* Map the object into the kernel_map and wire it. */
kva = vm_map_min(kernel_map);
@@ -796,9 +797,9 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
vm_object_deallocate(obj);
/* On failure, drop our mapping reference. */
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
shmfd->shm_kmappings--;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (vm_mmap_to_errno(rv));
}
@@ -840,10 +841,10 @@ shm_unmap(struct file *fp, void *mem, size_t size)
if (obj != shmfd->shm_object)
return (EINVAL);
vm_map_remove(map, kva, kva + size);
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
shmfd->shm_kmappings--;
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (0);
}
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index cd37a4d..894cffc 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
+#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sysent.h>
#include <sys/socket.h>
@@ -1907,12 +1908,12 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
* reclamation of its vnode does not
* immediately destroy it.
*/
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if ((obj->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
} else {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
obj = NULL;
}
}
@@ -2089,7 +2090,7 @@ retry_space:
vm_offset_t pgoff;
struct mbuf *m0;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
/*
* Calculate the amount to transfer.
* Not to exceed a page, the EOF,
@@ -2107,7 +2108,7 @@ retry_space:
xfsize = omin(rem, xfsize);
xfsize = omin(space - loopbytes, xfsize);
if (xfsize <= 0) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
done = 1; /* all data sent */
break;
}
@@ -2128,7 +2129,7 @@ retry_space:
* block.
*/
if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
else if (m != NULL)
error = EAGAIN; /* send what we already got */
else if (uap->flags & SF_NODISKIO)
@@ -2142,7 +2143,7 @@ retry_space:
* when the I/O completes.
*/
vm_page_io_start(pg);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/*
* Get the page from backing store.
@@ -2164,10 +2165,10 @@ retry_space:
td->td_ucred, NOCRED, &resid, td);
VOP_UNLOCK(vp, 0);
after_read:
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_page_io_finish(pg);
if (!error)
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
mbstat.sf_iocnt++;
}
if (error) {
@@ -2182,7 +2183,7 @@ retry_space:
pg->busy == 0 && !(pg->oflags & VPO_BUSY))
vm_page_free(pg);
vm_page_unlock(pg);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (error == EAGAIN)
error = 0; /* not a real error */
break;
diff --git a/sys/kern/vfs_aio.c b/sys/kern/vfs_aio.c
index cba1638..cafe440 100644
--- a/sys/kern/vfs_aio.c
+++ b/sys/kern/vfs_aio.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
+#include <sys/rwlock.h>
#include <sys/sema.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
@@ -841,9 +842,9 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
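
aio_fsync_vnode() shows the clean-then-fsync idiom that recurs in vfs_subr.c and vfs_syscalls.c below: dirty resident pages are pushed into the buffer layer under the object write lock before VOP_FSYNC() flushes the buffers. As a hypothetical helper; the vnode is assumed locked by the caller:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

static int
flush_vnode_pages(struct vnode *vp, struct thread *td)
{

	if (vp->v_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_object);
		vm_object_page_clean(vp->v_object, 0, 0, 0);
		VM_OBJECT_WUNLOCK(vp->v_object);
	}
	return (VOP_FSYNC(vp, MNT_WAIT, td));
}
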
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index f9b4dbb..6d110ab 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -458,7 +459,7 @@ vfs_buf_test_cache(struct buf *bp,
vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (bp->b_flags & B_CACHE) {
int base = (foff + off) & PAGE_MASK;
if (vm_page_is_valid(m, base, size) == 0)
@@ -1388,7 +1389,7 @@ brelse(struct buf *bp)
*/
resid = bp->b_bufsize;
foff = bp->b_offset;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int had_bogus = 0;
@@ -1436,7 +1437,7 @@ brelse(struct buf *bp)
resid -= PAGE_SIZE - (foff & PAGE_MASK);
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
@@ -1658,7 +1659,7 @@ vfs_vmio_release(struct buf *bp)
vm_page_t m;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
@@ -1690,7 +1691,7 @@ vfs_vmio_release(struct buf *bp)
}
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize) {
bufspacewakeup();
@@ -2467,7 +2468,7 @@ inmem(struct vnode * vp, daddr_t blkno)
size = vp->v_mount->mnt_stat.f_iosize;
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
@@ -2479,11 +2480,11 @@ inmem(struct vnode * vp, daddr_t blkno)
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
goto notinmem;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return 1;
notinmem:
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
return (0);
}
@@ -2513,7 +2514,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
KASSERT(bp->b_offset != NOOFFSET,
("vfs_clean_pages_dirty_buf: no buffer offset"));
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
vfs_drain_busy_pages(bp);
vfs_setdirty_locked_object(bp);
for (i = 0; i < bp->b_npages; i++) {
@@ -2526,7 +2527,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
foff = noff;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
static void
@@ -2536,7 +2537,7 @@ vfs_setdirty_locked_object(struct buf *bp)
int i;
object = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* We qualify the scan for modified pages on whether the
@@ -3042,7 +3043,7 @@ allocbuf(struct buf *bp, int size)
(vm_offset_t)bp->b_data) +
(desiredpages << PAGE_SHIFT),
(bp->b_npages - desiredpages));
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = desiredpages; i < bp->b_npages; i++) {
/*
* the page is not freed here -- it
@@ -3061,7 +3062,7 @@ allocbuf(struct buf *bp, int size)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_npages = desiredpages;
}
} else if (size > bp->b_bcount) {
@@ -3082,7 +3083,7 @@ allocbuf(struct buf *bp, int size)
obj = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
@@ -3144,7 +3145,7 @@ allocbuf(struct buf *bp, int size)
toff += tinc;
tinc = PAGE_SIZE;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
/*
* Step 3, fixup the KVM pmap. Remember that
@@ -3399,7 +3400,7 @@ bufdone_finish(struct buf *bp)
bp->b_flags |= B_CACHE;
}
bogus = 0;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int bogusflag = 0;
int resid;
@@ -3441,7 +3442,7 @@ bufdone_finish(struct buf *bp)
iosize -= resid;
}
vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@@ -3479,7 +3480,7 @@ vfs_unbusy_pages(struct buf *bp)
return;
obj = bp->b_bufobj->bo_object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
@@ -3494,7 +3495,7 @@ vfs_unbusy_pages(struct buf *bp)
vm_page_io_finish(m);
}
vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
/*
@@ -3573,7 +3574,7 @@ vfs_drain_busy_pages(struct buf *bp)
vm_page_t m;
int i, last_busied;
- VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
last_busied = 0;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
@@ -3615,7 +3616,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vfs_drain_busy_pages(bp);
if (bp->b_bufsize != 0)
vfs_setdirty_locked_object(bp);
@@ -3652,7 +3653,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
}
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@@ -3683,7 +3684,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
base += (bp->b_offset & PAGE_MASK);
n = PAGE_SIZE - (base & PAGE_MASK);
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
m = bp->b_pages[i];
if (n > size)
@@ -3693,7 +3694,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
size -= n;
n = PAGE_SIZE;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
/*
@@ -3720,13 +3721,13 @@ vfs_bio_clrbuf(struct buf *bp)
}
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
(bp->b_offset & PAGE_MASK) == 0) {
if (bp->b_pages[0] == bogus_page)
goto unlock;
mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
if ((bp->b_pages[0]->valid & mask) == mask)
goto unlock;
if ((bp->b_pages[0]->valid & mask) == 0) {
@@ -3745,7 +3746,7 @@ vfs_bio_clrbuf(struct buf *bp)
continue;
j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
if ((bp->b_pages[i]->valid & mask) == mask)
continue;
if ((bp->b_pages[i]->valid & mask) == 0)
@@ -3759,7 +3760,7 @@ vfs_bio_clrbuf(struct buf *bp)
bp->b_pages[i]->valid |= mask;
}
unlock:
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_resid = 0;
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 663b66f..70937a2 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -406,21 +407,20 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
off = tbp->b_offset;
tsize = size;
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; tsize > 0; j++) {
toff = off & PAGE_MASK;
tinc = tsize;
if (toff + tinc > PAGE_SIZE)
tinc = PAGE_SIZE - toff;
- VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
- MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
if ((tbp->b_pages[j]->valid &
vm_page_bits(toff, tinc)) != 0)
break;
off += tinc;
tsize -= tinc;
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
if (tsize > 0) {
bqrelse(tbp);
break;
@@ -455,7 +455,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@@ -469,7 +469,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if (m->valid == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
/*
* Don't inherit tbp->b_bufsize as it may be larger due to
* a non-page-aligned size. Instead just aggregate using
@@ -487,13 +487,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
- VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
- VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
@@ -918,12 +918,12 @@ cluster_wbuild(vp, size, start_lbn, len)
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
- VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->oflags & VPO_BUSY) {
- VM_OBJECT_UNLOCK(
+ VM_OBJECT_WUNLOCK(
tbp->b_bufobj->bo_object);
bqrelse(tbp);
goto finishcluster;
@@ -940,7 +940,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
- VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
+ VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
}
bp->b_bcount += size;
bp->b_bufsize += size;
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 1dd0185..ed071b0 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -47,8 +47,8 @@ __FBSDID("$FreeBSD$");
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
-#include <sys/mutex.h>
#include <sys/namei.h>
+#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
@@ -1047,10 +1047,10 @@ vop_stdadvise(struct vop_advise_args *ap)
if (vp->v_object != NULL) {
start = trunc_page(ap->a_start);
end = round_page(ap->a_end);
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
OFF_TO_IDX(end));
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
VOP_UNLOCK(vp, 0);
break;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 1c26368..d0c6bb8 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
@@ -1244,9 +1245,9 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
bufobj_wwait(bo, 0, 0);
BO_UNLOCK(bo);
if (bo->bo_object != NULL) {
- VM_OBJECT_LOCK(bo->bo_object);
+ VM_OBJECT_WLOCK(bo->bo_object);
vm_object_pip_wait(bo->bo_object, "bovlbx");
- VM_OBJECT_UNLOCK(bo->bo_object);
+ VM_OBJECT_WUNLOCK(bo->bo_object);
}
BO_LOCK(bo);
} while (bo->bo_numoutput > 0);
@@ -1257,10 +1258,10 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
*/
if (bo->bo_object != NULL &&
(flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
- VM_OBJECT_LOCK(bo->bo_object);
+ VM_OBJECT_WLOCK(bo->bo_object);
vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
OBJPR_CLEANONLY : 0);
- VM_OBJECT_UNLOCK(bo->bo_object);
+ VM_OBJECT_WUNLOCK(bo->bo_object);
}
#ifdef INVARIANTS
@@ -2520,9 +2521,9 @@ vinactive(struct vnode *vp, struct thread *td)
*/
obj = vp->v_object;
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
@@ -2603,9 +2604,9 @@ loop:
*/
if (flags & WRITECLOSE) {
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
if (error != 0) {
@@ -3503,11 +3504,11 @@ vfs_msync(struct mount *mp, int flags)
obj = vp->v_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
vput(vp);
}
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 4c1d97c..fc78235 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/filio.h>
#include <sys/limits.h>
#include <sys/linker.h>
+#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/stat.h>
#include <sys/sx.h>
@@ -3437,9 +3438,9 @@ sys_fsync(td, uap)
vn_lock(vp, lock_flags | LK_RETRY);
AUDIT_ARG_VNODE1(vp);
if (vp->v_object != NULL) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 96ce9e2..d367340 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
@@ -1892,9 +1893,9 @@ vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
if ((object = vp->v_object) == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_page_remove(object, start, end, 0);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
int
diff --git a/sys/mips/mips/machdep.c b/sys/mips/mips/machdep.c
index a5fd953..119cbc2 100644
--- a/sys/mips/mips/machdep.c
+++ b/sys/mips/mips/machdep.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 7925b8c..9e1b812 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2399,7 +2399,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
- VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@@ -2423,7 +2423,7 @@ void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2768,7 +2768,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -2834,7 +2834,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_D set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -2882,7 +2882,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
diff --git a/sys/modules/ath/Makefile b/sys/modules/ath/Makefile
index 7e65785..4f96c63 100644
--- a/sys/modules/ath/Makefile
+++ b/sys/modules/ath/Makefile
@@ -123,6 +123,15 @@ SRCS+= ah_eeprom_9287.c
.PATH: ${.CURDIR}/../../dev/ath/ath_hal/ar9002
SRCS+= ar9287.c ar9287_reset.c ar9287_attach.c ar9287_cal.c ar9287_olc.c
+# + AR9300 HAL
+# .PATH: ${.CURDIR}/../../dev/ath/ath_hal/ar9003
+#SRCS+= ar9300_interrupts.c ar9300_radar.c ar9300_ani.c ar9300_keycache.c
+#SRCS+= ar9300_radio.c ar9300_xmit.c ar9300_attach.c ar9300_mci.c ar9300_stub.c
+#SRCS+= ar9300_xmit_ds.c ar9300_beacon.c ar9300_misc.c ar9300_recv.c
+#SRCS+= ar9300_stub_funcs.c ar9300_eeprom.c ar9300_paprd.c ar9300_recv_ds.c
+#SRCS+= ar9300_freebsd.c ar9300_phy.c ar9300_reset.c ar9300_gpio.c
+#SRCS+= ar9300_power.c ar9300_timer.c
+
# NB: rate control is bound to the driver by symbol names so only pick one
.if ${ATH_RATE} == "sample"
.PATH: ${.CURDIR}/../../dev/ath/ath_rate/sample
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index c1dd6e4..b0ef51c 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -28,6 +28,7 @@ SRCS+= opensolaris_sysevent.c
SRCS+= opensolaris_taskq.c
SRCS+= opensolaris_uio.c
SRCS+= opensolaris_vfs.c
+SRCS+= opensolaris_vm.c
SRCS+= opensolaris_zone.c
_A=${.CURDIR}/../../cddl/contrib/opensolaris/common/atomic
diff --git a/sys/net/if.c b/sys/net/if.c
index 57bd4e1..be6d006 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1357,7 +1357,8 @@ if_rtdel(struct radix_node *rn, void *arg)
return (0);
err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway,
- rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED,
+ rt_mask(rt),
+ rt->rt_flags|RTF_RNH_LOCKED|RTF_PINNED,
(struct rtentry **) NULL, rt->rt_fibnum);
if (err) {
log(LOG_WARNING, "if_rtdel: error %d\n", err);
diff --git a/sys/net/route.c b/sys/net/route.c
index c0f6526..3a1a775 100644
--- a/sys/net/route.c
+++ b/sys/net/route.c
@@ -1112,6 +1112,14 @@ rtrequest1_fib(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt,
error = 0;
}
#endif
+ if ((flags & RTF_PINNED) == 0) {
+ /* Check if target route can be deleted */
+ rt = (struct rtentry *)rnh->rnh_lookup(dst,
+ netmask, rnh);
+ if ((rt != NULL) && (rt->rt_flags & RTF_PINNED))
+ senderr(EADDRINUSE);
+ }
+
/*
* Remove the item from the tree and return it.
* Complain if it is not there and do no more processing.
@@ -1430,6 +1438,7 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
int didwork = 0;
int a_failure = 0;
static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
+ struct radix_node_head *rnh;
if (flags & RTF_HOST) {
dst = ifa->ifa_dstaddr;
@@ -1488,7 +1497,6 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
*/
for ( fibnum = startfib; fibnum <= endfib; fibnum++) {
if (cmd == RTM_DELETE) {
- struct radix_node_head *rnh;
struct radix_node *rn;
/*
* Look up an rtentry that is in the routing tree and
@@ -1538,7 +1546,8 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
*/
bzero((caddr_t)&info, sizeof(info));
info.rti_ifa = ifa;
- info.rti_flags = flags | (ifa->ifa_flags & ~IFA_RTSELF);
+ info.rti_flags = flags |
+ (ifa->ifa_flags & ~IFA_RTSELF) | RTF_PINNED;
info.rti_info[RTAX_DST] = dst;
/*
* doing this for compatibility reasons
@@ -1550,6 +1559,33 @@ rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
info.rti_info[RTAX_NETMASK] = netmask;
error = rtrequest1_fib(cmd, &info, &rt, fibnum);
+
+ if ((error == EEXIST) && (cmd == RTM_ADD)) {
+ /*
+ * Interface route addition failed.
+ * Atomically delete the current prefix (generating an
+ * RTM_DELETE message) and retry adding the interface
+ * prefix.
+ */
+ rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
+ RADIX_NODE_HEAD_LOCK(rnh);
+
+ /* Delete old prefix */
+ info.rti_ifa = NULL;
+ info.rti_flags = RTF_RNH_LOCKED;
+
+ error = rtrequest1_fib(RTM_DELETE, &info, &rt, fibnum);
+ if (error == 0) {
+ info.rti_ifa = ifa;
+ info.rti_flags = flags | RTF_RNH_LOCKED |
+ (ifa->ifa_flags & ~IFA_RTSELF) | RTF_PINNED;
+ error = rtrequest1_fib(cmd, &info, &rt, fibnum);
+ }
+
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+
if (error == 0 && rt != NULL) {
/*
* notify any listening routing agents of the change
diff --git a/sys/net/route.h b/sys/net/route.h
index f12ed810..997f3cd 100644
--- a/sys/net/route.h
+++ b/sys/net/route.h
@@ -176,7 +176,7 @@ struct ortentry {
/* 0x20000 unused, was RTF_WASCLONED */
#define RTF_PROTO3 0x40000 /* protocol specific routing flag */
/* 0x80000 unused */
-#define RTF_PINNED 0x100000 /* future use */
+#define RTF_PINNED 0x100000 /* route is immutable */
#define RTF_LOCAL 0x200000 /* route represents a local address */
#define RTF_BROADCAST 0x400000 /* route represents a bcast address */
#define RTF_MULTICAST 0x800000 /* route represents a mcast address */
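
Taken together, the route.c and route.h changes give RTF_PINNED teeth: interface routes are installed with the flag set, and a delete request that does not itself carry RTF_PINNED may not remove such a route. A sketch of the rule as a hypothetical predicate over the radix head:

#include <sys/param.h>
#include <sys/socket.h>
#include <net/radix.h>
#include <net/route.h>

/*
 * Return non-zero if an RTM_DELETE with flags "req_flags" is allowed
 * to remove the route for dst/netmask; mirrors the new check in
 * rtrequest1_fib(), which returns EADDRINUSE otherwise.
 */
static int
can_delete_route(struct radix_node_head *rnh, struct sockaddr *dst,
    struct sockaddr *netmask, int req_flags)
{
	struct rtentry *rt;

	if (req_flags & RTF_PINNED)	/* pinned callers may proceed */
		return (1);
	rt = (struct rtentry *)rnh->rnh_lookup(dst, netmask, rnh);
	return (rt == NULL || (rt->rt_flags & RTF_PINNED) == 0);
}
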
diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c
index 49f8a8e..c5ebc62 100644
--- a/sys/net80211/ieee80211.c
+++ b/sys/net80211/ieee80211.c
@@ -278,6 +278,7 @@ ieee80211_ifattach(struct ieee80211com *ic,
KASSERT(ifp->if_type == IFT_IEEE80211, ("if_type %d", ifp->if_type));
IEEE80211_LOCK_INIT(ic, ifp->if_xname);
+ IEEE80211_TX_LOCK_INIT(ic, ifp->if_xname);
TAILQ_INIT(&ic->ic_vaps);
/* Create a taskqueue for all state changes */
@@ -385,6 +386,7 @@ ieee80211_ifdetach(struct ieee80211com *ic)
ifmedia_removeall(&ic->ic_media);
taskqueue_free(ic->ic_tq);
+ IEEE80211_TX_LOCK_DESTROY(ic);
IEEE80211_LOCK_DESTROY(ic);
}
diff --git a/sys/net80211/ieee80211_freebsd.c b/sys/net80211/ieee80211_freebsd.c
index 72db271..e20fc57 100644
--- a/sys/net80211/ieee80211_freebsd.c
+++ b/sys/net80211/ieee80211_freebsd.c
@@ -504,6 +504,44 @@ ieee80211_process_callback(struct ieee80211_node *ni,
}
}
+/*
+ * Transmit a frame to the parent interface.
+ *
+ * TODO: if the transmission fails, make sure the parent node is freed
+ * (the callers will first need modifying).
+ */
+int
+ieee80211_parent_transmit(struct ieee80211com *ic,
+ struct mbuf *m)
+{
+ struct ifnet *parent = ic->ic_ifp;
+ /*
+ * Assert that the IC TX lock is held - this enforces that the
+ * processing -> queuing order is maintained.
+ */
+ IEEE80211_TX_LOCK_ASSERT(ic);
+
+ return (parent->if_transmit(parent, m));
+}
+
+/*
+ * Transmit a frame to the VAP interface.
+ */
+int
+ieee80211_vap_transmit(struct ieee80211vap *vap, struct mbuf *m)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+
+ /*
+ * When transmitting via the VAP, we shouldn't hold
+ * any IC TX lock as the VAP TX path will acquire it.
+ */
+ IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
+
+ return (ifp->if_transmit(ifp, m));
+}
+
#include <sys/libkern.h>
void
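
These two wrappers codify the locking contract relied on by the hostap, ht, and hwmp changes below: frames bound for the parent device (M_ENCAP) are sent with the IC TX lock held, while frames re-entering through a VAP must not hold it. A sketch of a hypothetical caller:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net80211/ieee80211_var.h>

/*
 * Dispatch a frame along the correct path; the two transmit wrappers
 * assert the lock state, so getting this wrong panics under
 * INVARIANTS rather than deadlocking quietly.
 */
static int
example_deliver(struct ieee80211vap *vap, struct mbuf *m)
{
	struct ieee80211com *ic = vap->iv_ic;
	int err;

	if (m->m_flags & M_ENCAP) {
		/* parent path: serialise under the IC TX lock */
		IEEE80211_TX_LOCK(ic);
		err = ieee80211_parent_transmit(ic, m);
		IEEE80211_TX_UNLOCK(ic);
	} else {
		/* VAP path: the IC TX lock must not be held */
		err = ieee80211_vap_transmit(vap, m);
	}
	return (err);
}
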
diff --git a/sys/net80211/ieee80211_freebsd.h b/sys/net80211/ieee80211_freebsd.h
index 9491c1a..3ea117b 100644
--- a/sys/net80211/ieee80211_freebsd.h
+++ b/sys/net80211/ieee80211_freebsd.h
@@ -57,6 +57,30 @@ typedef struct {
mtx_assert(IEEE80211_LOCK_OBJ(_ic), MA_NOTOWNED)
/*
+ * Transmit lock.
+ *
+ * This is a (mostly) temporary lock designed to serialise all of the
+ * transmission operations throughout the stack.
+ */
+typedef struct {
+ char name[16]; /* e.g. "ath0_tx_lock" */
+ struct mtx mtx;
+} ieee80211_tx_lock_t;
+#define IEEE80211_TX_LOCK_INIT(_ic, _name) do { \
+ ieee80211_tx_lock_t *cl = &(_ic)->ic_txlock; \
+ snprintf(cl->name, sizeof(cl->name), "%s_tx_lock", _name); \
+ mtx_init(&cl->mtx, cl->name, NULL, MTX_DEF); \
+} while (0)
+#define IEEE80211_TX_LOCK_OBJ(_ic) (&(_ic)->ic_txlock.mtx)
+#define IEEE80211_TX_LOCK_DESTROY(_ic) mtx_destroy(IEEE80211_TX_LOCK_OBJ(_ic))
+#define IEEE80211_TX_LOCK(_ic) mtx_lock(IEEE80211_TX_LOCK_OBJ(_ic))
+#define IEEE80211_TX_UNLOCK(_ic) mtx_unlock(IEEE80211_TX_LOCK_OBJ(_ic))
+#define IEEE80211_TX_LOCK_ASSERT(_ic) \
+ mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_OWNED)
+#define IEEE80211_TX_UNLOCK_ASSERT(_ic) \
+ mtx_assert(IEEE80211_TX_LOCK_OBJ(_ic), MA_NOTOWNED)
+
+/*
* Node locking definitions.
*/
typedef struct {
@@ -272,9 +296,11 @@ int ieee80211_add_callback(struct mbuf *m,
void (*func)(struct ieee80211_node *, void *, int), void *arg);
void ieee80211_process_callback(struct ieee80211_node *, struct mbuf *, int);
-void get_random_bytes(void *, size_t);
-
struct ieee80211com;
+int ieee80211_parent_transmit(struct ieee80211com *, struct mbuf *);
+int ieee80211_vap_transmit(struct ieee80211vap *, struct mbuf *);
+
+void get_random_bytes(void *, size_t);
void ieee80211_sysctl_attach(struct ieee80211com *);
void ieee80211_sysctl_detach(struct ieee80211com *);
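
The macros above follow the stack's usual pattern of embedding a short name next to the mutex so lock diagnostics can identify the instance. A runnable user-space model of the same pattern, with a pthread mutex standing in for mtx(9) (all names illustrative):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	char name[16];			/* e.g. "ath0_tx_lock" */
	pthread_mutex_t mtx;
} tx_lock_t;

#define	TX_LOCK_INIT(cl, ifname) do {					\
	snprintf((cl)->name, sizeof((cl)->name), "%s_tx_lock", ifname);	\
	pthread_mutex_init(&(cl)->mtx, NULL);				\
} while (0)
#define	TX_LOCK(cl)		pthread_mutex_lock(&(cl)->mtx)
#define	TX_UNLOCK(cl)		pthread_mutex_unlock(&(cl)->mtx)
#define	TX_LOCK_DESTROY(cl)	pthread_mutex_destroy(&(cl)->mtx)

int
main(void)
{
	tx_lock_t l;

	TX_LOCK_INIT(&l, "ath0");
	TX_LOCK(&l);
	printf("%s held\n", l.name);
	TX_UNLOCK(&l);
	TX_LOCK_DESTROY(&l);
	return (0);
}
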
diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c
index 7087748..fe83ebb 100644
--- a/sys/net80211/ieee80211_hostap.c
+++ b/sys/net80211/ieee80211_hostap.c
@@ -412,7 +412,7 @@ hostap_deliver_data(struct ieee80211vap *vap,
if (mcopy != NULL) {
int len, err;
len = mcopy->m_pkthdr.len;
- err = ifp->if_transmit(ifp, mcopy);
+ err = ieee80211_vap_transmit(vap, mcopy);
if (err) {
/* NB: IFQ_HANDOFF reclaims mcopy */
} else {
@@ -2255,8 +2255,8 @@ void
ieee80211_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m0)
{
struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_frame_min *wh;
- struct ifnet *ifp;
struct mbuf *m;
uint16_t aid;
int qlen;
@@ -2320,23 +2320,15 @@ ieee80211_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m0)
}
m->m_flags |= M_PWR_SAV; /* bypass PS handling */
- if (m->m_flags & M_ENCAP)
- ifp = vap->iv_ic->ic_ifp;
- else
- ifp = vap->iv_ifp;
-
/*
- * Free any node ref which this mbuf may have.
- *
- * Much like psq_mfree(), we assume that M_ENCAP nodes have
- * node references.
+	 * Do the right thing: if it's an encap'ed frame then
+	 * call ieee80211_parent_transmit() (freeing the node ref on
+	 * failure); else call ieee80211_vap_transmit().
*/
- if (ifp->if_transmit(ifp, m) != 0) {
- /*
- * XXX m is invalid (freed) at this point, determine M_ENCAP
- * an alternate way.
- */
- if (ifp == vap->iv_ic->ic_ifp)
+ if (m->m_flags & M_ENCAP) {
+ if (ieee80211_parent_transmit(ic, m) != 0)
ieee80211_free_node(ni);
+ } else {
+ (void) ieee80211_vap_transmit(vap, m);
}
}
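
The pspoll dispatch above relies on a reference rule that recurs through this change: if_transmit()-style functions always consume the mbuf, success or failure, but an M_ENCAP frame also carries a node reference that the caller must drop when the transmit fails. A small runnable model of that contract (illustrative types and names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { int refs; };

/* Always consumes buf, success or failure (like if_transmit()). */
static int
transmit(char *buf, int fail)
{
	free(buf);
	return (fail);
}

static void
dispatch_encap(struct node *n, char *buf, int fail)
{
	/* Encap'ed frames carry a node ref; drop it only on failure. */
	if (transmit(buf, fail) != 0)
		n->refs--;
}

int
main(void)
{
	struct node n = { 2 };

	dispatch_encap(&n, strdup("frame1"), 0);
	dispatch_encap(&n, strdup("frame2"), 1);
	printf("node refs now %d (expect 1)\n", n.refs);
	return (0);
}
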
diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c
index 495c949..e09cd22 100644
--- a/sys/net80211/ieee80211_ht.c
+++ b/sys/net80211/ieee80211_ht.c
@@ -2392,7 +2392,9 @@ ieee80211_send_bar(struct ieee80211_node *ni,
* ic_raw_xmit will free the node reference
* regardless of queue/TX success or failure.
*/
- ret = ic->ic_raw_xmit(ni, m, NULL);
+ IEEE80211_TX_LOCK(ic);
+ ret = ieee80211_raw_output(vap, ni, m, NULL);
+ IEEE80211_TX_UNLOCK(ic);
if (ret != 0) {
IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_11N,
ni, "send BAR: failed: (ret = %d)\n",
diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c
index 414ac53..8c481da 100644
--- a/sys/net80211/ieee80211_hwmp.c
+++ b/sys/net80211/ieee80211_hwmp.c
@@ -592,6 +592,7 @@ hwmp_send_action(struct ieee80211vap *vap,
struct ieee80211_bpf_params params;
struct mbuf *m;
uint8_t *frm;
+ int ret;
if (IEEE80211_IS_MULTICAST(da)) {
ni = ieee80211_ref_node(vap->iv_bss);
@@ -654,6 +655,9 @@ hwmp_send_action(struct ieee80211vap *vap,
vap->iv_stats.is_tx_nobuf++;
return ENOMEM;
}
+
+ IEEE80211_TX_LOCK(ic);
+
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION,
IEEE80211_NONQOS_TID, vap->iv_myaddr, da, vap->iv_myaddr);
@@ -669,7 +673,9 @@ hwmp_send_action(struct ieee80211vap *vap,
else
params.ibp_try0 = ni->ni_txparms->maxretry;
params.ibp_power = ni->ni_txpower;
- return ic->ic_raw_xmit(ni, m, &params);
+ ret = ieee80211_raw_output(vap, ni, m, &params);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
#define ADDSHORT(frm, v) do { \
@@ -1271,12 +1277,9 @@ hwmp_recv_prep(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct ieee80211_mesh_route *rtext = NULL;
struct ieee80211_hwmp_route *hr;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = vap->iv_ifp;
struct mbuf *m, *next;
uint32_t metric = 0;
const uint8_t *addr;
- int is_encap;
- struct ieee80211_node *ni_encap;
IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
"received PREP, orig %6D, targ %6D", prep->prep_origaddr, ":",
@@ -1450,22 +1453,21 @@ hwmp_recv_prep(struct ieee80211vap *vap, struct ieee80211_node *ni,
m = ieee80211_ageq_remove(&ic->ic_stageq,
(struct ieee80211_node *)(uintptr_t)
ieee80211_mac_hash(ic, addr)); /* either dest or ext_dest */
+
+ /*
+	 * All frames in the stageq here should be non-M_ENCAP, or things
+ * will get very unhappy.
+ */
for (; m != NULL; m = next) {
- is_encap = !! (m->m_flags & M_ENCAP);
- ni_encap = (struct ieee80211_node *) m->m_pkthdr.rcvif;
next = m->m_nextpkt;
m->m_nextpkt = NULL;
IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
"flush queued frame %p len %d", m, m->m_pkthdr.len);
-
/*
* If the mbuf has M_ENCAP set, ensure we free it.
* Note that after if_transmit() is called, m is invalid.
*/
- if (ifp->if_transmit(ifp, m) != 0) {
- if (is_encap)
- ieee80211_free_node(ni_encap);
- }
+ (void) ieee80211_vap_transmit(vap, m);
}
#undef IS_PROXY
#undef PROXIED_BY_US
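
The flush loop above is the standard way to drain an m_nextpkt chain: save the link, detach the element, then hand it off, because the consumer may free or requeue it. A runnable user-space model of the same walk (illustrative types):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *nextpkt;
	int id;
};

static void
consume(struct pkt *p)
{
	printf("xmit pkt %d\n", p->id);
	free(p);
}

static void
flush(struct pkt *head)
{
	struct pkt *p, *next;

	for (p = head; p != NULL; p = next) {
		next = p->nextpkt;	/* save link before handoff */
		p->nextpkt = NULL;	/* detach; consumer owns p now */
		consume(p);
	}
}

int
main(void)
{
	struct pkt *head = NULL;
	int i;

	for (i = 3; i > 0; i--) {
		struct pkt *p = malloc(sizeof(*p));
		p->id = i;
		p->nextpkt = head;
		head = p;
	}
	flush(head);
	return (0);
}
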
diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c
index 9750a4a..7f1ba21 100644
--- a/sys/net80211/ieee80211_mesh.c
+++ b/sys/net80211/ieee80211_mesh.c
@@ -1041,11 +1041,12 @@ mesh_transmit_to_gate(struct ieee80211vap *vap, struct mbuf *m,
{
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *parent = ic->ic_ifp;
struct ieee80211_node *ni;
struct ether_header *eh;
int error;
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
eh = mtod(m, struct ether_header *);
ni = ieee80211_mesh_find_txnode(vap, rt_gate->rt_dest);
if (ni == NULL) {
@@ -1132,6 +1133,8 @@ mesh_transmit_to_gate(struct ieee80211vap *vap, struct mbuf *m,
}
}
#endif /* IEEE80211_SUPPORT_SUPERG */
+
+ IEEE80211_TX_LOCK(ic);
if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
/*
* Encapsulate the packet in prep for transmission.
@@ -1143,9 +1146,9 @@ mesh_transmit_to_gate(struct ieee80211vap *vap, struct mbuf *m,
return;
}
}
- error = parent->if_transmit(parent, m);
+ error = ieee80211_parent_transmit(ic, m);
+ IEEE80211_TX_UNLOCK(ic);
if (error != 0) {
- m_freem(m);
ieee80211_free_node(ni);
} else {
ifp->if_opackets++;
@@ -1171,6 +1174,8 @@ ieee80211_mesh_forward_to_gates(struct ieee80211vap *vap,
struct ieee80211_mesh_gate_route *gr = NULL, *gr_next;
struct mbuf *m, *mcopy, *next;
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
KASSERT( rt_dest->rt_flags == IEEE80211_MESHRT_FLAGS_DISCOVER,
("Route is not marked with IEEE80211_MESHRT_FLAGS_DISCOVER"));
@@ -1240,7 +1245,6 @@ mesh_forward(struct ieee80211vap *vap, struct mbuf *m,
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_mesh_state *ms = vap->iv_mesh;
struct ifnet *ifp = vap->iv_ifp;
- struct ifnet *parent = ic->ic_ifp;
const struct ieee80211_frame *wh =
mtod(m, const struct ieee80211_frame *);
struct mbuf *mcopy;
@@ -1249,6 +1253,9 @@ mesh_forward(struct ieee80211vap *vap, struct mbuf *m,
struct ieee80211_node *ni;
int err;
+ /* This is called from the RX path - don't hold this lock */
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
/*
	 * mesh ttl of 1 means we are the last one receiving it,
	 * according to the amendment we decrement and then check if
@@ -1320,7 +1327,20 @@ mesh_forward(struct ieee80211vap *vap, struct mbuf *m,
/* XXX do we know m_nextpkt is NULL? */
mcopy->m_pkthdr.rcvif = (void *) ni;
- err = parent->if_transmit(parent, mcopy);
+
+ /*
+ * XXX this bypasses all of the VAP TX handling; it passes frames
+ * directly to the parent interface.
+ *
+	 * Because of this, the TX lock is only taken around the
+	 * transmit itself; there's no encap state to protect.
+ *
+ * Doing a direct parent transmit may not be the correct thing
+ * to do here; we'll have to re-think this soon.
+ */
+ IEEE80211_TX_LOCK(ic);
+ err = ieee80211_parent_transmit(ic, mcopy);
+ IEEE80211_TX_UNLOCK(ic);
if (err != 0) {
/* NB: IFQ_HANDOFF reclaims mbuf */
ieee80211_free_node(ni);
@@ -1457,6 +1477,10 @@ mesh_recv_indiv_data_to_fwrd(struct ieee80211vap *vap, struct mbuf *m,
struct ieee80211_qosframe_addr4 *qwh;
struct ieee80211_mesh_state *ms = vap->iv_mesh;
struct ieee80211_mesh_route *rt_meshda, *rt_meshsa;
+ struct ieee80211com *ic = vap->iv_ic;
+
+ /* This is called from the RX path - don't hold this lock */
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
qwh = (struct ieee80211_qosframe_addr4 *)wh;
@@ -1512,8 +1536,12 @@ mesh_recv_indiv_data_to_me(struct ieee80211vap *vap, struct mbuf *m,
const struct ieee80211_meshcntl_ae10 *mc10;
struct ieee80211_mesh_state *ms = vap->iv_mesh;
struct ieee80211_mesh_route *rt;
+ struct ieee80211com *ic = vap->iv_ic;
int ae;
+ /* This is called from the RX path - don't hold this lock */
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
qwh = (struct ieee80211_qosframe_addr4 *)wh;
mc10 = (const struct ieee80211_meshcntl_ae10 *)mc;
@@ -1575,6 +1603,10 @@ mesh_recv_group_data(struct ieee80211vap *vap, struct mbuf *m,
{
#define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc)
struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211com *ic = vap->iv_ic;
+
+ /* This is called from the RX path - don't hold this lock */
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
mesh_forward(vap, m, mc);
@@ -1621,6 +1653,9 @@ mesh_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
need_tap = 1; /* mbuf need to be tapped. */
type = -1; /* undefined */
+ /* This is called from the RX path - don't hold this lock */
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
ni->ni_macaddr, NULL,
@@ -2743,6 +2778,7 @@ mesh_send_action(struct ieee80211_node *ni,
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_bpf_params params;
struct ieee80211_frame *wh;
+ int ret;
KASSERT(ni != NULL, ("null node"));
@@ -2761,6 +2797,7 @@ mesh_send_action(struct ieee80211_node *ni,
return ENOMEM;
}
+ IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION,
@@ -2778,7 +2815,9 @@ mesh_send_action(struct ieee80211_node *ni,
IEEE80211_NODE_STAT(ni, tx_mgmt);
- return ic->ic_raw_xmit(ni, m, &params);
+ ret = ieee80211_raw_output(vap, ni, m, &params);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
#define ADDSHORT(frm, v) do { \
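
The RX-path functions above gain IEEE80211_TX_UNLOCK_ASSERT() calls; mtx_assert(..., MA_NOTOWNED) is only fatal when the current thread owns the lock, so concurrent transmitters on other threads don't trip it. A runnable user-space model of an ownership-tracking assert (the unlocked reads are a simplification that works because the check only targets the calling thread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct owned_mtx {
	pthread_mutex_t mtx;
	pthread_t owner;		/* valid only while locked */
	int locked;
};

static void
omtx_lock(struct owned_mtx *m)
{
	pthread_mutex_lock(&m->mtx);
	m->owner = pthread_self();
	m->locked = 1;
}

static void
omtx_unlock(struct owned_mtx *m)
{
	m->locked = 0;
	pthread_mutex_unlock(&m->mtx);
}

/* MA_NOTOWNED: fatal only if *this* thread holds the lock. */
static void
omtx_assert_notowned(struct owned_mtx *m)
{
	assert(!(m->locked && pthread_equal(m->owner, pthread_self())));
}

int
main(void)
{
	struct owned_mtx m = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	omtx_assert_notowned(&m);	/* fine: not held at all */
	omtx_lock(&m);
	/* omtx_assert_notowned(&m) here would abort, as intended */
	omtx_unlock(&m);
	printf("asserts passed\n");
	return (0);
}
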
diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c
index 7481488..0d51a55 100644
--- a/sys/net80211/ieee80211_output.c
+++ b/sys/net80211/ieee80211_output.c
@@ -110,6 +110,255 @@ doprint(struct ieee80211vap *vap, int subtype)
#endif
/*
+ * Send the given mbuf through the given vap.
+ *
+ * This consumes the mbuf regardless of whether the transmit
+ * was successful or not.
+ *
+ * This does none of the initial checks that ieee80211_start()
+ * does (eg CAC timeout, interface wakeup) - the caller must
+ * do this first.
+ */
+static int
+ieee80211_start_pkt(struct ieee80211vap *vap, struct mbuf *m)
+{
+#define IS_DWDS(vap) \
+ (vap->iv_opmode == IEEE80211_M_WDS && \
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0)
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_node *ni;
+ struct ether_header *eh;
+ int error;
+
+ /*
+ * Cancel any background scan.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN)
+ ieee80211_cancel_anyscan(vap);
+ /*
+ * Find the node for the destination so we can do
+ * things like power save and fast frames aggregation.
+ *
+ * NB: past this point various code assumes the first
+ * mbuf has the 802.3 header present (and contiguous).
+ */
+ ni = NULL;
+ if (m->m_len < sizeof(struct ether_header) &&
+ (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "discard frame, %s\n", "m_pullup failed");
+ vap->iv_stats.is_tx_nobuf++; /* XXX */
+ ifp->if_oerrors++;
+ return (ENOBUFS);
+ }
+ eh = mtod(m, struct ether_header *);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (IS_DWDS(vap)) {
+ /*
+ * Only unicast frames from the above go out
+ * DWDS vaps; multicast frames are handled by
+ * dispatching the frame as it comes through
+ * the AP vap (see below).
+ */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS,
+ eh->ether_dhost, "mcast", "%s", "on DWDS");
+ vap->iv_stats.is_dwds_mcast++;
+ m_freem(m);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ /*
+ * Spam DWDS vap's w/ multicast traffic.
+ */
+ /* XXX only if dwds in use? */
+ ieee80211_dwds_mcast(vap, m);
+ }
+ }
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode != IEEE80211_M_MBSS) {
+#endif
+ ni = ieee80211_find_txnode(vap, eh->ether_dhost);
+ if (ni == NULL) {
+ /* NB: ieee80211_find_txnode does stat+msg */
+ ifp->if_oerrors++;
+ m_freem(m);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ if (ni->ni_associd == 0 &&
+ (ni->ni_flags & IEEE80211_NODE_ASSOCID)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
+ eh->ether_dhost, NULL,
+ "sta not associated (type 0x%04x)",
+ htons(eh->ether_type));
+ vap->iv_stats.is_tx_notassoc++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ ieee80211_free_node(ni);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+#ifdef IEEE80211_SUPPORT_MESH
+ } else {
+ if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) {
+ /*
+ * Proxy station only if configured.
+ */
+ if (!ieee80211_mesh_isproxyena(vap)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_OUTPUT |
+ IEEE80211_MSG_MESH,
+ eh->ether_dhost, NULL,
+ "%s", "proxy not enabled");
+ vap->iv_stats.is_mesh_notproxy++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "forward frame from DS SA(%6D), DA(%6D)\n",
+ eh->ether_shost, ":",
+ eh->ether_dhost, ":");
+ ieee80211_mesh_proxy_check(vap, eh->ether_shost);
+ }
+ ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m);
+ if (ni == NULL) {
+ /*
+ * NB: ieee80211_mesh_discover holds/disposes
+ * frame (e.g. queueing on path discovery).
+ */
+ ifp->if_oerrors++;
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ }
+#endif
+ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
+ (m->m_flags & M_PWR_SAV) == 0) {
+ /*
+ * Station in power save mode; pass the frame
+ * to the 802.11 layer and continue. We'll get
+ * the frame back when the time is right.
+ * XXX lose WDS vap linkage?
+ */
+ (void) ieee80211_pwrsave(ni, m);
+ ieee80211_free_node(ni);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ /* calculate priority so drivers can find the tx queue */
+ if (ieee80211_classify(ni, m)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
+ eh->ether_dhost, NULL,
+ "%s", "classification failure");
+ vap->iv_stats.is_tx_classify++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ ieee80211_free_node(ni);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ /*
+ * Stash the node pointer. Note that we do this after
+ * any call to ieee80211_dwds_mcast because that code
+ * uses any existing value for rcvif to identify the
+ * interface it (might have been) received on.
+ */
+ m->m_pkthdr.rcvif = (void *)ni;
+
+ BPF_MTAP(ifp, m); /* 802.3 tx */
+
+ /*
+ * Check if A-MPDU tx aggregation is setup or if we
+ * should try to enable it. The sta must be associated
+ * with HT and A-MPDU enabled for use. When the policy
+ * routine decides we should enable A-MPDU we issue an
+ * ADDBA request and wait for a reply. The frame being
+ * encapsulated will go out w/o using A-MPDU, or possibly
+ * it might be collected by the driver and held/retransmit.
+ * The default ic_ampdu_enable routine handles staggering
+ * ADDBA requests in case the receiver NAK's us or we are
+ * otherwise unable to establish a BA stream.
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) &&
+ (m->m_flags & M_EAPOL) == 0) {
+ int tid = WME_AC_TO_TID(M_WME_GETAC(m));
+ struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
+
+ ieee80211_txampdu_count_packet(tap);
+ if (IEEE80211_AMPDU_RUNNING(tap)) {
+ /*
+ * Operational, mark frame for aggregation.
+ *
+ * XXX do tx aggregation here
+ */
+ m->m_flags |= M_AMPDU_MPDU;
+ } else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
+ ic->ic_ampdu_enable(ni, tap)) {
+ /*
+ * Not negotiated yet, request service.
+ */
+ ieee80211_ampdu_request(ni, tap);
+ /* XXX hold frame for reply? */
+ }
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
+ m = ieee80211_ff_check(ni, m);
+ if (m == NULL) {
+ /* NB: any ni ref held on stageq */
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ }
+#endif /* IEEE80211_SUPPORT_SUPERG */
+
+ /*
+ * Grab the TX lock - serialise the TX process from this
+ * point (where TX state is being checked/modified)
+ * through to driver queue.
+ */
+ IEEE80211_TX_LOCK(ic);
+
+ if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
+ /*
+ * Encapsulate the packet in prep for transmission.
+ */
+ m = ieee80211_encap(vap, ni, m);
+ if (m == NULL) {
+ /* NB: stat+msg handled in ieee80211_encap */
+ IEEE80211_TX_UNLOCK(ic);
+ ieee80211_free_node(ni);
+ /* XXX better status? */
+ return (ENOBUFS);
+ }
+ }
+ error = ieee80211_parent_transmit(ic, m);
+
+ /*
+	 * Unlock at this point - there's no need to hold the TX lock
+	 * across ieee80211_free_node() (which may take the comlock).
+ */
+ IEEE80211_TX_UNLOCK(ic);
+ if (error != 0) {
+ /* NB: IFQ_HANDOFF reclaims mbuf */
+ ieee80211_free_node(ni);
+ } else {
+ ifp->if_opackets++;
+ }
+ ic->ic_lastdata = ticks;
+
+ return (0);
+#undef IS_DWDS
+}
+
+/*
* Start method for vap's. All packets from the stack come
* through here. We handle common processing of the packets
* before dispatching them to the underlying device.
@@ -117,16 +366,10 @@ doprint(struct ieee80211vap *vap, int subtype)
void
ieee80211_start(struct ifnet *ifp)
{
-#define IS_DWDS(vap) \
- (vap->iv_opmode == IEEE80211_M_WDS && \
- (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0)
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *parent = ic->ic_ifp;
- struct ieee80211_node *ni;
struct mbuf *m;
- struct ether_header *eh;
- int error;
/* NB: parent must be up and running */
if (!IFNET_IS_UP_RUNNING(parent)) {
@@ -165,6 +408,7 @@ ieee80211_start(struct ifnet *ifp)
}
IEEE80211_UNLOCK(ic);
}
+
for (;;) {
IFQ_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
@@ -180,203 +424,23 @@ ieee80211_start(struct ifnet *ifp)
*/
m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA);
/*
- * Cancel any background scan.
+ * Bump to the packet transmission path.
*/
- if (ic->ic_flags & IEEE80211_F_SCAN)
- ieee80211_cancel_anyscan(vap);
- /*
- * Find the node for the destination so we can do
- * things like power save and fast frames aggregation.
- *
- * NB: past this point various code assumes the first
- * mbuf has the 802.3 header present (and contiguous).
- */
- ni = NULL;
- if (m->m_len < sizeof(struct ether_header) &&
- (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "discard frame, %s\n", "m_pullup failed");
- vap->iv_stats.is_tx_nobuf++; /* XXX */
- ifp->if_oerrors++;
- continue;
- }
- eh = mtod(m, struct ether_header *);
- if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
- if (IS_DWDS(vap)) {
- /*
- * Only unicast frames from the above go out
- * DWDS vaps; multicast frames are handled by
- * dispatching the frame as it comes through
- * the AP vap (see below).
- */
- IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS,
- eh->ether_dhost, "mcast", "%s", "on DWDS");
- vap->iv_stats.is_dwds_mcast++;
- m_freem(m);
- continue;
- }
- if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
- /*
- * Spam DWDS vap's w/ multicast traffic.
- */
- /* XXX only if dwds in use? */
- ieee80211_dwds_mcast(vap, m);
- }
- }
-#ifdef IEEE80211_SUPPORT_MESH
- if (vap->iv_opmode != IEEE80211_M_MBSS) {
-#endif
- ni = ieee80211_find_txnode(vap, eh->ether_dhost);
- if (ni == NULL) {
- /* NB: ieee80211_find_txnode does stat+msg */
- ifp->if_oerrors++;
- m_freem(m);
- continue;
- }
- if (ni->ni_associd == 0 &&
- (ni->ni_flags & IEEE80211_NODE_ASSOCID)) {
- IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
- eh->ether_dhost, NULL,
- "sta not associated (type 0x%04x)",
- htons(eh->ether_type));
- vap->iv_stats.is_tx_notassoc++;
- ifp->if_oerrors++;
- m_freem(m);
- ieee80211_free_node(ni);
- continue;
- }
-#ifdef IEEE80211_SUPPORT_MESH
- } else {
- if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) {
- /*
- * Proxy station only if configured.
- */
- if (!ieee80211_mesh_isproxyena(vap)) {
- IEEE80211_DISCARD_MAC(vap,
- IEEE80211_MSG_OUTPUT |
- IEEE80211_MSG_MESH,
- eh->ether_dhost, NULL,
- "%s", "proxy not enabled");
- vap->iv_stats.is_mesh_notproxy++;
- ifp->if_oerrors++;
- m_freem(m);
- continue;
- }
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "forward frame from DS SA(%6D), DA(%6D)\n",
- eh->ether_shost, ":",
- eh->ether_dhost, ":");
- ieee80211_mesh_proxy_check(vap, eh->ether_shost);
- }
- ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m);
- if (ni == NULL) {
- /*
- * NB: ieee80211_mesh_discover holds/disposes
- * frame (e.g. queueing on path discovery).
- */
- ifp->if_oerrors++;
- continue;
- }
- }
-#endif
- if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
- (m->m_flags & M_PWR_SAV) == 0) {
- /*
- * Station in power save mode; pass the frame
- * to the 802.11 layer and continue. We'll get
- * the frame back when the time is right.
- * XXX lose WDS vap linkage?
- */
- (void) ieee80211_pwrsave(ni, m);
- ieee80211_free_node(ni);
- continue;
- }
- /* calculate priority so drivers can find the tx queue */
- if (ieee80211_classify(ni, m)) {
- IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
- eh->ether_dhost, NULL,
- "%s", "classification failure");
- vap->iv_stats.is_tx_classify++;
- ifp->if_oerrors++;
- m_freem(m);
- ieee80211_free_node(ni);
- continue;
- }
- /*
- * Stash the node pointer. Note that we do this after
- * any call to ieee80211_dwds_mcast because that code
- * uses any existing value for rcvif to identify the
- * interface it (might have been) received on.
- */
- m->m_pkthdr.rcvif = (void *)ni;
-
- BPF_MTAP(ifp, m); /* 802.3 tx */
-
- /*
- * Check if A-MPDU tx aggregation is setup or if we
- * should try to enable it. The sta must be associated
- * with HT and A-MPDU enabled for use. When the policy
- * routine decides we should enable A-MPDU we issue an
- * ADDBA request and wait for a reply. The frame being
- * encapsulated will go out w/o using A-MPDU, or possibly
- * it might be collected by the driver and held/retransmit.
- * The default ic_ampdu_enable routine handles staggering
- * ADDBA requests in case the receiver NAK's us or we are
- * otherwise unable to establish a BA stream.
- */
- if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) &&
- (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) &&
- (m->m_flags & M_EAPOL) == 0) {
- int tid = WME_AC_TO_TID(M_WME_GETAC(m));
- struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
-
- ieee80211_txampdu_count_packet(tap);
- if (IEEE80211_AMPDU_RUNNING(tap)) {
- /*
- * Operational, mark frame for aggregation.
- *
- * XXX do tx aggregation here
- */
- m->m_flags |= M_AMPDU_MPDU;
- } else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
- ic->ic_ampdu_enable(ni, tap)) {
- /*
- * Not negotiated yet, request service.
- */
- ieee80211_ampdu_request(ni, tap);
- /* XXX hold frame for reply? */
- }
- }
-#ifdef IEEE80211_SUPPORT_SUPERG
- else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
- m = ieee80211_ff_check(ni, m);
- if (m == NULL) {
- /* NB: any ni ref held on stageq */
- continue;
- }
- }
-#endif /* IEEE80211_SUPPORT_SUPERG */
- if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
- /*
- * Encapsulate the packet in prep for transmission.
- */
- m = ieee80211_encap(vap, ni, m);
- if (m == NULL) {
- /* NB: stat+msg handled in ieee80211_encap */
- ieee80211_free_node(ni);
- continue;
- }
- }
- error = parent->if_transmit(parent, m);
- if (error != 0) {
- /* NB: IFQ_HANDOFF reclaims mbuf */
- ieee80211_free_node(ni);
- } else {
- ifp->if_opackets++;
- }
- ic->ic_lastdata = ticks;
+ (void) ieee80211_start_pkt(vap, m);
+ /* mbuf is consumed here */
}
-#undef IS_DWDS
+}
+
+/*
+ * 802.11 raw output routine.
+ */
+int
+ieee80211_raw_output(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ struct mbuf *m, const struct ieee80211_bpf_params *params)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ return (ic->ic_raw_xmit(ni, m, params));
}
/*
@@ -392,7 +456,9 @@ ieee80211_output(struct ifnet *ifp, struct mbuf *m,
struct ieee80211_node *ni = NULL;
struct ieee80211vap *vap;
struct ieee80211_frame *wh;
+ struct ieee80211com *ic = NULL;
int error;
+ int ret;
IFQ_LOCK(&ifp->if_snd);
if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
@@ -409,6 +475,7 @@ ieee80211_output(struct ifnet *ifp, struct mbuf *m,
}
IFQ_UNLOCK(&ifp->if_snd);
vap = ifp->if_softc;
+ ic = vap->iv_ic;
/*
* Hand to the 802.3 code if not tagged as
* a raw 802.11 frame.
@@ -489,15 +556,19 @@ ieee80211_output(struct ifnet *ifp, struct mbuf *m,
/* NB: ieee80211_encap does not include 802.11 header */
IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len);
+ IEEE80211_TX_LOCK(ic);
+
/*
* NB: DLT_IEEE802_11_RADIO identifies the parameters are
* present by setting the sa_len field of the sockaddr (yes,
* this is a hack).
* NB: we assume sa_data is suitably aligned to cast.
*/
- return vap->iv_ic->ic_raw_xmit(ni, m,
+ ret = ieee80211_raw_output(vap, ni, m,
(const struct ieee80211_bpf_params *)(dst->sa_len ?
dst->sa_data : NULL));
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
bad:
if (m != NULL)
m_freem(m);
@@ -526,8 +597,11 @@ ieee80211_send_setup(
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_tx_ampdu *tap;
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ struct ieee80211com *ic = ni->ni_ic;
ieee80211_seq seqno;
+ IEEE80211_TX_LOCK_ASSERT(ic);
+
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type;
if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) {
switch (vap->iv_opmode) {
@@ -621,6 +695,7 @@ ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type,
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_frame *wh;
+ int ret;
KASSERT(ni != NULL, ("null node"));
@@ -642,6 +717,8 @@ ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type,
return ENOMEM;
}
+ IEEE80211_TX_LOCK(ic);
+
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID,
@@ -670,7 +747,9 @@ ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type,
#endif
IEEE80211_NODE_STAT(ni, tx_mgmt);
- return ic->ic_raw_xmit(ni, m, params);
+ ret = ieee80211_raw_output(vap, ni, m, params);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
/*
@@ -694,6 +773,7 @@ ieee80211_send_nulldata(struct ieee80211_node *ni)
struct ieee80211_frame *wh;
int hdrlen;
uint8_t *frm;
+ int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
@@ -729,6 +809,8 @@ ieee80211_send_nulldata(struct ieee80211_node *ni)
return ENOMEM;
}
+ IEEE80211_TX_LOCK(ic);
+
wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */
if (ni->ni_flags & IEEE80211_NODE_QOS) {
const int tid = WME_AC_TO_TID(WME_AC_BE);
@@ -771,7 +853,9 @@ ieee80211_send_nulldata(struct ieee80211_node *ni)
ieee80211_chan2ieee(ic, ic->ic_curchan),
wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis");
- return ic->ic_raw_xmit(ni, m, NULL);
+ ret = ieee80211_raw_output(vap, ni, m, NULL);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
/*
@@ -1034,6 +1118,8 @@ ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
ieee80211_seq seqno;
int meshhdrsize, meshae;
uint8_t *qos;
+
+ IEEE80211_TX_LOCK_ASSERT(ic);
/*
* Copy existing Ethernet header to a safe place. The
@@ -1806,6 +1892,7 @@ ieee80211_send_probereq(struct ieee80211_node *ni,
const struct ieee80211_rateset *rs;
struct mbuf *m;
uint8_t *frm;
+ int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
@@ -1878,6 +1965,7 @@ ieee80211_send_probereq(struct ieee80211_node *ni,
return ENOMEM;
}
+ IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ,
@@ -1905,7 +1993,9 @@ ieee80211_send_probereq(struct ieee80211_node *ni,
} else
params.ibp_try0 = tp->maxretry;
params.ibp_power = ni->ni_txpower;
- return ic->ic_raw_xmit(ni, m, &params);
+ ret = ieee80211_raw_output(vap, ni, m, &params);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
/*
@@ -2474,6 +2564,7 @@ ieee80211_send_proberesp(struct ieee80211vap *vap,
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_frame *wh;
struct mbuf *m;
+ int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss,
@@ -2502,6 +2593,7 @@ ieee80211_send_proberesp(struct ieee80211vap *vap,
M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
KASSERT(m != NULL, ("no room for header"));
+ IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(bss, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP,
@@ -2517,7 +2609,9 @@ ieee80211_send_proberesp(struct ieee80211vap *vap,
legacy ? " <legacy>" : "");
IEEE80211_NODE_STAT(bss, tx_mgmt);
- return ic->ic_raw_xmit(bss, m, NULL);
+ ret = ieee80211_raw_output(vap, bss, m, NULL);
+ IEEE80211_TX_UNLOCK(ic);
+ return (ret);
}
/*
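
The A-MPDU block inside the new ieee80211_start_pkt() above is a small state machine: mark frames for aggregation while a block-ack session is running; otherwise consult the policy hook once, issue the ADDBA request, and keep sending unaggregated until the exchange completes. A toy runnable model of that decision (the threshold policy is invented for illustration):

#include <stdio.h>

enum ampdu_state { IDLE, REQUESTED, RUNNING };

struct tid_state {
	enum ampdu_state st;
	int pkts;
};

/* Stand-in for ic_ampdu_enable(): a made-up packet-count threshold. */
static int
policy_says_enable(const struct tid_state *t)
{
	return (t->pkts >= 2);
}

/* Returns 1 if the frame should be marked M_AMPDU_MPDU. */
static int
classify(struct tid_state *t)
{
	t->pkts++;			/* txampdu_count_packet */
	if (t->st == RUNNING)
		return (1);
	if (t->st == IDLE && policy_says_enable(t))
		t->st = REQUESTED;	/* issue the ADDBA request */
	return (0);			/* send unaggregated for now */
}

int
main(void)
{
	struct tid_state t = { IDLE, 0 };
	int i, agg;

	for (i = 0; i < 3; i++) {
		agg = classify(&t);
		printf("frame %d aggregated=%d state=%d\n", i, agg,
		    (int)t.st);
	}
	t.st = RUNNING;			/* ADDBA accepted */
	printf("frame 3 aggregated=%d\n", classify(&t));
	return (0);
}
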
diff --git a/sys/net80211/ieee80211_power.c b/sys/net80211/ieee80211_power.c
index 31bc578..995aa88 100644
--- a/sys/net80211/ieee80211_power.c
+++ b/sys/net80211/ieee80211_power.c
@@ -413,6 +413,7 @@ static void
pwrsave_flushq(struct ieee80211_node *ni)
{
struct ieee80211_psq *psq = &ni->ni_psq;
+ struct ieee80211com *ic = ni->ni_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_psq_head *qhead;
struct ifnet *parent, *ifp;
@@ -463,7 +464,7 @@ pwrsave_flushq(struct ieee80211_node *ni)
* For encaped frames, we need to free the node
* reference upon failure.
*/
- if (parent->if_transmit(parent, m) != 0)
+ if (ieee80211_parent_transmit(ic, m) != 0)
ieee80211_free_node(ni);
}
}
@@ -475,7 +476,7 @@ pwrsave_flushq(struct ieee80211_node *ni)
ifp_q = m->m_nextpkt;
KASSERT((!(m->m_flags & M_ENCAP)),
("%s: vapq with M_ENCAP frame!\n", __func__));
- (void) ifp->if_transmit(ifp, m);
+ (void) ieee80211_vap_transmit(vap, m);
}
}
}
diff --git a/sys/net80211/ieee80211_proto.h b/sys/net80211/ieee80211_proto.h
index f8bce62..a73df52 100644
--- a/sys/net80211/ieee80211_proto.h
+++ b/sys/net80211/ieee80211_proto.h
@@ -98,10 +98,12 @@ int ieee80211_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
int ieee80211_output(struct ifnet *, struct mbuf *,
struct sockaddr *, struct route *ro);
+int ieee80211_raw_output(struct ieee80211vap *, struct ieee80211_node *,
+ struct mbuf *, const struct ieee80211_bpf_params *);
void ieee80211_send_setup(struct ieee80211_node *, struct mbuf *, int, int,
const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
-void ieee80211_start(struct ifnet *);
+void ieee80211_start(struct ifnet *ifp);
int ieee80211_send_nulldata(struct ieee80211_node *);
int ieee80211_classify(struct ieee80211_node *, struct mbuf *m);
struct mbuf *ieee80211_mbuf_adjust(struct ieee80211vap *, int,
diff --git a/sys/net80211/ieee80211_superg.c b/sys/net80211/ieee80211_superg.c
index 9ac5878..4971f12 100644
--- a/sys/net80211/ieee80211_superg.c
+++ b/sys/net80211/ieee80211_superg.c
@@ -501,15 +501,17 @@ static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
int error;
+ IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);
+
/* encap and xmit */
m = ieee80211_encap(vap, ni, m);
if (m != NULL) {
struct ifnet *ifp = vap->iv_ifp;
- struct ifnet *parent = ni->ni_ic->ic_ifp;
- error = parent->if_transmit(parent, m);
+		error = ieee80211_parent_transmit(ic, m);
if (error != 0) {
/* NB: IFQ_HANDOFF reclaims mbuf */
ieee80211_free_node(ni);
@@ -532,6 +534,8 @@ ff_flush(struct mbuf *head, struct mbuf *last)
struct ieee80211_node *ni;
struct ieee80211vap *vap;
+	IEEE80211_TX_LOCK_ASSERT(
+	    ((struct ieee80211_node *)head->m_pkthdr.rcvif)->ni_ic);
+
for (m = head; m != last; m = next) {
next = m->m_nextpkt;
m->m_nextpkt = NULL;
@@ -590,7 +594,9 @@ ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
M_AGE_SUB(m, quanta);
IEEE80211_UNLOCK(ic);
+ IEEE80211_TX_LOCK(ic);
ff_flush(head, m);
+ IEEE80211_TX_UNLOCK(ic);
}
static void
@@ -679,6 +685,8 @@ ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
struct mbuf *mstaged;
uint32_t txtime, limit;
+ IEEE80211_TX_UNLOCK_ASSERT(ic);
+
/*
* Check if the supplied frame can be aggregated.
*
@@ -734,10 +742,12 @@ ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
IEEE80211_UNLOCK(ic);
if (mstaged != NULL) {
+ IEEE80211_TX_LOCK(ic);
IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
"%s: flush staged frame", __func__);
/* encap and xmit */
ff_transmit(ni, mstaged);
+ IEEE80211_TX_UNLOCK(ic);
}
return m; /* NB: original frame */
}
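
ieee80211_ff_age() above is careful about lock ordering: staged frames are unlinked under the com lock, the com lock is dropped, and only then is the TX lock taken to flush, so the two locks are never held together. A runnable sketch of that discipline (illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t com_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int stageq[8], stage_n;

static void
age_and_flush(void)
{
	int batch[8], n, i;

	pthread_mutex_lock(&com_lock);		/* IEEE80211_LOCK */
	n = stage_n;				/* unlink aged frames */
	for (i = 0; i < n; i++)
		batch[i] = stageq[i];
	stage_n = 0;
	pthread_mutex_unlock(&com_lock);	/* IEEE80211_UNLOCK */

	pthread_mutex_lock(&tx_lock);		/* IEEE80211_TX_LOCK */
	for (i = 0; i < n; i++)
		printf("flush frame %d\n", batch[i]);
	pthread_mutex_unlock(&tx_lock);		/* IEEE80211_TX_UNLOCK */
}

int
main(void)
{
	stageq[0] = 1;
	stageq[1] = 2;
	stage_n = 2;
	age_and_flush();
	return (0);
}
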
diff --git a/sys/net80211/ieee80211_var.h b/sys/net80211/ieee80211_var.h
index 7ec00ee..2e8f58b 100644
--- a/sys/net80211/ieee80211_var.h
+++ b/sys/net80211/ieee80211_var.h
@@ -118,6 +118,7 @@ struct ieee80211_frame;
struct ieee80211com {
struct ifnet *ic_ifp; /* associated device */
ieee80211_com_lock_t ic_comlock; /* state update lock */
+ ieee80211_tx_lock_t ic_txlock; /* ic/vap TX lock */
TAILQ_HEAD(, ieee80211vap) ic_vaps; /* list of vap instances */
int ic_headroom; /* driver tx headroom needs */
enum ieee80211_phytype ic_phytype; /* XXX wrong for multi-mode */
diff --git a/sys/net80211/ieee80211_wds.c b/sys/net80211/ieee80211_wds.c
index b9fd4ff..ad88a05 100644
--- a/sys/net80211/ieee80211_wds.c
+++ b/sys/net80211/ieee80211_wds.c
@@ -232,7 +232,6 @@ void
ieee80211_dwds_mcast(struct ieee80211vap *vap0, struct mbuf *m)
{
struct ieee80211com *ic = vap0->iv_ic;
- struct ifnet *parent = ic->ic_ifp;
const struct ether_header *eh = mtod(m, const struct ether_header *);
struct ieee80211_node *ni;
struct ieee80211vap *vap;
@@ -296,7 +295,7 @@ ieee80211_dwds_mcast(struct ieee80211vap *vap0, struct mbuf *m)
mcopy->m_flags |= M_MCAST;
mcopy->m_pkthdr.rcvif = (void *) ni;
- err = parent->if_transmit(parent, mcopy);
+ err = ieee80211_parent_transmit(ic, mcopy);
if (err) {
/* NB: IFQ_HANDOFF reclaims mbuf */
ifp->if_oerrors++;
diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 0003974..31a86d1 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -128,7 +129,7 @@ nfs_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@@ -137,10 +138,10 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@@ -170,7 +171,7 @@ nfs_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
nfs_printf("nfs_getpages: error %d\n", error);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@@ -178,7 +179,7 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@@ -189,7 +190,7 @@ nfs_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@@ -225,7 +226,7 @@ nfs_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -1296,9 +1297,9 @@ nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the
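
The VM_OBJECT_LOCK -> VM_OBJECT_WLOCK churn in this and the following files reflects the VM object lock changing from a mutex to a read/write lock (hence the new <sys/rwlock.h> includes); sites that mutate page state must take it in write mode. A runnable user-space model of the split, with pthread rwlocks standing in for rw(9):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_rwlock_t lock;
	int valid_pages;
};

#define	OBJ_WLOCK(o)	pthread_rwlock_wrlock(&(o)->lock)
#define	OBJ_WUNLOCK(o)	pthread_rwlock_unlock(&(o)->lock)
#define	OBJ_RLOCK(o)	pthread_rwlock_rdlock(&(o)->lock)
#define	OBJ_RUNLOCK(o)	pthread_rwlock_unlock(&(o)->lock)

static void
mark_valid(struct obj *o)
{
	OBJ_WLOCK(o);			/* VM_OBJECT_WLOCK: mutation */
	o->valid_pages++;
	OBJ_WUNLOCK(o);			/* VM_OBJECT_WUNLOCK */
}

static int
count_valid(struct obj *o)
{
	int n;

	OBJ_RLOCK(o);			/* readers can now share */
	n = o->valid_pages;
	OBJ_RUNLOCK(o);
	return (n);
}

int
main(void)
{
	struct obj o = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	mark_valid(&o);
	printf("valid pages: %d\n", count_valid(&o));
	return (0);
}
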
diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c
index 5026daf..f5017eb 100644
--- a/sys/nfsclient/nfs_vnops.c
+++ b/sys/nfsclient/nfs_vnops.c
@@ -629,9 +629,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {
diff --git a/sys/nfsserver/nfs_serv.c b/sys/nfsserver/nfs_serv.c
index bef72af..2e7d8b0 100644
--- a/sys/nfsserver/nfs_serv.c
+++ b/sys/nfsserver/nfs_serv.c
@@ -87,6 +87,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@@ -3332,9 +3333,9 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
} else {
@@ -3363,10 +3364,10 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- VM_OBJECT_LOCK(vp->v_object);
+ VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_object);
+ VM_OBJECT_WUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;
diff --git a/sys/ofed/drivers/infiniband/core/umem.c b/sys/ofed/drivers/infiniband/core/umem.c
index 0c6fed2..1d12367 100644
--- a/sys/ofed/drivers/infiniband/core/umem.c
+++ b/sys/ofed/drivers/infiniband/core/umem.c
@@ -140,10 +140,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
struct page *page = sg_page(&chunk->page_list[i]);
if (umem->writable && dirty) {
if (object && object != page->object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (object != page->object) {
object = page->object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
vm_page_dirty(page);
}
@@ -151,7 +151,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
kfree(chunk);
}
if (object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
#endif
}
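
The __ib_umem_release() hunk above holds at most one object lock while walking pages that may belong to different objects, releasing the previous object's lock before taking the next. A runnable user-space model of that switching loop (illustrative types):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	const char *name;
};
struct page {
	struct obj *object;
};

static void
dirty_pages(struct page *pages, int n)
{
	struct obj *object = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct page *p = &pages[i];

		/* Switching objects: drop the old lock first. */
		if (object != NULL && object != p->object) {
			pthread_mutex_unlock(&object->lock);
			object = NULL;
		}
		if (object != p->object) {
			object = p->object;
			pthread_mutex_lock(&object->lock);
		}
		printf("dirty page %d in object %s\n", i, object->name);
	}
	if (object != NULL)
		pthread_mutex_unlock(&object->lock);
}

int
main(void)
{
	struct obj a = { PTHREAD_MUTEX_INITIALIZER, "a" };
	struct obj b = { PTHREAD_MUTEX_INITIALIZER, "b" };
	struct page pages[3] = { { &a }, { &a }, { &b } };

	dirty_pages(pages, 3);
	return (0);
}
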
diff --git a/sys/ofed/include/linux/linux_compat.c b/sys/ofed/include/linux/linux_compat.c
index fb3783a..329376d 100644
--- a/sys/ofed/include/linux/linux_compat.c
+++ b/sys/ofed/include/linux/linux_compat.c
@@ -37,6 +37,7 @@
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/pmap.h>
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index b0d5acd..383897b 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
diff --git a/sys/powerpc/aim/machdep.c b/sys/powerpc/aim/machdep.c
index 23cf745..5ae42bd 100644
--- a/sys/powerpc/aim/machdep.c
+++ b/sys/powerpc/aim/machdep.c
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index e0af4e5..6a339ca 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1122,7 +1122,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@@ -1291,7 +1291,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -1331,7 +1331,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea_clear_modify: page %p is busy", m));
@@ -1366,7 +1366,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 296b546..14b88f0 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1184,7 +1184,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@@ -1447,7 +1447,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@@ -1482,7 +1482,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea64_clear_modify: page %p is busy", m));
@@ -1515,7 +1515,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/powerpc/booke/machdep.c b/sys/powerpc/booke/machdep.c
index 1522f9f..e9a2d52 100644
--- a/sys/powerpc/booke/machdep.c
+++ b/sys/powerpc/booke/machdep.c
@@ -101,6 +101,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/exec.h>
#include <sys/ktr.h>
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 8e0b76f..343b046 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -1561,7 +1561,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
("mmu_booke_enter_locked: user pmap, non user va"));
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1958,7 +1958,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@@ -2173,7 +2173,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2245,7 +2245,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("mmu_booke_clear_modify: page %p is busy", m));
@@ -2660,7 +2660,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("mmu_booke_object_init_pt: non-device object"));
}
diff --git a/sys/security/mac/mac_process.c b/sys/security/mac/mac_process.c
index 8e5e5bc..15f704d 100644
--- a/sys/security/mac/mac_process.c
+++ b/sys/security/mac/mac_process.c
@@ -54,9 +54,9 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
-#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sdt.h>
#include <sys/systm.h>
@@ -284,14 +284,14 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
object = vme->object.vm_object;
if (object == NULL)
continue;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* At the moment, vm_maps and objects aren't considered by
* the MAC system, so only things with backing by a normal
@@ -334,10 +334,10 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
vm_object_reference(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, offset, offset +
vme->end - vme->start, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
vm_object_deallocate(object);
diff --git a/sys/sparc64/conf/GENERIC b/sys/sparc64/conf/GENERIC
index 79124ab..e19f17c 100644
--- a/sys/sparc64/conf/GENERIC
+++ b/sys/sparc64/conf/GENERIC
@@ -122,6 +122,9 @@ device cd # CD
device pass # Passthrough device (direct ATA/SCSI access)
device ses # Enclosure Services (SES and SAF-TE)
device ctl # CAM Target Layer
+options CTL_DISABLE # Disable CTL by default to save memory.
+ # Re-enable with kern.cam.ctl.disable=0 in
+ # /boot/loader.conf
# RAID controllers
#device amr # AMI MegaRAID
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index 7c7c234..00ae00f 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
+#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index f689b17..8bfc454 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -1229,7 +1229,7 @@ pmap_pinit(pmap_t pm)
pm->pm_context[i] = -1;
CPU_ZERO(&pm->pm_active);
- VM_OBJECT_LOCK(pm->pm_tsb_obj);
+ VM_OBJECT_WLOCK(pm->pm_tsb_obj);
for (i = 0; i < TSB_PAGES; i++) {
m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
@@ -1237,7 +1237,7 @@ pmap_pinit(pmap_t pm)
m->md.pmap = pm;
ma[i] = m;
}
- VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
+ VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
@@ -1291,7 +1291,7 @@ pmap_release(pmap_t pm)
pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
obj = pm->pm_tsb_obj;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
while (!TAILQ_EMPTY(&obj->memq)) {
m = TAILQ_FIRST(&obj->memq);
@@ -1300,7 +1300,7 @@ pmap_release(pmap_t pm)
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
PMAP_LOCK_DESTROY(pm);
}
@@ -1495,7 +1495,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&tte_list_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
@@ -1662,7 +1662,7 @@ pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@@ -2060,7 +2060,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@@ -2128,7 +2128,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@@ -2183,7 +2183,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
diff --git a/sys/sys/callout.h b/sys/sys/callout.h
index 7a4dec9..9e3eb90 100644
--- a/sys/sys/callout.h
+++ b/sys/sys/callout.h
@@ -63,8 +63,6 @@ struct callout_handle {
};
#ifdef _KERNEL
-extern int ncallout;
-
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
#define callout_drain(c) _callout_stop_safe(c, 1)
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index f149e6c..12337de 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -321,8 +321,6 @@ typedef void timeout_t(void *); /* timeout function type */
void callout_handle_init(struct callout_handle *);
struct callout_handle timeout(timeout_t *, void *, int);
void untimeout(timeout_t *, void *, struct callout_handle);
-caddr_t kern_timeout_callwheel_alloc(caddr_t v);
-void kern_timeout_callwheel_init(void);
/* Stubs for obsolete functions that used to be for interrupt management */
static __inline intrmask_t splbio(void) { return 0; }
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
index f8e3e00..cfdee3a 100644
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ttycom.h>
#include <sys/bio.h>
#include <sys/buf.h>
+#include <sys/rwlock.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
@@ -143,9 +144,9 @@ ffs_rawread_sync(struct vnode *vp)
if ((obj = vp->v_object) != NULL &&
(obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VI_UNLOCK(vp);
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
} else
VI_UNLOCK(vp);
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index 5c99d5b..c065187 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
+#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -842,7 +843,7 @@ ffs_getpages(ap)
* user programs might reference data beyond the actual end of file
	 * occurring within the page. We have to zero that data.
*/
- VM_OBJECT_LOCK(mreq->object);
+ VM_OBJECT_WLOCK(mreq->object);
if (mreq->valid) {
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
@@ -853,10 +854,10 @@ ffs_getpages(ap)
vm_page_unlock(ap->a_m[i]);
}
}
- VM_OBJECT_UNLOCK(mreq->object);
+ VM_OBJECT_WUNLOCK(mreq->object);
return VM_PAGER_OK;
}
- VM_OBJECT_UNLOCK(mreq->object);
+ VM_OBJECT_WUNLOCK(mreq->object);
return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
ap->a_count,
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 12dc823..a71a22a 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
-#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@@ -91,10 +91,10 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->cred = cred;
object->charge = size;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
return (object);
}
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 809c32c..fd20664 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <vm/vm.h>
@@ -206,7 +207,7 @@ void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type == OBJT_MGTDEVICE) {
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
pmap_remove_all(m);
@@ -221,7 +222,7 @@ static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->type == OBJT_DEVICE &&
(m->oflags & VPO_UNMANAGED) != 0),
("Managed device or page obj %p m %p", object, m));
@@ -235,13 +236,13 @@ dev_pager_dealloc(object)
{
vm_page_t m;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);
mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE) {
/*
@@ -258,11 +259,11 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
int error, i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = object->un_pager.devp.ops->cdev_pg_fault(object,
IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (i != reqpage) {
@@ -304,12 +305,12 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
pidx = OFF_TO_IDX(offset);
memattr = object->memattr;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
dev = object->handle;
csw = dev_refthread(dev, &ref);
if (csw == NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
td = curthread;
@@ -321,7 +322,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
if (ret != 0) {
printf(
"WARNING: dev_pager_getpage: map function returns error %d", ret);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
@@ -338,7 +339,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* the new physical address.
*/
page = *mres;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_updatefake(page, paddr, memattr);
} else {
/*
@@ -346,7 +347,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
 * free up all of the original pages.
*/
page = vm_page_getfake(paddr, memattr);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);
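The device-pager hunks above show the conversion's core discipline: fields that
must stay coherent (memattr, the device handle) are snapshotted while the object
is write-locked, the lock is dropped with VM_OBJECT_WUNLOCK() before any call
that can sleep, and it is reacquired before the page is updated.  A minimal
sketch of that shape (illustrative only; blocking_driver_call() is a
hypothetical stand-in for the cdevsw d_mmap path):

static int
example_fault(vm_object_t object, vm_ooffset_t offset, vm_page_t *mres)
{
	vm_memattr_t memattr;
	vm_paddr_t paddr;

	VM_OBJECT_ASSERT_WLOCKED(object);	/* caller holds the write lock */
	memattr = object->memattr;		/* snapshot while locked */
	VM_OBJECT_WUNLOCK(object);		/* drop: the driver may sleep */
	if (blocking_driver_call(offset, &paddr) != 0) {
		VM_OBJECT_WLOCK(object);	/* relock before returning */
		return (VM_PAGER_FAIL);
	}
	VM_OBJECT_WLOCK(object);		/* relock to update the page */
	vm_page_updatefake(*mres, paddr, memattr);
	return (VM_PAGER_OK);
}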
diff --git a/sys/vm/phys_pager.c b/sys/vm/phys_pager.c
index 0ffafea..7b9f7b2 100644
--- a/sys/vm/phys_pager.c
+++ b/sys/vm/phys_pager.c
@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -123,11 +124,11 @@ phys_pager_dealloc(vm_object_t object)
{
if (object->handle != NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mtx_lock(&phys_pager_mtx);
TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
mtx_unlock(&phys_pager_mtx);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
}
@@ -139,7 +140,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (m[i]->valid == 0) {
if ((m[i]->flags & PG_ZERO) == 0)
diff --git a/sys/vm/sg_pager.c b/sys/vm/sg_pager.c
index 097039e..76cae68 100644
--- a/sys/vm/sg_pager.c
+++ b/sys/vm/sg_pager.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -142,10 +143,10 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
size_t space;
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
sg = object->handle;
memattr = object->memattr;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
offset = m[reqpage]->pindex;
/*
@@ -180,7 +181,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/* Construct a new fake page. */
page = vm_page_getfake(paddr, memattr);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
/* Free the original pages and insert this fake page into the object. */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 712fd83..2049996 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
@@ -621,14 +622,14 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
@@ -639,13 +640,13 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
return (object);
}
@@ -674,7 +675,7 @@ swap_pager_dealloc(vm_object_t object)
mtx_unlock(&sw_alloc_mtx);
}
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "swpdea");
/*
@@ -815,7 +816,7 @@ void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
swp_pager_meta_free(object, start, size);
}
@@ -834,7 +835,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
daddr_t blk = SWAPBLK_NONE;
vm_pindex_t beg = start; /* save start index */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while (size) {
if (n == 0) {
n = BLIST_MAX_ALLOC;
@@ -842,7 +843,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
n >>= 1;
if (n == 0) {
swp_pager_meta_free(object, beg, start - beg);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (-1);
}
}
@@ -854,7 +855,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
--n;
}
swp_pager_meta_free(object, start, n);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
@@ -883,8 +884,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
{
vm_pindex_t i;
- VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(srcobject);
+ VM_OBJECT_ASSERT_WLOCKED(dstobject);
/*
* If destroysource is set, we remove the source object from the
@@ -934,11 +935,11 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
* swp_pager_meta_build() can sleep.
*/
vm_object_pip_add(srcobject, 1);
- VM_OBJECT_UNLOCK(srcobject);
+ VM_OBJECT_WUNLOCK(srcobject);
vm_object_pip_add(dstobject, 1);
swp_pager_meta_build(dstobject, i, srcaddr);
vm_object_pip_wakeup(dstobject);
- VM_OBJECT_LOCK(srcobject);
+ VM_OBJECT_WLOCK(srcobject);
vm_object_pip_wakeup(srcobject);
}
} else {
@@ -987,7 +988,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *aft
{
daddr_t blk0;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
 * do we have good backing store at the requested index?
*/
@@ -1058,7 +1059,7 @@ static void
swap_pager_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
@@ -1147,7 +1148,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/*
* Getpbuf() can sleep.
*/
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Get a swap buffer header to perform the IO
*/
@@ -1168,7 +1169,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
bp->b_bufsize = PAGE_SIZE * (j - i);
bp->b_pager.pg_reqpage = reqpage - i;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
{
int k;
@@ -1187,7 +1188,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* does not remove it.
*/
vm_object_pip_add(object, bp->b_npages);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* perform the I/O. NOTE!!! bp cannot be considered valid after
@@ -1208,7 +1209,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* cleared on completion. If an I/O error occurs, SWAPBLK_NONE
* is set in the meta-data.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
mreq->oflags |= VPO_WANTED;
PCPU_INC(cnt.v_intrans);
@@ -1283,7 +1284,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
if (object->type != OBJT_SWAP)
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (curproc != pageproc)
sync = TRUE;
@@ -1378,7 +1379,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_bufsize = PAGE_SIZE * n;
bp->b_blkno = blk;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (j = 0; j < n; ++j) {
vm_page_t mreq = m[i+j];
@@ -1393,7 +1394,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
bp->b_npages = n;
/*
* Must set dirty range for NFS to work.
@@ -1443,7 +1444,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
swp_pager_async_iodone(bp);
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
/*
@@ -1487,7 +1488,7 @@ swp_pager_async_iodone(struct buf *bp)
if (bp->b_npages) {
object = bp->b_pages[0]->object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
/*
@@ -1611,7 +1612,7 @@ swp_pager_async_iodone(struct buf *bp)
*/
if (object != NULL) {
vm_object_pip_wakeupn(object, bp->b_npages);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1652,7 +1653,7 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
int bcount;
int i;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return (0);
@@ -1746,13 +1747,13 @@ restart:
for (j = 0; j < SWAP_META_PAGES; ++j) {
if (swp_pager_isondev(swap->swb_pages[j], sp)) {
/* avoid deadlock */
- if (!VM_OBJECT_TRYLOCK(object)) {
+ if (!VM_OBJECT_TRYWLOCK(object)) {
break;
} else {
mtx_unlock(&swhash_mtx);
swp_pager_force_pagein(object,
pindex + j);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mtx_lock(&swhash_mtx);
goto restart;
}
@@ -1808,7 +1809,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
struct swblock **pswap;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Convert default object to swap object if necessary
*/
@@ -1845,7 +1846,7 @@ retry:
swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (uma_zone_exhausted(swap_zone)) {
if (atomic_cmpset_int(&exhausted, 0, 1))
printf("swap zone exhausted, "
@@ -1854,7 +1855,7 @@ retry:
pause("swzonex", 10);
} else
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
@@ -1906,7 +1907,7 @@ static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -1952,7 +1953,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@@ -2011,7 +2012,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
daddr_t r1;
int idx;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
 * The meta data only exists if the object is OBJT_SWAP
* and even then might not be allocated yet.
@@ -2464,14 +2465,14 @@ vmspace_swap_count(struct vmspace *vmspace)
for (cur = map->header.next; cur != &map->header; cur = cur->next) {
if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object = cur->object.vm_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_SWAP &&
object->un_pager.swp.swp_bcount != 0) {
n = (cur->end - cur->start) / PAGE_SIZE;
count += object->un_pager.swp.swp_bcount *
SWAP_META_PAGES * n / object->size + 1;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
}
return (count);
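The swap-pager conversion is mechanical except where the lock is dropped across
a sleep: swp_pager_meta_build() backs off with VM_WAIT when
uma_zalloc(..., M_NOWAIT) fails, and the swapoff scan uses VM_OBJECT_TRYWLOCK()
to avoid a reversal against swhash_mtx.  The allocate-or-back-off loop,
compressed into a sketch (try_alloc_nowait() is a hypothetical stand-in for the
zone allocation):

static void *
example_alloc_meta(vm_object_t object)
{
	void *p;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (;;) {
		if ((p = try_alloc_nowait()) != NULL)
			return (p);		/* still write-locked */
		VM_OBJECT_WUNLOCK(object);	/* never sleep holding the lock */
		VM_WAIT;			/* wait for the page daemon */
		VM_OBJECT_WLOCK(object);	/* relock; caller must re-verify */
	}
}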
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index d298064..1879b7e 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index b79b3f5..6c41c07 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -81,9 +81,9 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -163,14 +163,14 @@ unlock_and_deallocate(struct faultstate *fs)
{
vm_object_pip_wakeup(fs->object);
- VM_OBJECT_UNLOCK(fs->object);
+ VM_OBJECT_WUNLOCK(fs->object);
if (fs->object != fs->first_object) {
- VM_OBJECT_LOCK(fs->first_object);
+ VM_OBJECT_WLOCK(fs->first_object);
vm_page_lock(fs->first_m);
vm_page_free(fs->first_m);
vm_page_unlock(fs->first_m);
vm_object_pip_wakeup(fs->first_object);
- VM_OBJECT_UNLOCK(fs->first_object);
+ VM_OBJECT_WUNLOCK(fs->first_object);
fs->first_m = NULL;
}
vm_object_deallocate(fs->first_object);
@@ -290,7 +290,7 @@ RetryFault:;
* truncation operations) during I/O. This must be done after
* obtaining the vnode lock in order to avoid possible deadlocks.
*/
- VM_OBJECT_LOCK(fs.first_object);
+ VM_OBJECT_WLOCK(fs.first_object);
vm_object_reference_locked(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
@@ -363,17 +363,17 @@ RetryFault:;
vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
- if (!VM_OBJECT_TRYLOCK(
+ if (!VM_OBJECT_TRYWLOCK(
fs.first_object)) {
- VM_OBJECT_UNLOCK(fs.object);
- VM_OBJECT_LOCK(fs.first_object);
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.first_object);
+ VM_OBJECT_WLOCK(fs.object);
}
vm_page_lock(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock(fs.first_m);
vm_object_pip_wakeup(fs.first_object);
- VM_OBJECT_UNLOCK(fs.first_object);
+ VM_OBJECT_WUNLOCK(fs.first_object);
fs.first_m = NULL;
}
unlock_map(&fs);
@@ -383,7 +383,7 @@ RetryFault:;
"vmpfw");
}
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
PCPU_INC(cnt.v_intrans);
vm_object_deallocate(fs.first_object);
goto RetryFault;
@@ -646,12 +646,12 @@ vnode_locked:
*/
if (fs.object != fs.first_object) {
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
}
fs.first_m = NULL;
@@ -669,11 +669,11 @@ vnode_locked:
} else {
KASSERT(fs.object != next_object,
("object loop %p", next_object));
- VM_OBJECT_LOCK(next_object);
+ VM_OBJECT_WLOCK(next_object);
vm_object_pip_add(next_object, 1);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
fs.object = next_object;
}
}
@@ -725,7 +725,7 @@ vnode_locked:
*/
((fs.object->type == OBJT_DEFAULT) ||
(fs.object->type == OBJT_SWAP)) &&
- (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
+ (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
/*
* We don't chase down the shadow chain
*/
@@ -774,7 +774,7 @@ vnode_locked:
* conditional
*/
vm_object_pip_wakeup(fs.object);
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
/*
* Only use the new page below...
*/
@@ -782,7 +782,7 @@ vnode_locked:
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
if (!is_first_object_locked)
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
PCPU_INC(cnt.v_cow_faults);
curthread->td_cow++;
} else {
@@ -903,7 +903,7 @@ vnode_locked:
*/
KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
("vm_fault: page %p partially invalid", fs.m));
- VM_OBJECT_UNLOCK(fs.object);
+ VM_OBJECT_WUNLOCK(fs.object);
/*
* Put this page into the physical map. We had to do the unlock above
@@ -914,7 +914,7 @@ vnode_locked:
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
- VM_OBJECT_LOCK(fs.object);
+ VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);
/*
@@ -960,13 +960,13 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
vm_pindex_t pindex;
object = fs->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
first_object = fs->first_object;
if (first_object != object) {
- if (!VM_OBJECT_TRYLOCK(first_object)) {
- VM_OBJECT_UNLOCK(object);
- VM_OBJECT_LOCK(first_object);
- VM_OBJECT_LOCK(object);
+ if (!VM_OBJECT_TRYWLOCK(first_object)) {
+ VM_OBJECT_WUNLOCK(object);
+ VM_OBJECT_WLOCK(first_object);
+ VM_OBJECT_WLOCK(object);
}
}
/* Neither fictitious nor unmanaged pages can be cached. */
@@ -999,7 +999,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
}
}
if (first_object != object)
- VM_OBJECT_UNLOCK(first_object);
+ VM_OBJECT_WUNLOCK(first_object);
}
/*
@@ -1044,28 +1044,28 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = object;
- VM_OBJECT_LOCK(lobject);
+ VM_OBJECT_WLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
- VM_OBJECT_LOCK(backing_object);
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(lobject);
lobject = backing_object;
}
/*
 * give up when a page is not in memory
*/
if (m == NULL) {
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
- VM_OBJECT_UNLOCK(lobject);
+ VM_OBJECT_WUNLOCK(lobject);
}
}
@@ -1257,7 +1257,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_object->pg_color = atop(dst_entry->start);
#endif
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
KASSERT(upgrade || dst_entry->object.vm_object == NULL,
("vm_fault_copy_entry: vm_object not NULL"));
dst_entry->object.vm_object = dst_object;
@@ -1307,9 +1307,9 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_m = vm_page_alloc(dst_object, dst_pindex,
VM_ALLOC_NORMAL);
if (dst_m == NULL) {
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
VM_WAIT;
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
}
} while (dst_m == NULL);
@@ -1318,7 +1318,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* (Because the source is wired down, the page will be in
* memory.)
*/
- VM_OBJECT_LOCK(src_object);
+ VM_OBJECT_WLOCK(src_object);
object = src_object;
pindex = src_pindex + dst_pindex;
while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
@@ -1327,18 +1327,18 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Allow fallback to backing objects if we are reading.
*/
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
pindex += OFF_TO_IDX(object->backing_object_offset);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
pmap_copy_page(src_m, dst_m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
dst_m->valid = VM_PAGE_BITS_ALL;
dst_m->dirty = VM_PAGE_BITS_ALL;
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
/*
* Enter it in the pmap. If a wired, copy-on-write
@@ -1350,7 +1350,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Mark it no longer busy, and put it on the active list.
*/
- VM_OBJECT_LOCK(dst_object);
+ VM_OBJECT_WLOCK(dst_object);
if (upgrade) {
vm_page_lock(src_m);
@@ -1367,7 +1367,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
}
vm_page_wakeup(dst_m);
}
- VM_OBJECT_UNLOCK(dst_object);
+ VM_OBJECT_WUNLOCK(dst_object);
if (upgrade) {
dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
vm_object_deallocate(src_object);
@@ -1403,7 +1403,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
object = m->object;
pindex = m->pindex;
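vm_fault.c repeatedly uses a try-lock back-off when two object locks are
needed: holding fs.object, it attempts fs.first_object opportunistically and,
on failure, drops what it holds and takes both in the established order.
Reduced to its shape (illustrative only; "b" precedes "a" in the lock order):

static void
example_lock_pair(vm_object_t a, vm_object_t b)
{

	VM_OBJECT_ASSERT_WLOCKED(a);
	if (!VM_OBJECT_TRYWLOCK(b)) {
		VM_OBJECT_WUNLOCK(a);	/* give up "a" to avoid deadlock */
		VM_OBJECT_WLOCK(b);
		VM_OBJECT_WLOCK(a);	/* "a" may have changed meanwhile */
	}
	/* both objects are write-locked here */
}

Holding two locks of the same "vm object" class at once is also what makes the
RW_DUPOK flag in the vm_object.c hunks below necessary.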
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index f44f04c..e0a8bf7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
@@ -238,7 +239,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -260,7 +261,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_page_unlock(m);
vm_page_wakeup(m);
out:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (m);
}
@@ -394,7 +395,7 @@ vm_thread_new(struct thread *td, int pages)
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
@@ -404,7 +405,7 @@ vm_thread_new(struct thread *td, int pages)
ma[i] = m;
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
}
@@ -417,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
atomic_add_int(&kstacks, -1);
pmap_qremove(ks, pages);
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -427,7 +428,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
vm_page_free(m);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
@@ -505,7 +506,7 @@ vm_thread_swapout(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
pmap_qremove(td->td_kstack, pages);
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -515,7 +516,7 @@ vm_thread_swapout(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
}
/*
@@ -530,7 +531,7 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
- VM_OBJECT_LOCK(ksobj);
+ VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++)
ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
VM_ALLOC_WIRED);
@@ -557,7 +558,7 @@ vm_thread_swapin(struct thread *td)
} else if (ma[i]->oflags & VPO_BUSY)
vm_page_wakeup(ma[i]);
}
- VM_OBJECT_UNLOCK(ksobj);
+ VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(td->td_kstack, ma, pages);
cpu_thread_swapin(td);
}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index c507691..08c9b03 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -68,8 +68,8 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>
@@ -157,8 +157,6 @@ vm_ksubmap_init(struct kva_md_info *kmi)
again:
v = (caddr_t)firstaddr;
- v = kern_timeout_callwheel_alloc(v);
-
/*
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
@@ -202,10 +200,5 @@ again:
* XXX: Mbuf system machine-specific initializations should
* go here, if anywhere.
*/
-
- /*
- * Initialize the callouts we just allocated.
- */
- kern_timeout_callwheel_init();
}
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ad9aa0d..9f602b7 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -70,9 +70,9 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h> /* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -234,7 +234,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
end_offset = offset + size;
for (; offset < end_offset; offset += PAGE_SIZE) {
tries = 0;
@@ -242,12 +242,12 @@ retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@@ -266,7 +266,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@@ -303,18 +303,18 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries = 0;
retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
atop(size), low, high, alignment, boundary, memattr);
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@@ -328,7 +328,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@@ -488,7 +488,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
@@ -500,7 +500,7 @@ retry:
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
vm_map_unlock(map);
VM_WAIT;
@@ -510,7 +510,7 @@ retry:
MAP_ENTRY_IN_TRANSITION,
("kmem_back: volatile entry"));
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
goto retry;
}
/*
@@ -526,7 +526,7 @@ retry:
vm_page_unwire(m, 0);
vm_page_free(m);
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
vm_map_delete(map, addr, addr + size);
return (KERN_NO_SPACE);
}
@@ -536,7 +536,7 @@ retry:
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
/*
* Mark map entry as non-pageable. Repeat the assert.
@@ -556,7 +556,7 @@ retry:
/*
* Loop thru pages, entering them in the pmap.
*/
- VM_OBJECT_LOCK(kmem_object);
+ VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
/*
@@ -566,7 +566,7 @@ retry:
TRUE);
vm_page_wakeup(m);
}
- VM_OBJECT_UNLOCK(kmem_object);
+ VM_OBJECT_WUNLOCK(kmem_object);
return (KERN_SUCCESS);
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 35ac468..72d3983 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
@@ -1222,10 +1223,10 @@ charged:
* reference counting is insufficient to recognize
* aliases with precision.)
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->ref_count > 1 || object->shadow_count != 0)
vm_object_clear_flag(object, OBJ_ONEMAPPING);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
else if ((prev_entry != &map->header) &&
(prev_entry->eflags == protoeflags) &&
@@ -1623,12 +1624,12 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@@ -1700,12 +1701,12 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@@ -1805,7 +1806,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
pmap_object_init_pt(map->pmap, addr, object, pindex, size);
goto unlock_return;
@@ -1856,7 +1857,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
pmap_enter_object(map->pmap, start, addr + ptoa(psize),
p_start, prot);
unlock_return:
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1932,9 +1933,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
continue;
}
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
continue;
}
@@ -1946,7 +1947,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged\n", obj));
if (!swap_reserve(ptoa(obj->size))) {
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
@@ -1954,7 +1955,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
crhold(cred);
obj->cred = cred;
obj->charge = ptoa(obj->size);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
/*
@@ -2717,7 +2718,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->ref_count != 1 &&
((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
object == kernel_object || object == kmem_object)) {
@@ -2746,7 +2747,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
}
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
} else
entry->object.vm_object = NULL;
if (map->system_map)
@@ -2954,7 +2955,7 @@ vm_map_copy_entry(
*/
size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
- VM_OBJECT_LOCK(src_object);
+ VM_OBJECT_WLOCK(src_object);
charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
@@ -2975,7 +2976,7 @@ vm_map_copy_entry(
src_object->cred = src_entry->cred;
src_object->charge = size;
}
- VM_OBJECT_UNLOCK(src_object);
+ VM_OBJECT_WUNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
cred = curthread->td_ucred;
@@ -3151,7 +3152,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
vm_object_deallocate(object);
object = old_entry->object.vm_object;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
if (old_entry->cred != NULL) {
KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
@@ -3159,7 +3160,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
object->charge = old_entry->end - old_entry->start;
old_entry->cred = NULL;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Clone the entry, referencing the shared object.
@@ -3845,10 +3846,10 @@ RetryLookup:;
crfree(entry->cred);
entry->cred = NULL;
} else if (entry->cred != NULL) {
- VM_OBJECT_LOCK(eobject);
+ VM_OBJECT_WLOCK(eobject);
eobject->cred = entry->cred;
eobject->charge = size;
- VM_OBJECT_UNLOCK(eobject);
+ VM_OBJECT_WUNLOCK(eobject);
entry->cred = NULL;
}
@@ -3873,10 +3874,10 @@ RetryLookup:;
atop(size));
entry->offset = 0;
if (entry->cred != NULL) {
- VM_OBJECT_LOCK(entry->object.vm_object);
+ VM_OBJECT_WLOCK(entry->object.vm_object);
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = size;
- VM_OBJECT_UNLOCK(entry->object.vm_object);
+ VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
vm_map_lock_downgrade(map);
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 05174e9..713a2be 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
@@ -110,7 +111,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
*/
mtx_lock(&vm_object_list_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
- if (!VM_OBJECT_TRYLOCK(object)) {
+ if (!VM_OBJECT_TRYWLOCK(object)) {
/*
* Avoid a lock-order reversal. Consequently,
* the reported number of active pages may be
@@ -119,7 +120,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
continue;
}
vm_object_clear_flag(object, OBJ_ACTIVE);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
mtx_unlock(&vm_object_list_mtx);
/*
@@ -178,10 +179,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
(object = entry->object.vm_object) == NULL)
continue;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_set_flag(object, OBJ_ACTIVE);
paging |= object->paging_in_progress;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vm_map_unlock_read(map);
vmspace_free(vm);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index cf94fe5..248d9e8 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
@@ -880,12 +881,12 @@ RestartScan:
m = PHYS_TO_VM_PAGE(locked_pa);
if (m->object != object) {
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = m->object;
- locked = VM_OBJECT_TRYLOCK(object);
+ locked = VM_OBJECT_TRYWLOCK(object);
vm_page_unlock(m);
if (!locked) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
goto retry;
}
@@ -903,9 +904,9 @@ RestartScan:
*/
if (current->object.vm_object != object) {
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = current->object.vm_object;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
if (object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP ||
@@ -942,7 +943,7 @@ RestartScan:
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* subyte may page fault. In case it needs to modify
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 606b605..255d919 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
+#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -195,8 +196,8 @@ vm_object_zinit(void *mem, int size, int flags)
vm_object_t object;
object = (vm_object_t)mem;
- bzero(&object->mtx, sizeof(object->mtx));
- mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
+ bzero(&object->lock, sizeof(object->lock));
+ rw_init_flags(&object->lock, "vm object", RW_DUPOK);
/* These are true for any object that has been freed */
object->rtree.rt_root = 0;
@@ -267,7 +268,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
+ rw_init(&kernel_object->lock, "kernel vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@@ -275,7 +276,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
- mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
+ rw_init(&kmem_object->lock, "kmem vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
@@ -303,7 +304,7 @@ void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->flags &= ~bits;
}
@@ -320,7 +321,7 @@ int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
@@ -346,7 +347,7 @@ void
vm_object_pip_add(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress += i;
}
@@ -354,7 +355,7 @@ void
vm_object_pip_subtract(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress -= i;
}
@@ -362,7 +363,7 @@ void
vm_object_pip_wakeup(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
@@ -374,7 +375,7 @@ void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (i)
object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@@ -387,7 +388,7 @@ void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
@@ -421,9 +422,9 @@ vm_object_reference(vm_object_t object)
{
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -438,7 +439,7 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
@@ -454,7 +455,7 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@@ -467,23 +468,23 @@ vm_object_vndeallocate(vm_object_t object)
if (object->ref_count > 1) {
object->ref_count--;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/* vrele may need the vnode lock. */
vrele(vp);
} else {
vhold(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vdrop(vp);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->ref_count--;
if (object->type == OBJT_DEAD) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
} else {
if (object->ref_count == 0)
VOP_UNSET_TEXT(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vput(vp);
}
}
@@ -506,7 +507,7 @@ vm_object_deallocate(vm_object_t object)
vm_object_t temp;
while (object != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
@@ -523,7 +524,7 @@ vm_object_deallocate(vm_object_t object)
*/
object->ref_count--;
if (object->ref_count > 1) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
} else if (object->ref_count == 1) {
if (object->shadow_count == 0 &&
@@ -542,12 +543,12 @@ vm_object_deallocate(vm_object_t object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
- if (!VM_OBJECT_TRYLOCK(robject)) {
+ if (!VM_OBJECT_TRYWLOCK(robject)) {
/*
* Avoid a potential deadlock.
*/
object->ref_count++;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* More likely than not the thread
* holding robject's lock has lower
@@ -571,27 +572,27 @@ vm_object_deallocate(vm_object_t object)
robject->ref_count++;
retry:
if (robject->paging_in_progress) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_pip_wait(robject,
"objde1");
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else if (object->paging_in_progress) {
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object,
PDROP | PVM, "objde2", 0);
- VM_OBJECT_LOCK(robject);
+ VM_OBJECT_WLOCK(robject);
temp = robject->backing_object;
if (object == temp) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retry;
}
} else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (robject->ref_count == 1) {
robject->ref_count--;
@@ -600,21 +601,21 @@ retry:
}
object = robject;
vm_object_collapse(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
- VM_OBJECT_UNLOCK(robject);
+ VM_OBJECT_WUNLOCK(robject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
doterm:
temp = object->backing_object;
if (temp != NULL) {
- VM_OBJECT_LOCK(temp);
+ VM_OBJECT_WLOCK(temp);
LIST_REMOVE(object, shadow_list);
temp->shadow_count--;
- VM_OBJECT_UNLOCK(temp);
+ VM_OBJECT_WUNLOCK(temp);
object->backing_object = NULL;
}
/*
@@ -625,7 +626,7 @@ doterm:
if ((object->flags & OBJ_DEAD) == 0)
vm_object_terminate(object);
else
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = temp;
}
}
@@ -677,7 +678,7 @@ vm_object_terminate(vm_object_t object)
{
vm_page_t p, p_next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Make sure no one uses us.
@@ -703,11 +704,11 @@ vm_object_terminate(vm_object_t object)
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vinvalbuf(vp, V_SAVE, 0, 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
KASSERT(object->ref_count == 0,
@@ -762,7 +763,7 @@ vm_object_terminate(vm_object_t object)
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_object_destroy(object);
}
@@ -818,7 +819,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
int curgeneration, n, pagerflags;
boolean_t clearobjflags, eio, res;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
@@ -904,7 +905,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
int count, i, mreq, runlen;
vm_page_lock_assert(p, MA_NOTOWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
count = 1;
mreq = 0;
@@ -962,11 +963,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
return (TRUE);
res = TRUE;
error = 0;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
object = backing_object;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
@@ -986,7 +987,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
@@ -1004,17 +1005,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
fsync_after = FALSE;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
res = vm_object_page_clean(object, offset, offset + size,
flags);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (fsync_after)
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
if (error != 0)
res = FALSE;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
@@ -1032,7 +1033,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
vm_object_page_remove(object, OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK), flags);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (res);
}
@@ -1067,7 +1068,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
if (object == NULL)
return;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
/*
* Locate and adjust resident pages
*/
@@ -1108,10 +1109,10 @@ shadowlookup:
backing_object = tobject->backing_object;
if (backing_object == NULL)
goto unlock_tobject;
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
tpindex += OFF_TO_IDX(tobject->backing_object_offset);
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
tobject = backing_object;
goto shadowlookup;
} else if (m->valid != VM_PAGE_BITS_ALL)
@@ -1139,10 +1140,10 @@ shadowlookup:
}
vm_page_unlock(m);
if (object != tobject)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(tobject, m, PDROP | PVM, "madvpo", 0);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto relookup;
}
if (advise == MADV_WILLNEED) {
@@ -1175,9 +1176,9 @@ shadowlookup:
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
if (tobject != object)
- VM_OBJECT_UNLOCK(tobject);
+ VM_OBJECT_WUNLOCK(tobject);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -1205,15 +1206,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
return;
}
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
/*
@@ -1238,7 +1239,7 @@ vm_object_shadow(
*/
result->backing_object_offset = *offset;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
source->shadow_count++;
#if VM_NRESERVLEVEL > 0
@@ -1246,7 +1247,7 @@ vm_object_shadow(
result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
((1 << (VM_NFREEORDER - 1)) - 1);
#endif
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
}
@@ -1277,7 +1278,7 @@ vm_object_split(vm_map_entry_t entry)
return;
if (orig_object->ref_count <= 1)
return;
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
@@ -1292,17 +1293,17 @@ vm_object_split(vm_map_entry_t entry)
* At this point, the new object is still private, so the order in
* which the original and new objects are locked does not matter.
*/
- VM_OBJECT_LOCK(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(new_object);
+ VM_OBJECT_WLOCK(orig_object);
source = orig_object->backing_object;
if (source != NULL) {
- VM_OBJECT_LOCK(source);
+ VM_OBJECT_WLOCK(source);
if ((source->flags & OBJ_DEAD) != 0) {
- VM_OBJECT_UNLOCK(source);
- VM_OBJECT_UNLOCK(orig_object);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(source);
+ VM_OBJECT_WUNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(new_object);
vm_object_deallocate(new_object);
- VM_OBJECT_LOCK(orig_object);
+ VM_OBJECT_WLOCK(orig_object);
return;
}
LIST_INSERT_HEAD(&source->shadow_head,
@@ -1310,7 +1311,7 @@ vm_object_split(vm_map_entry_t entry)
source->shadow_count++;
vm_object_reference_locked(source); /* for new_object */
vm_object_clear_flag(source, OBJ_ONEMAPPING);
- VM_OBJECT_UNLOCK(source);
+ VM_OBJECT_WUNLOCK(source);
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
@@ -1337,10 +1338,10 @@ retry:
* not be changed by this operation.
*/
if ((m->oflags & VPO_BUSY) || m->busy) {
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(orig_object, m, PVM, "spltwt", 0);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
goto retry;
}
#if VM_NRESERVLEVEL > 0
@@ -1384,14 +1385,14 @@ retry:
vm_page_cache_transfer(orig_object, offidxstart,
new_object);
}
- VM_OBJECT_UNLOCK(orig_object);
+ VM_OBJECT_WUNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
vm_page_wakeup(m);
- VM_OBJECT_UNLOCK(new_object);
+ VM_OBJECT_WUNLOCK(new_object);
entry->object.vm_object = new_object;
entry->offset = 0LL;
vm_object_deallocate(orig_object);
- VM_OBJECT_LOCK(new_object);
+ VM_OBJECT_WLOCK(new_object);
}
#define OBSC_TEST_ALL_SHADOWED 0x0001
@@ -1406,8 +1407,8 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_object_t backing_object;
vm_pindex_t backing_offset_index;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1495,12 +1496,12 @@ vm_object_backing_scan(vm_object_t object, int op)
}
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->oflags & VPO_BUSY) || p->busy) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
p->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(backing_object, p,
PDROP | PVM, "vmocol", 0);
- VM_OBJECT_LOCK(object);
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(object);
+ VM_OBJECT_WLOCK(backing_object);
/*
* If we slept, anything could have
* happened. Since the object is
@@ -1627,8 +1628,8 @@ vm_object_qcollapse(vm_object_t object)
{
vm_object_t backing_object = object->backing_object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ VM_OBJECT_ASSERT_WLOCKED(backing_object);
if (backing_object->ref_count != 1)
return;
@@ -1646,7 +1647,7 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
while (TRUE) {
vm_object_t backing_object;
@@ -1663,7 +1664,7 @@ vm_object_collapse(vm_object_t object)
* we check the backing object first, because it is most likely
 * not collapsible.
*/
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
if (backing_object->handle != NULL ||
(backing_object->type != OBJT_DEFAULT &&
backing_object->type != OBJT_SWAP) ||
@@ -1672,7 +1673,7 @@ vm_object_collapse(vm_object_t object)
(object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP) ||
(object->flags & OBJ_DEAD)) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1681,7 +1682,7 @@ vm_object_collapse(vm_object_t object)
backing_object->paging_in_progress != 0
) {
vm_object_qcollapse(object);
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
/*
@@ -1742,7 +1743,7 @@ vm_object_collapse(vm_object_t object)
LIST_REMOVE(object, shadow_list);
backing_object->shadow_count--;
if (backing_object->backing_object) {
- VM_OBJECT_LOCK(backing_object->backing_object);
+ VM_OBJECT_WLOCK(backing_object->backing_object);
LIST_REMOVE(backing_object, shadow_list);
LIST_INSERT_HEAD(
&backing_object->backing_object->shadow_head,
@@ -1750,7 +1751,7 @@ vm_object_collapse(vm_object_t object)
/*
* The shadow_count has not changed.
*/
- VM_OBJECT_UNLOCK(backing_object->backing_object);
+ VM_OBJECT_WUNLOCK(backing_object->backing_object);
}
object->backing_object = backing_object->backing_object;
object->backing_object_offset +=
@@ -1766,7 +1767,7 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);
object_collapses++;
@@ -1780,7 +1781,7 @@ vm_object_collapse(vm_object_t object)
if (object->resident_page_count != object->size &&
vm_object_backing_scan(object,
OBSC_TEST_ALL_SHADOWED) == 0) {
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
break;
}
@@ -1794,7 +1795,7 @@ vm_object_collapse(vm_object_t object)
new_backing_object = backing_object->backing_object;
if ((object->backing_object = new_backing_object) != NULL) {
- VM_OBJECT_LOCK(new_backing_object);
+ VM_OBJECT_WLOCK(new_backing_object);
LIST_INSERT_HEAD(
&new_backing_object->shadow_head,
object,
@@ -1802,7 +1803,7 @@ vm_object_collapse(vm_object_t object)
);
new_backing_object->shadow_count++;
vm_object_reference_locked(new_backing_object);
- VM_OBJECT_UNLOCK(new_backing_object);
+ VM_OBJECT_WUNLOCK(new_backing_object);
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@@ -1812,7 +1813,7 @@ vm_object_collapse(vm_object_t object)
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
- VM_OBJECT_UNLOCK(backing_object);
+ VM_OBJECT_WUNLOCK(backing_object);
object_bypasses++;
}
@@ -1855,7 +1856,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
vm_page_t p, next;
int wirings;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
@@ -1950,7 +1951,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
@@ -1998,7 +1999,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_pindex_t pindex;
int rv;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
VM_ALLOC_RETRY);
@@ -2059,10 +2060,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object == NULL)
return (TRUE);
- VM_OBJECT_LOCK(prev_object);
+ VM_OBJECT_WLOCK(prev_object);
if (prev_object->type != OBJT_DEFAULT &&
prev_object->type != OBJT_SWAP) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2077,7 +2078,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
* pages not mapped to prev_entry may be in use anyway)
*/
if (prev_object->backing_object != NULL) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2087,7 +2088,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if ((prev_object->ref_count > 1) &&
(prev_object->size != next_pindex)) {
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@@ -2141,7 +2142,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (next_pindex + next_size > prev_object->size)
prev_object->size = next_pindex + next_size;
- VM_OBJECT_UNLOCK(prev_object);
+ VM_OBJECT_WUNLOCK(prev_object);
return (TRUE);
}
@@ -2149,7 +2150,7 @@ void
vm_object_set_writeable_dirty(vm_object_t object)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_VNODE)
return;
object->generation++;
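
For illustration: the vm_object.c hunks above are a mechanical rename, but they all follow one rule — whichever path acquired the write lock must release it before returning, including every early-exit branch (as in vm_object_coalesce() above). A minimal sketch of that shape using only the post-commit macros; coalesce_check() is a hypothetical helper, not kernel API:

	static boolean_t
	coalesce_check(vm_object_t prev_object)
	{

		VM_OBJECT_WLOCK(prev_object);
		if (prev_object->type != OBJT_DEFAULT &&
		    prev_object->type != OBJT_SWAP) {
			/* Early exit: this path must drop the lock too. */
			VM_OBJECT_WUNLOCK(prev_object);
			return (FALSE);
		}
		VM_OBJECT_WUNLOCK(prev_object);
		return (TRUE);
	}
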
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index d69e679..7598ea3 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -70,6 +70,7 @@
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
+#include <sys/_rwlock.h>
#include <vm/_vm_radix.h>
@@ -78,9 +79,9 @@
*
* vm_object_t Virtual memory object.
*
- * The root of cached pages pool is protected by both the per-object mutex
+ * The root of cached pages pool is protected by both the per-object lock
* and the free pages queue mutex.
- * On insert in the cache radix trie, the per-object mutex is expected
+ * On insert in the cache radix trie, the per-object lock is expected
* to be already held and the free pages queue mutex will be
* acquired during the operation too.
* On remove and lookup from the cache radix trie, only the free
@@ -91,13 +92,13 @@
*
* List of locks
* (c) const until freed
- * (o) per-object mutex
+ * (o) per-object lock
* (f) free pages queue mutex
*
*/
struct vm_object {
- struct mtx mtx;
+ struct rwlock lock;
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
@@ -205,14 +206,26 @@ extern struct vm_object kmem_object_store;
#define kernel_object (&kernel_object_store)
#define kmem_object (&kmem_object_store)
-#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
-#define VM_OBJECT_LOCK_ASSERT(object, type) \
- mtx_assert(&(object)->mtx, (type))
-#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
- msleep((wchan), &(object)->mtx, (pri), \
- (wmesg), (timo))
-#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx)
-#define VM_OBJECT_UNLOCK(object) mtx_unlock(&(object)->mtx)
+#define VM_OBJECT_ASSERT_LOCKED(object) \
+ rw_assert(&(object)->lock, RA_LOCKED)
+#define VM_OBJECT_ASSERT_RLOCKED(object) \
+ rw_assert(&(object)->lock, RA_RLOCKED)
+#define VM_OBJECT_ASSERT_WLOCKED(object) \
+ rw_assert(&(object)->lock, RA_WLOCKED)
+#define VM_OBJECT_RLOCK(object) \
+ rw_rlock(&(object)->lock)
+#define VM_OBJECT_RUNLOCK(object) \
+ rw_runlock(&(object)->lock)
+#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
+ rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
+#define VM_OBJECT_TRYRLOCK(object) \
+ rw_try_rlock(&(object)->lock)
+#define VM_OBJECT_TRYWLOCK(object) \
+ rw_try_wlock(&(object)->lock)
+#define VM_OBJECT_WLOCK(object) \
+ rw_wlock(&(object)->lock)
+#define VM_OBJECT_WUNLOCK(object) \
+ rw_wunlock(&(object)->lock)
/*
* The object must be locked or thread private.
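
For illustration: the macro block above splits the old single VM_OBJECT_LOCK operation into shared (R) and exclusive (W) variants over an rwlock. Every call site converted in this diff takes the W variants, so the old mutual-exclusion semantics are preserved; the R path only pays off once a consumer is audited as read-only. A minimal sketch of such a read-only consumer, assuming the macros above; obj_resident_count() is a hypothetical name:

	static int
	obj_resident_count(vm_object_t object)
	{
		int count;

		/* Shared lock: concurrent readers are allowed. */
		VM_OBJECT_RLOCK(object);
		count = object->resident_page_count;
		VM_OBJECT_RUNLOCK(object);
		return (count);
	}
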
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 47ffc31..e51a28d 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@@ -469,7 +470,7 @@ void
vm_page_busy(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_busy: page already busy!!!"));
m->oflags |= VPO_BUSY;
@@ -484,7 +485,7 @@ void
vm_page_flash(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->oflags & VPO_WANTED) {
m->oflags &= ~VPO_WANTED;
wakeup(m);
@@ -502,7 +503,7 @@ void
vm_page_wakeup(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -512,7 +513,7 @@ void
vm_page_io_start(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
m->busy++;
}
@@ -520,7 +521,7 @@ void
vm_page_io_finish(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
m->busy--;
if (m->busy == 0)
@@ -752,7 +753,7 @@ void
vm_page_sleep(vm_page_t m, const char *msg)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (mtx_owned(vm_page_lockptr(m)))
vm_page_unlock(m);
@@ -810,7 +811,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
vm_page_t neighbor;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->object != NULL)
panic("vm_page_insert: page already inserted");
@@ -876,7 +877,7 @@ vm_page_remove(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((object = m->object) == NULL)
return;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (m->oflags & VPO_BUSY) {
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@@ -914,7 +915,7 @@ vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
return (vm_radix_lookup(&object->rtree, pindex));
}
@@ -931,7 +932,7 @@ vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
m = vm_radix_lookup_ge(&object->rtree, pindex);
return (m);
@@ -948,7 +949,7 @@ vm_page_next(vm_page_t m)
{
vm_page_t next;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((next = TAILQ_NEXT(m, listq)) != NULL &&
next->pindex != m->pindex + 1)
next = NULL;
@@ -966,7 +967,7 @@ vm_page_prev(vm_page_t m)
{
vm_page_t prev;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
prev->pindex != m->pindex - 1)
prev = NULL;
@@ -1094,7 +1095,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* requires the object to be locked. In contrast, removal does
* not.
*/
- VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
KASSERT(vm_object_cache_is_empty(new_object),
("vm_page_cache_transfer: object %p has cached pages",
new_object));
@@ -1135,7 +1136,7 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
* page queues lock in order to prove that the specified page doesn't
* exist.
*/
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if (__predict_true(vm_object_cache_is_empty(object)))
return (FALSE);
mtx_lock(&vm_page_queue_free_mtx);
@@ -1184,7 +1185,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc: inconsistent object/req"));
if (object != NULL)
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
req_class = req & VM_ALLOC_CLASS_MASK;
@@ -1392,7 +1393,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc_contig: inconsistent object/req"));
if (object != NULL) {
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_PHYS,
("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
object));
@@ -1803,7 +1804,7 @@ vm_page_activate(vm_page_t m)
int queue;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((queue = m->queue) != PQ_ACTIVE) {
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@@ -2087,7 +2088,7 @@ vm_page_try_to_cache(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2110,7 +2111,7 @@ vm_page_try_to_free(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if (m->object != NULL)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@@ -2136,7 +2137,7 @@ vm_page_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
@@ -2241,7 +2242,7 @@ vm_page_dontneed(vm_page_t m)
int head;
vm_page_lock_assert(m, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
dnw = PCPU_GET(dnweight);
PCPU_INC(dnweight);
@@ -2306,7 +2307,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
("vm_page_grab: VM_ALLOC_RETRY is required"));
retrylookup:
@@ -2335,9 +2336,9 @@ retrylookup:
m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
VM_ALLOC_IGN_SBUSY));
if (m == NULL) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
goto retrylookup;
} else if (m->valid != 0)
return (m);
@@ -2387,7 +2388,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
{
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2440,7 +2441,7 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
* write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
@@ -2494,7 +2495,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
vm_page_bits_t oldvalid, pagebits;
int endoff, frag;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@@ -2584,7 +2585,7 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_set_invalid: page %p is busy", m));
bits = vm_page_bits(base, size);
@@ -2613,7 +2614,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
int b;
int i;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
/*
* Scan the valid bits looking for invalid sections that
* must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the
@@ -2652,7 +2653,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
bits = vm_page_bits(base, size);
if (m->valid && ((m->valid & bits) == bits))
return 1;
@@ -2667,7 +2668,7 @@ void
vm_page_test_dirty(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
vm_page_dirty(m);
}
@@ -2721,7 +2722,7 @@ vm_page_cowfault(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->paging_in_progress != 0,
("vm_page_cowfault: object %p's paging-in-progress count is zero.",
object));
@@ -2734,9 +2735,9 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VM_WAIT;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (m == vm_page_lookup(object, pindex)) {
vm_page_lock(m);
goto retry_alloc;
@@ -2793,11 +2794,11 @@ vm_page_cowsetup(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->oflags & VPO_UNMANAGED) != 0 ||
- m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
+ m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
return (EBUSY);
m->cow++;
pmap_remove_write(m);
- VM_OBJECT_UNLOCK(m->object);
+ VM_OBJECT_WUNLOCK(m->object);
return (0);
}
@@ -2814,7 +2815,7 @@ vm_page_object_lock_assert(vm_page_t m)
* here.
*/
if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
}
#endif
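
For illustration: vm_page_grab() and vm_page_cowfault() above both use the sleep-and-retry idiom — the object write lock cannot be held across VM_WAIT, so it is dropped, the thread sleeps for free pages, and the lookup is redone from scratch after relocking, since the object may have changed meanwhile. A condensed sketch under those assumptions; grab_or_wait() is a hypothetical wrapper:

	static vm_page_t
	grab_or_wait(vm_object_t object, vm_pindex_t pindex)
	{
		vm_page_t m;

		VM_OBJECT_ASSERT_WLOCKED(object);
	retrylookup:
		if ((m = vm_page_lookup(object, pindex)) != NULL)
			return (m);
		m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
		if (m == NULL) {
			/* Cannot sleep with the object lock held. */
			VM_OBJECT_WUNLOCK(object);
			VM_WAIT;
			VM_OBJECT_WLOCK(object);
			/* State may have changed while unlocked. */
			goto retrylookup;
		}
		return (m);
	}
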
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index ac593a4..c0a0da4 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
+#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -248,7 +249,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
/*
* vm_pageout_fallback_object_lock:
*
- * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
+ * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
* known to have failed and page queue must be either PQ_ACTIVE or
* PQ_INACTIVE. To avoid lock order violation, unlock the page queues
* while locking the vm object. Use marker page to detect page queue
@@ -276,7 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
@@ -346,7 +347,7 @@ vm_pageout_clean(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@@ -484,7 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
int numpagedout = 0;
int i, runlen;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
@@ -595,12 +596,12 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
continue;
}
object = m->object;
- if ((!VM_OBJECT_TRYLOCK(object) &&
+ if ((!VM_OBJECT_TRYWLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
m->busy != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
vm_page_test_dirty(m);
@@ -609,19 +610,19 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
if (object->type == OBJT_VNODE) {
vm_pagequeue_unlock(pq);
vp = object->handle;
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
(void)vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vm_object_deallocate(object);
vn_finished_write(mp);
@@ -632,7 +633,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
0, NULL, NULL);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (TRUE);
}
} else {
@@ -644,7 +645,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
vm_page_cache(m);
vm_page_unlock(m);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vm_pagequeue_unlock(pq);
return (FALSE);
@@ -713,13 +714,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_t p;
int actcount, remove_mode;
- VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
return;
for (object = first_object;; object = backing_object) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
if ((object->flags & OBJ_UNMANAGED) != 0 ||
object->paging_in_progress != 0)
goto unlock_return;
@@ -775,13 +776,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;
- VM_OBJECT_LOCK(backing_object);
+ VM_OBJECT_WLOCK(backing_object);
if (object != first_object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
unlock_return:
if (object != first_object)
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -811,15 +812,15 @@ vm_pageout_map_deactivate_pages(map, desired)
while (tmpe != &map->header) {
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
- if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
+ if (obj != NULL && VM_OBJECT_TRYWLOCK(obj)) {
if (obj->shadow_count <= 1 &&
(bigobj == NULL ||
bigobj->resident_page_count < obj->resident_page_count)) {
if (bigobj != NULL)
- VM_OBJECT_UNLOCK(bigobj);
+ VM_OBJECT_WUNLOCK(bigobj);
bigobj = obj;
} else
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
}
if (tmpe->wired_count > 0)
@@ -829,7 +830,7 @@ vm_pageout_map_deactivate_pages(map, desired)
if (bigobj != NULL) {
vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
- VM_OBJECT_UNLOCK(bigobj);
+ VM_OBJECT_WUNLOCK(bigobj);
}
/*
* Next, hunt around for other pages to deactivate. We actually
@@ -842,9 +843,9 @@ vm_pageout_map_deactivate_pages(map, desired)
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL) {
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
}
tmpe = tmpe->next;
@@ -963,10 +964,10 @@ vm_pageout_scan(int pass)
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
continue;
}
@@ -979,7 +980,7 @@ vm_pageout_scan(int pass)
*/
if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
addl_page_shortage++;
continue;
}
@@ -1016,7 +1017,7 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
@@ -1032,13 +1033,13 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE + 1;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
if (m->hold_count != 0) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Held pages are essentially stuck in the
@@ -1122,7 +1123,7 @@ vm_pageout_scan(int pass)
if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
vm_pagequeue_lock(pq);
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
queues_locked = TRUE;
vm_page_requeue_locked(m);
goto relock_queues;
@@ -1165,17 +1166,17 @@ vm_pageout_scan(int pass)
KASSERT(mp != NULL,
("vp %p with NULL v_mount", vp));
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
curthread)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
++pageout_lock_miss;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
vp = NULL;
goto unlock_and_continue;
}
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
queues_locked = TRUE;
@@ -1236,7 +1237,7 @@ vm_pageout_scan(int pass)
}
unlock_and_continue:
vm_page_lock_assert(m, MA_NOTOWNED);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (mp != NULL) {
if (queues_locked) {
vm_pagequeue_unlock(pq);
@@ -1251,7 +1252,7 @@ unlock_and_continue:
goto relock_queues;
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
relock_queues:
if (!queues_locked) {
vm_pagequeue_lock(pq);
@@ -1299,9 +1300,9 @@ relock_queues:
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@@ -1314,7 +1315,7 @@ relock_queues:
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@@ -1375,7 +1376,7 @@ relock_queues:
vm_page_requeue_locked(m);
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
@@ -1571,9 +1572,9 @@ vm_pageout_page_stats(void)
continue;
}
object = m->object;
- if (!VM_OBJECT_TRYLOCK(object) &&
+ if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@@ -1586,7 +1587,7 @@ vm_pageout_page_stats(void)
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@@ -1625,7 +1626,7 @@ vm_pageout_page_stats(void)
}
}
vm_page_unlock(m);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
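
For illustration: the pageout scans above already hold page and page-queue locks, so blocking on the object lock would violate the lock order. Hence the recurring pattern: try the write lock, and on failure fall back to vm_pageout_fallback_object_lock(), which drops the queue lock, blocks on the object lock, and reports whether the page is still the one the scan started with. A condensed sketch of that shape; lock_object_for_page() is a hypothetical helper, placed notionally in vm_pageout.c beside the fallback function:

	static boolean_t
	lock_object_for_page(vm_page_t m, vm_page_t *next)
	{
		vm_object_t object;

		object = m->object;
		if (!VM_OBJECT_TRYWLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, next)) {
			/*
			 * The fallback path still leaves the object
			 * write-locked; the page merely changed identity
			 * while the locks were dropped.
			 */
			vm_page_unlock(m);
			VM_OBJECT_WUNLOCK(object);
			return (FALSE);
		}
		return (TRUE);
	}
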
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6ed64ea..a991e41 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
+#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -248,7 +249,7 @@ vm_pager_deallocate(object)
vm_object_t object;
{
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_dealloc) (object);
}
@@ -272,13 +273,13 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
TAILQ_FOREACH(object, pg_list, pager_object_list) {
if (object->handle == handle) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(object);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
break;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
}
return (object);
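
For illustration: vm_pager_object_lookup() above encodes the rule that a reference may only be handed out under the object lock and only for a live object — an OBJ_DEAD object is mid-termination, and referencing it would race vm_object_terminate(). A condensed sketch of that check, assuming the same list layout; handle_lookup() is a hypothetical name:

	vm_object_t
	handle_lookup(struct pagerlst *pg_list, void *handle)
	{
		vm_object_t object;

		TAILQ_FOREACH(object, pg_list, pager_object_list) {
			if (object->handle != handle)
				continue;
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				/* Take the ref before dropping the lock. */
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				return (object);
			}
			/* Dead object: keep scanning for a live match. */
			VM_OBJECT_WUNLOCK(object);
		}
		return (NULL);
	}
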
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index bb7a5ec..b5d923c 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -124,7 +124,7 @@ vm_pager_get_pages(
) {
int r;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
@@ -141,7 +141,7 @@ vm_pager_put_pages(
int *rtvals
) {
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
}
@@ -165,7 +165,7 @@ vm_pager_has_page(
) {
boolean_t ret;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
return (ret);
@@ -188,7 +188,7 @@ static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
- VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index eb14411..bb071bd 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
+#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -312,7 +313,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
int i, index, n;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
/*
@@ -485,7 +486,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
vm_reserv_t rv;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Is a reservation fundamentally impossible?
@@ -849,7 +850,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
{
vm_reserv_t rv;
- VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(new_object);
rv = vm_reserv_from_page(m);
if (rv->object == old_object) {
mtx_lock(&vm_page_queue_free_mtx);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 4c678f4..5e331ee 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
+#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <machine/atomic.h>
@@ -109,9 +110,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
return (0);
while ((object = vp->v_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (!(object->flags & OBJ_DEAD)) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (0);
}
VOP_UNLOCK(vp, 0);
@@ -135,9 +136,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
object->ref_count--;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
vrele(vp);
KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@@ -154,7 +155,7 @@ vnode_destroy_vobject(struct vnode *vp)
if (obj == NULL)
return;
ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@@ -167,13 +168,13 @@ vnode_destroy_vobject(struct vnode *vp)
if ((obj->flags & OBJ_DEAD) == 0)
vm_object_terminate(obj);
else
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
vp->v_object = NULL;
}
@@ -206,7 +207,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
retry:
while ((object = vp->v_object) != NULL) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0)
break;
vm_object_set_flag(object, OBJ_DISCONNECTWNT);
@@ -239,7 +240,7 @@ retry:
VI_UNLOCK(vp);
} else {
object->ref_count++;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
vref(vp);
return (object);
@@ -259,7 +260,7 @@ vnode_pager_dealloc(object)
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "vnpdea");
refs = object->ref_count;
@@ -278,10 +279,10 @@ vnode_pager_dealloc(object)
}
vp->v_object = NULL;
VOP_UNSET_TEXT(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
while (refs-- > 0)
vunref(vp);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
static boolean_t
@@ -299,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@@ -322,9 +323,9 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (err)
return TRUE;
if (bn == -1)
@@ -379,12 +380,12 @@ vnode_pager_setsize(vp, nsize)
if ((object = vp->v_object) == NULL)
return;
/* ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (nsize == object->un_pager.vnp.vnp_size) {
/*
* Hasn't changed size
*/
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
@@ -445,7 +446,7 @@ vnode_pager_setsize(vp, nsize)
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
/*
@@ -568,9 +569,9 @@ vnode_pager_input_smlfs(object, m)
bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
KASSERT((m->dirty & bits) == 0,
("vnode_pager_input_smlfs: page %p is dirty", m));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
m->valid |= bits;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
sf_buf_free(sf);
if (error) {
@@ -594,7 +595,7 @@ vnode_pager_input_old(object, m)
struct sf_buf *sf;
struct vnode *vp;
- VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+ VM_OBJECT_ASSERT_WLOCKED(object);
error = 0;
/*
@@ -607,7 +608,7 @@ vnode_pager_input_old(object, m)
if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* Allocate a kernel virtual address and initialize so that
@@ -637,7 +638,7 @@ vnode_pager_input_old(object, m)
}
sf_buf_free(sf);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
if (!error)
@@ -669,11 +670,11 @@ vnode_pager_getpages(object, m, count, reqpage)
int bytes = count * PAGE_SIZE;
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: FS getpages not implemented\n"));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
return rtval;
}
@@ -723,7 +724,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
if (error == EOPNOTSUPP) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
@@ -734,17 +735,17 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (error);
} else if (error != 0) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
/*
@@ -754,14 +755,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
@@ -772,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* clean up and return. Otherwise we have to re-read the
* media.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++)
if (i != reqpage) {
@@ -780,7 +781,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return VM_PAGER_OK;
} else if (reqblock == -1) {
pmap_zero_page(m[reqpage]);
@@ -793,11 +794,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
m[reqpage]->valid = 0;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* here on direct device I/O
@@ -810,18 +811,18 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (first = 0, i = 0; i < count; i = runend) {
if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
&runpg) != 0) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
if (firstaddr == -1) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
(intmax_t)firstaddr, (uintmax_t)(foff >> 32),
@@ -833,29 +834,29 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
runend = i + 1;
first = runend;
continue;
}
runend = i + runpg;
if (runend <= reqpage) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (j = i; j < runend; j++) {
vm_page_lock(m[j]);
vm_page_free(m[j]);
vm_page_unlock(m[j]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
} else {
if (runpg < (count - first)) {
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = first + runpg; i < count; i++) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
count = first + runpg;
}
break;
@@ -946,7 +947,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
pbrelbo(bp);
relpbuf(bp, &vnode_pbuf_freecnt);
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;
@@ -983,7 +984,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_readahead_finish(mt);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
if (error) {
printf("vnode_pager_getpages: I/O read error\n");
}
@@ -1029,11 +1030,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* Call device-specific putpages function
*/
vp = object->handle;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
}
@@ -1095,7 +1096,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
* We do not under any circumstances truncate the valid bits, as
* this will screw up bogus page replacement.
*/
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > poffset) {
int pgoff;
@@ -1127,7 +1128,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
}
}
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
/*
* pageouts are already clustered, use IO_ASYNC to force a bawrite()
@@ -1181,7 +1182,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
if (written == 0)
return;
obj = ma[0]->object;
- VM_OBJECT_LOCK(obj);
+ VM_OBJECT_WLOCK(obj);
for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
if (pos < trunc_page(written)) {
rtvals[i] = VM_PAGER_OK;
@@ -1192,7 +1193,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
}
}
- VM_OBJECT_UNLOCK(obj);
+ VM_OBJECT_WUNLOCK(obj);
}
void
@@ -1202,9 +1203,9 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
struct vnode *vp;
vm_ooffset_t old_wm;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
if (object->type != OBJT_VNODE) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
old_wm = object->un_pager.vnp.writemappings;
@@ -1221,7 +1222,7 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
__func__, vp, vp->v_writecount);
}
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
}
void
@@ -1232,14 +1233,14 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
struct mount *mp;
vm_offset_t inc;
- VM_OBJECT_LOCK(object);
+ VM_OBJECT_WLOCK(object);
/*
* First, recheck the object type to account for the race when
* the vnode is reclaimed.
*/
if (object->type != OBJT_VNODE) {
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
@@ -1250,13 +1251,13 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
inc = end - start;
if (object->un_pager.vnp.writemappings != inc) {
object->un_pager.vnp.writemappings -= inc;
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
return;
}
vp = object->handle;
vhold(vp);
- VM_OBJECT_UNLOCK(object);
+ VM_OBJECT_WUNLOCK(object);
mp = NULL;
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
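
For illustration: vnode_pager.c above consistently drops the object write lock around VOP_BMAP(), VOP_GETPAGES(), and VOP_PUTPAGES() — the filesystem may sleep on I/O and may take vnode locks that rank ahead of the object lock, so neither may happen with the object lock held. A condensed sketch of the bracketing, mirroring vnode_pager_getpages(); do_getpages() is a hypothetical name:

	static int
	do_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
	{
		struct vnode *vp;
		int rtval;

		VM_OBJECT_ASSERT_WLOCKED(object);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);	/* FS may sleep or lock vp */
		rtval = VOP_GETPAGES(vp, m, count * PAGE_SIZE, reqpage, 0);
		VM_OBJECT_WLOCK(object);	/* callers expect it held */
		return (rtval);
	}
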