-rw-r--r--  sys/alpha/alpha/atomic.s        232
-rw-r--r--  sys/alpha/include/atomic.h       77
-rw-r--r--  sys/amd64/include/atomic.h       58
-rw-r--r--  sys/i386/include/atomic.h        58
-rw-r--r--  sys/kern/kern_exec.c             13
-rw-r--r--  sys/kern/kern_sysctl.c           86
-rw-r--r--  sys/kern/kern_xxx.c               9
-rw-r--r--  sys/kern/sys_generic.c           22
-rw-r--r--  sys/kern/sysv_shm.c               6
-rw-r--r--  sys/kern/vfs_bio.c               48
-rw-r--r--  sys/kern/vfs_cluster.c           14
-rw-r--r--  sys/sys/sysctl.h                 30
-rw-r--r--  sys/vm/swap_pager.c              14
-rw-r--r--  sys/vm/vm_fault.c                21
-rw-r--r--  sys/vm/vm_kern.c                  8
-rw-r--r--  sys/vm/vm_map.c                  38
-rw-r--r--  sys/vm/vm_meter.c                 6
-rw-r--r--  sys/vm/vm_mmap.c                  4
-rw-r--r--  sys/vm/vm_object.c               64
-rw-r--r--  sys/vm/vm_object.h               33
-rw-r--r--  sys/vm/vm_page.c                 22
-rw-r--r--  sys/vm/vm_page.h                 24
-rw-r--r--  sys/vm/vm_pageout.c              20
-rw-r--r--  sys/vm/vnode_pager.c              8
24 files changed, 714 insertions, 201 deletions
diff --git a/sys/alpha/alpha/atomic.s b/sys/alpha/alpha/atomic.s
new file mode 100644
index 0000000..5eb23b2
--- /dev/null
+++ b/sys/alpha/alpha/atomic.s
@@ -0,0 +1,232 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#include <machine/asm.h>
+
+ .text
+
+LEAF(atomic_set_8, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extbl t2, a0, t0
+ bis t0, a1, t0
+ insbl t0, a0, t0
+ mskbl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_set_8)
+
+LEAF(atomic_clear_8, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extbl t2, a0, t0
+ bic t0, a1, t0
+ insbl t0, a0, t0
+ mskbl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_clear_8)
+
+LEAF(atomic_add_8, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extbl t2, a0, t0
+ addl t0, a1, t0
+ insbl t0, a0, t0
+ mskbl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_add_8)
+
+LEAF(atomic_subtract_8, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extbl t2, a0, t0
+ subl t0, a1, t0
+ insbl t0, a0, t0
+ mskbl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_subtract_8)
+
+LEAF(atomic_set_16, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extwl t2, a0, t0
+ bis t0, a1, t0
+ inswl t0, a0, t0
+ mskwl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_set_16)
+
+LEAF(atomic_clear_16, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extwl t2, a0, t0
+ bic t0, a1, t0
+ inswl t0, a0, t0
+ mskwl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_clear_16)
+
+LEAF(atomic_add_16, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extwl t2, a0, t0
+ addl t0, a1, t0
+ inswl t0, a0, t0
+ mskwl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_add_16)
+
+LEAF(atomic_subtract_16, 2)
+ bic a0, 3, t1
+0: ldl_l t2, 0(t1)
+ extwl t2, a0, t0
+ subl t0, a1, t0
+ inswl t0, a0, t0
+ mskwl t2, a0, t2
+ or t2, t0, t0
+ stl_c t0, 0(t1)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_subtract_16)
+
+LEAF(atomic_set_32, 2)
+0: ldl_l t0, 0(a0)
+ bis t0, a1, t0
+ stl_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_set_32)
+
+LEAF(atomic_clear_32, 2)
+0: ldl_l t0, 0(a0)
+ bic t0, a1, t0
+ stl_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_clear_32)
+
+LEAF(atomic_add_32, 2)
+0: ldl_l t0, 0(a0)
+ addl t0, a1, t0
+ stl_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_add_32)
+
+LEAF(atomic_subtract_32, 2)
+0: ldl_l t0, 0(a0)
+ subl t0, a1, t0
+ stl_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_subtract_32)
+
+LEAF(atomic_set_64, 2)
+0: ldq_l t0, 0(a0)
+ bis t0, a1, t0
+ stq_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_set_64)
+
+LEAF(atomic_clear_64, 2)
+0: ldq_l t0, 0(a0)
+ bic t0, a1, t0
+ stq_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_clear_64)
+
+LEAF(atomic_add_64, 2)
+0: ldq_l t0, 0(a0)
+ addq t0, a1, t0
+ stq_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_add_64)
+
+LEAF(atomic_subtract_64, 2)
+0: ldq_l t0, 0(a0)
+ subq t0, a1, t0
+ stq_c t0, 0(a0)
+ beq t0, 1f
+ mb
+ RET
+1: br 0b
+ END(atomic_subtract_64)
+
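Each routine above follows the same Alpha load-locked/store-conditional pattern: load the word with ldl_l (or ldq_l), modify it in a register, try to store it back with stl_c (or stq_c), and branch back to retry if the conditional store fails because the location was written (or the lock was otherwise lost) in between; the trailing mb orders the update before later memory accesses. A rough C rendering of atomic_set_32, for illustration only; the __load_locked, __store_conditional and __memory_barrier helpers are hypothetical stand-ins for the instructions, so this sketch will not compile as-is:

    static void
    atomic_set_32_sketch(u_int32_t *p, u_int32_t v)
    {
            u_int32_t t;

            do {
                    t = __load_locked(p);             /* ldl_l: load and tag the word */
                    t |= v;                           /* bis: set the requested bits  */
            } while (__store_conditional(p, t) == 0); /* stl_c: 0 means the word was
                                                         touched since ldl_l, retry   */
            __memory_barrier();                       /* mb */
    }

The 8- and 16-bit entry points do the same read-modify-write on the aligned longword containing the byte or word, splicing the field in and out with extbl/insbl/mskbl (extwl/inswl/mskwl for words), since the locked load and conditional store only exist at 32- and 64-bit width.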
diff --git a/sys/alpha/include/atomic.h b/sys/alpha/include/atomic.h
new file mode 100644
index 0000000..25a27d5
--- /dev/null
+++ b/sys/alpha/include/atomic.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and SMP safe.
+ */
+
+void atomic_set_8(u_int8_t *, u_int8_t);
+void atomic_clear_8(u_int8_t *, u_int8_t);
+void atomic_add_8(u_int8_t *, u_int8_t);
+void atomic_subtract_8(u_int8_t *, u_int8_t);
+
+void atomic_set_16(u_int16_t *, u_int16_t);
+void atomic_clear_16(u_int16_t *, u_int16_t);
+void atomic_add_16(u_int16_t *, u_int16_t);
+void atomic_subtract_16(u_int16_t *, u_int16_t);
+
+void atomic_set_32(u_int32_t *, u_int32_t);
+void atomic_clear_32(u_int32_t *, u_int32_t);
+void atomic_add_32(u_int32_t *, u_int32_t);
+void atomic_subtract_32(u_int32_t *, u_int32_t);
+
+void atomic_set_64(u_int64_t *, u_int64_t);
+void atomic_clear_64(u_int64_t *, u_int64_t);
+void atomic_add_64(u_int64_t *, u_int64_t);
+void atomic_subtract_64(u_int64_t *, u_int64_t);
+
+#define atomic_set_char atomic_set_8
+#define atomic_clear_char atomic_clear_8
+#define atomic_add_char atomic_add_8
+#define atomic_subtract_char atomic_subtract_8
+
+#define atomic_set_short atomic_set_16
+#define atomic_clear_short atomic_clear_16
+#define atomic_add_short atomic_add_16
+#define atomic_subtract_short atomic_subtract_16
+
+#define atomic_set_int atomic_set_32
+#define atomic_clear_int atomic_clear_32
+#define atomic_add_int atomic_add_32
+#define atomic_subtract_int atomic_subtract_32
+
+#define atomic_set_long atomic_set_64
+#define atomic_clear_long atomic_clear_64
+#define atomic_add_long atomic_add_64
+#define atomic_subtract_long atomic_subtract_64
+
+#endif /* ! _MACHINE_ATOMIC_H_ */
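A minimal usage sketch of the interface declared above (not part of this commit; the structure and function names are made up): bumping and dropping a 32-bit reference count.

    #include <sys/types.h>
    #include <machine/atomic.h>

    struct foo_softc {                      /* hypothetical driver state */
            u_int32_t refcnt;
    };

    static void
    foo_ref(struct foo_softc *sc)
    {
            atomic_add_32(&sc->refcnt, 1);
    }

    static void
    foo_unref(struct foo_softc *sc)
    {
            atomic_subtract_32(&sc->refcnt, 1);
    }

Each width-suffixed function takes a pointer to the matching fixed-width type; the _char/_short/_int/_long macros simply map the usual C type names onto those widths for the platform.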
diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h
new file mode 100644
index 0000000..aae3484
--- /dev/null
+++ b/sys/amd64/include/atomic.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts.
+ *
+ * Note: these versions are not SMP safe.
+ */
+
+#define atomic_set_char(P, V) (*(u_char*)(P) |= (V))
+#define atomic_clear_char(P, V) (*(u_char*)(P) &= ~(V))
+#define atomic_add_char(P, V) (*(u_char*)(P) += (V))
+#define atomic_subtract_char(P, V) (*(u_char*)(P) -= (V))
+
+#define atomic_set_short(P, V) (*(u_short*)(P) |= (V))
+#define atomic_clear_short(P, V) (*(u_short*)(P) &= ~(V))
+#define atomic_add_short(P, V) (*(u_short*)(P) += (V))
+#define atomic_subtract_short(P, V) (*(u_short*)(P) -= (V))
+
+#define atomic_set_int(P, V) (*(u_int*)(P) |= (V))
+#define atomic_clear_int(P, V) (*(u_int*)(P) &= ~(V))
+#define atomic_add_int(P, V) (*(u_int*)(P) += (V))
+#define atomic_subtract_int(P, V) (*(u_int*)(P) -= (V))
+
+#define atomic_set_long(P, V) (*(u_long*)(P) |= (V))
+#define atomic_clear_long(P, V) (*(u_long*)(P) &= ~(V))
+#define atomic_add_long(P, V) (*(u_long*)(P) += (V))
+#define atomic_subtract_long(P, V) (*(u_long*)(P) -= (V))
+
+#endif /* ! _MACHINE_ATOMIC_H_ */
diff --git a/sys/i386/include/atomic.h b/sys/i386/include/atomic.h
new file mode 100644
index 0000000..aae3484
--- /dev/null
+++ b/sys/i386/include/atomic.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ */
+#ifndef _MACHINE_ATOMIC_H_
+#define _MACHINE_ATOMIC_H_
+
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts.
+ *
+ * Note: these versions are not SMP safe.
+ */
+
+#define atomic_set_char(P, V) (*(u_char*)(P) |= (V))
+#define atomic_clear_char(P, V) (*(u_char*)(P) &= ~(V))
+#define atomic_add_char(P, V) (*(u_char*)(P) += (V))
+#define atomic_subtract_char(P, V) (*(u_char*)(P) -= (V))
+
+#define atomic_set_short(P, V) (*(u_short*)(P) |= (V))
+#define atomic_clear_short(P, V) (*(u_short*)(P) &= ~(V))
+#define atomic_add_short(P, V) (*(u_short*)(P) += (V))
+#define atomic_subtract_short(P, V) (*(u_short*)(P) -= (V))
+
+#define atomic_set_int(P, V) (*(u_int*)(P) |= (V))
+#define atomic_clear_int(P, V) (*(u_int*)(P) &= ~(V))
+#define atomic_add_int(P, V) (*(u_int*)(P) += (V))
+#define atomic_subtract_int(P, V) (*(u_int*)(P) -= (V))
+
+#define atomic_set_long(P, V) (*(u_long*)(P) |= (V))
+#define atomic_clear_long(P, V) (*(u_long*)(P) &= ~(V))
+#define atomic_add_long(P, V) (*(u_long*)(P) += (V))
+#define atomic_subtract_long(P, V) (*(u_long*)(P) -= (V))
+
+#endif /* ! _MACHINE_ATOMIC_H_ */
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 41d5ca0..c77fe20 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: kern_exec.c,v 1.83 1998/06/07 17:11:33 dfr Exp $
+ * $Id: kern_exec.c,v 1.84 1998/07/15 06:19:33 bde Exp $
*/
#include <sys/param.h>
@@ -65,16 +65,11 @@
static long *exec_copyout_strings __P((struct image_params *));
-/*
- * XXX trouble here if sizeof(caddr_t) != sizeof(int), other parts
- * of the sysctl code also assumes this, and sizeof(int) == sizeof(long).
- */
static struct ps_strings *ps_strings = PS_STRINGS;
-SYSCTL_INT(_kern, KERN_PS_STRINGS, ps_strings, 0, &ps_strings, 0, "");
+SYSCTL_INTPTR(_kern, KERN_PS_STRINGS, ps_strings, 0, &ps_strings, 0, "");
static caddr_t usrstack = (caddr_t)USRSTACK;
-SYSCTL_INT(_kern, KERN_USRSTACK, usrstack, 0, &usrstack, 0, "");
-
+SYSCTL_INTPTR(_kern, KERN_USRSTACK, usrstack, 0, &usrstack, 0, "");
/*
* execsw_set is constructed for us by the linker. Each of the items
* is a pointer to a `const struct execsw', hence the double pointer here.
@@ -375,7 +370,7 @@ exec_map_first_page(imgp)
break;
if (ma[i]->valid)
break;
- ma[i]->flags |= PG_BUSY;
+ PAGE_SET_FLAG(ma[i], PG_BUSY);
} else {
ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL);
if (ma[i] == NULL)
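The kern_exec.c hunk above replaces SYSCTL_INT with the new SYSCTL_INTPTR for ps_strings and usrstack because both variables hold pointer-sized values, which is exactly the sizeof(caddr_t) != sizeof(int) trouble the deleted XXX comment warned about: on the 64-bit Alpha port an int-sized handler would export only half of each value. A standalone illustration of the size mismatch (ordinary userland C, not from the commit):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* On ILP32 all three agree; on an LP64 port such as Alpha only
               the pointer-sized types are wide enough to carry usrstack. */
            printf("int=%zu void*=%zu intptr_t=%zu\n",
                sizeof(int), sizeof(void *), sizeof(intptr_t));
            return (0);
    }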
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index b1dc38f..df744f5 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
- * $Id: kern_sysctl.c,v 1.73 1997/11/06 19:29:15 phk Exp $
+ * $Id: kern_sysctl.c,v 1.74 1997/12/16 17:40:20 eivind Exp $
*/
#include "opt_compat.h"
@@ -514,6 +514,60 @@ sysctl_handle_int SYSCTL_HANDLER_ARGS
}
/*
+ * Handle a long, signed or unsigned.
+ * Two cases:
+ * a variable: point arg1 at it.
+ * a constant: pass it in arg2.
+ */
+
+int
+sysctl_handle_long SYSCTL_HANDLER_ARGS
+{
+ int error = 0;
+
+ if (arg1)
+ error = SYSCTL_OUT(req, arg1, sizeof(long));
+ else
+ error = SYSCTL_OUT(req, &arg2, sizeof(long));
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (!arg1)
+ error = EPERM;
+ else
+ error = SYSCTL_IN(req, arg1, sizeof(long));
+ return (error);
+}
+
+/*
+ * Handle a pointer-sized integer (intptr_t), signed or unsigned.
+ * Two cases:
+ * a variable: point arg1 at it.
+ * a constant: pass it in arg2.
+ */
+
+int
+sysctl_handle_intptr SYSCTL_HANDLER_ARGS
+{
+ int error = 0;
+
+ if (arg1)
+ error = SYSCTL_OUT(req, arg1, sizeof(intptr_t));
+ else
+ error = SYSCTL_OUT(req, &arg2, sizeof(intptr_t));
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (!arg1)
+ error = EPERM;
+ else
+ error = SYSCTL_IN(req, arg1, sizeof(intptr_t));
+ return (error);
+}
+
+/*
* Handle our generic '\0' terminated 'C' string.
* Two cases:
* a variable string: point arg1 at it, arg2 is max length.
@@ -566,12 +620,14 @@ sysctl_handle_opaque SYSCTL_HANDLER_ARGS
* XXX: rather untested at this point
*/
static int
-sysctl_old_kernel(struct sysctl_req *req, const void *p, int l)
+sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
{
- int i = 0;
+ size_t i = 0;
if (req->oldptr) {
- i = min(req->oldlen - req->oldidx, l);
+ i = l;
+ if (i > req->oldlen - req->oldidx)
+ i = req->oldlen - req->oldidx;
if (i > 0)
bcopy(p, (char *)req->oldptr + req->oldidx, i);
}
@@ -582,7 +638,7 @@ sysctl_old_kernel(struct sysctl_req *req, const void *p, int l)
}
static int
-sysctl_new_kernel(struct sysctl_req *req, void *p, int l)
+sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
{
if (!req->newptr)
return 0;
@@ -594,7 +650,7 @@ sysctl_new_kernel(struct sysctl_req *req, void *p, int l)
}
int
-kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, int *retval)
+kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval)
{
int error = 0;
struct sysctl_req req;
@@ -656,16 +712,19 @@ kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldle
* Transfer function to/from user space.
*/
static int
-sysctl_old_user(struct sysctl_req *req, const void *p, int l)
+sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
- int error = 0, i = 0;
+ int error = 0;
+ size_t i = 0;
if (req->lock == 1 && req->oldptr) {
vslock(req->oldptr, req->oldlen);
req->lock = 2;
}
if (req->oldptr) {
- i = min(req->oldlen - req->oldidx, l);
+ i = l;
+ if (i > req->oldlen - req->oldidx)
+ i = req->oldlen - req->oldidx;
if (i > 0)
error = copyout(p, (char *)req->oldptr + req->oldidx,
i);
@@ -679,7 +738,7 @@ sysctl_old_user(struct sysctl_req *req, const void *p, int l)
}
static int
-sysctl_new_user(struct sysctl_req *req, void *p, int l)
+sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
{
int error;
@@ -773,7 +832,8 @@ struct sysctl_args {
int
__sysctl(struct proc *p, struct sysctl_args *uap)
{
- int error, i, j, name[CTL_MAXNAME];
+ int error, i, name[CTL_MAXNAME];
+ size_t j;
if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
return (EINVAL);
@@ -800,7 +860,7 @@ __sysctl(struct proc *p, struct sysctl_args *uap)
* must be in kernel space.
*/
int
-userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, int *retval)
+userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval)
{
int error = 0;
struct sysctl_req req, req2;
@@ -944,7 +1004,7 @@ int
ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
{
int error, name[6];
- u_int size;
+ size_t size;
switch (uap->op & 0xff00) {
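Several hunks above open-code what used to be a min() call. The likely motivation (an inference, not stated in the commit): the kernel's min() takes u_int operands, so size_t values passed through it would be truncated on 64-bit platforms, whereas the explicit comparison keeps the full width. A trivial sketch of the width-preserving form:

    #include <stddef.h>

    static size_t
    size_t_min(size_t a, size_t b)
    {
            /* compare at full size_t width instead of narrowing to u_int */
            return (a < b ? a : b);
    }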
diff --git a/sys/kern/kern_xxx.c b/sys/kern/kern_xxx.c
index 8a61710..b7cb83b 100644
--- a/sys/kern/kern_xxx.c
+++ b/sys/kern/kern_xxx.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_xxx.c 8.2 (Berkeley) 11/14/93
- * $Id: kern_xxx.c,v 1.26 1997/11/06 19:29:18 phk Exp $
+ * $Id: kern_xxx.c,v 1.27 1997/12/16 17:40:21 eivind Exp $
*/
#include "opt_compat.h"
@@ -60,10 +60,11 @@ ogethostname(p, uap)
struct gethostname_args *uap;
{
int name[2];
+ size_t len = uap->len;
name[0] = CTL_KERN;
name[1] = KERN_HOSTNAME;
- return (userland_sysctl(p, name, 2, uap->hostname, &uap->len,
+ return (userland_sysctl(p, name, 2, uap->hostname, &len,
1, 0, 0, 0));
}
@@ -149,7 +150,8 @@ uname(p, uap)
struct proc *p;
struct uname_args *uap;
{
- int name[2], len, rtval;
+ int name[2], rtval;
+ size_t len;
char *s, *us;
name[0] = CTL_KERN;
@@ -197,6 +199,7 @@ uname(p, uap)
if( rtval)
return rtval;
+ name[0] = CTL_HW;
name[1] = HW_MACHINE;
len = sizeof uap->name->machine;
rtval = userland_sysctl(p, name, 2, uap->name->machine, &len,
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index f8adb85..3229829 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
- * $Id: sys_generic.c,v 1.38 1998/05/17 11:52:51 phk Exp $
+ * $Id: sys_generic.c,v 1.39 1998/06/10 10:29:31 dfr Exp $
*/
#include "opt_ktrace.h"
@@ -61,6 +61,8 @@
#include <sys/ktrace.h>
#endif
+#include <machine/limits.h>
+
static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
@@ -102,11 +104,9 @@ read(p, uap)
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = -1;
-
- auio.uio_resid = uap->nbyte;
- if (auio.uio_resid < 0)
+ if (uap->nbyte > INT_MAX)
return (EINVAL);
-
+ auio.uio_resid = uap->nbyte;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_procp = p;
@@ -183,11 +183,11 @@ readv(p, uap)
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
- auio.uio_resid += iov->iov_len;
- if (auio.uio_resid < 0) {
+ if (iov->iov_len > INT_MAX - auio.uio_resid) {
error = EINVAL;
goto done;
}
+ auio.uio_resid += iov->iov_len;
iov++;
}
#ifdef KTRACE
@@ -253,6 +253,8 @@ write(p, uap)
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = -1;
+ if (uap->nbyte > INT_MAX)
+ return (EINVAL);
auio.uio_resid = uap->nbyte;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
@@ -334,11 +336,11 @@ writev(p, uap)
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
- auio.uio_resid += iov->iov_len;
- if (auio.uio_resid < 0) {
+ if (iov->iov_len > INT_MAX - auio.uio_resid) {
error = EINVAL;
goto done;
}
+ auio.uio_resid += iov->iov_len;
iov++;
}
#ifdef KTRACE
@@ -380,7 +382,7 @@ done:
#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
int fd;
- int com;
+ u_long com;
caddr_t data;
};
#endif
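The read/readv/write/writev changes in this file move the overflow check ahead of the addition: rather than summing iov_len values into the signed uio_resid and then testing for a negative total (which relies on signed overflow, undefined behaviour in C), each length is checked against the room left below INT_MAX before it is added. A small self-contained sketch of the pattern (illustration only; the names are made up):

    #include <limits.h>
    #include <stddef.h>

    /* Sum the lengths, refusing any total that would exceed INT_MAX. */
    static int
    sum_iov_lengths(const size_t *len, int n, int *totalp)
    {
            int total = 0, i;

            for (i = 0; i < n; i++) {
                    if (len[i] > (size_t)(INT_MAX - total))
                            return (-1);    /* caller would return EINVAL */
                    total += (int)len[i];
            }
            *totalp = total;
            return (0);
    }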
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 25e03a2..8aabc9a 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -1,4 +1,4 @@
-/* $Id: sysv_shm.c,v 1.36 1998/05/04 03:01:37 dyson Exp $ */
+/* $Id: sysv_shm.c,v 1.37 1998/05/04 17:12:47 dyson Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
@@ -503,8 +503,8 @@ shmget_allocate_segment(p, uap, mode)
shm_handle->shm_object =
vm_pager_allocate(OBJT_SWAP, 0, OFF_TO_IDX(size),
VM_PROT_DEFAULT, 0);
- shm_handle->shm_object->flags &= ~OBJ_ONEMAPPING;
- shm_handle->shm_object->flags |= OBJ_NOSPLIT;
+ vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
+ vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
shmseg->shm_internal = shm_handle;
shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 8a759c0..dd63c5c 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
- * $Id: vfs_bio.c,v 1.168 1998/08/06 08:33:18 dfr Exp $
+ * $Id: vfs_bio.c,v 1.169 1998/08/13 08:09:07 dfr Exp $
*/
/*
@@ -648,7 +648,7 @@ brelse(struct buf * bp)
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
if (m == bogus_page) {
obj = (vm_object_t) vp->v_object;
@@ -835,9 +835,9 @@ vfs_vmio_release(bp)
vm_page_cache(m);
else
vm_page_deactivate(m);
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
} else if (m->hold_count == 0) {
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
}
@@ -847,7 +847,7 @@ vfs_vmio_release(bp)
* act_count.
*/
m->act_count = 0;
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
}
}
}
@@ -1357,7 +1357,7 @@ vfs_setdirty(struct buf *bp) {
* by users through the VM system.
*/
for (i = 0; i < bp->b_npages; i++) {
- bp->b_pages[i]->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(bp->b_pages[i], PG_ZERO);
vm_page_test_dirty(bp->b_pages[i]);
}
@@ -1789,13 +1789,13 @@ allocbuf(struct buf * bp, int size)
}
vm_page_wire(m);
- m->flags &= ~PG_BUSY;
+ PAGE_CLEAR_FLAG(m, PG_BUSY);
bp->b_flags &= ~B_CACHE;
} else if (m->flags & PG_BUSY) {
s = splvm();
if (m->flags & PG_BUSY) {
- m->flags |= PG_WANTED;
+ PAGE_SET_FLAG(m, PG_WANTED);
tsleep(m, PVM, "pgtblk", 0);
}
splx(s);
@@ -1812,7 +1812,7 @@ allocbuf(struct buf * bp, int size)
bytesinpage = newbsize - toff;
if (bp->b_flags & B_CACHE)
vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
vm_page_wire(m);
}
bp->b_pages[pageindex] = m;
@@ -1971,7 +1971,7 @@ biodone(register struct buf * bp)
#if defined(VFS_BIO_DEBUG)
printf("biodone: page disappeared\n");
#endif
- --obj->paging_in_progress;
+ vm_object_pip_subtract(obj, 1);
continue;
}
bp->b_pages[i] = m;
@@ -1994,7 +1994,7 @@ biodone(register struct buf * bp)
if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
vfs_page_set_valid(bp, foff, i, m);
}
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
/*
* when debugging new filesystems or buffer I/O methods, this
@@ -2025,14 +2025,14 @@ biodone(register struct buf * bp)
panic("biodone: page busy < 0\n");
}
PAGE_BWAKEUP(m);
- --obj->paging_in_progress;
+ vm_object_pip_subtract(obj, 1);
foff += resid;
iosize -= resid;
}
if (obj &&
(obj->paging_in_progress == 0) &&
(obj->flags & OBJ_PIPWNT)) {
- obj->flags &= ~OBJ_PIPWNT;
+ vm_object_clear_flag(obj, OBJ_PIPWNT);
wakeup(obj);
}
}
@@ -2125,15 +2125,13 @@ vfs_unbusy_pages(struct buf * bp)
bp->b_pages[i] = m;
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
}
- s = splvm();
- --obj->paging_in_progress;
- splx(s);
- m->flags &= ~PG_ZERO;
+ vm_object_pip_subtract(obj, 1);
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
PAGE_BWAKEUP(m);
}
if (obj->paging_in_progress == 0 &&
(obj->flags & OBJ_PIPWNT)) {
- obj->flags &= ~OBJ_PIPWNT;
+ vm_object_clear_flag(obj, OBJ_PIPWNT);
wakeup(obj);
}
}
@@ -2250,12 +2248,10 @@ retry:
for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
vm_page_t m = bp->b_pages[i];
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
if ((bp->b_flags & B_CLUSTER) == 0) {
- s = splvm();
- obj->paging_in_progress++;
- splx(s);
- m->busy++;
+ vm_object_pip_add(obj, 1);
+ PAGE_BUSY(m);
}
vm_page_protect(m, VM_PROT_NONE);
@@ -2331,7 +2327,7 @@ vfs_bio_clrbuf(struct buf *bp) {
}
}
bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
- bp->b_pages[i]->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(bp->b_pages[i], PG_ZERO);
}
bp->b_resid = 0;
} else {
@@ -2369,7 +2365,7 @@ tryagain:
}
vm_page_wire(p);
p->valid = VM_PAGE_BITS_ALL;
- p->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(p, PG_ZERO);
pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
bp->b_pages[index] = p;
PAGE_WAKEUP(p);
@@ -2399,7 +2395,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
#endif
bp->b_pages[index] = NULL;
pmap_kremove(pg);
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
vm_page_unwire(p);
vm_page_free(p);
}
diff --git a/sys/kern/vfs_cluster.c b/sys/kern/vfs_cluster.c
index 3f969ad..8eeeb8a 100644
--- a/sys/kern/vfs_cluster.c
+++ b/sys/kern/vfs_cluster.c
@@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
- * $Id: vfs_cluster.c,v 1.67 1998/08/06 08:33:18 dfr Exp $
+ * $Id: vfs_cluster.c,v 1.68 1998/08/13 08:09:07 dfr Exp $
*/
#include "opt_debug_cluster.h"
@@ -417,10 +417,8 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
- s = splvm();
- ++m->busy;
- ++m->object->paging_in_progress;
- splx(s);
+ PAGE_BUSY(m);
+ vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages-1] != m)) {
bp->b_pages[bp->b_npages] = m;
@@ -784,10 +782,8 @@ cluster_wbuild(vp, size, start_lbn, len)
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
- s = splvm();
- ++m->busy;
- ++m->object->paging_in_progress;
- splx(s);
+ PAGE_BUSY(m);
+ vm_object_pip_add(m->object, 1);
if ((bp->b_npages == 0) ||
(bp->b_pages[bp->b_npages - 1] != m)) {
bp->b_pages[bp->b_npages] = m;
diff --git a/sys/sys/sysctl.h b/sys/sys/sysctl.h
index 90cb92c..e19f8e8 100644
--- a/sys/sys/sysctl.h
+++ b/sys/sys/sysctl.h
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)sysctl.h 8.1 (Berkeley) 6/2/93
- * $Id: sysctl.h,v 1.60 1998/04/24 04:15:52 dg Exp $
+ * $Id: sysctl.h,v 1.61 1998/07/28 22:34:12 joerg Exp $
*/
#ifndef _SYS_SYSCTL_H_
@@ -100,13 +100,13 @@ struct sysctl_req {
struct proc *p;
int lock;
void *oldptr;
- int oldlen;
- int oldidx;
- int (*oldfunc)(struct sysctl_req *, const void *, int);
+ size_t oldlen;
+ size_t oldidx;
+ int (*oldfunc)(struct sysctl_req *, const void *, size_t);
void *newptr;
- int newlen;
- int newidx;
- int (*newfunc)(struct sysctl_req *, void *, int);
+ size_t newlen;
+ size_t newidx;
+ int (*newfunc)(struct sysctl_req *, void *, size_t);
};
/*
@@ -127,6 +127,8 @@ struct sysctl_oid {
#define SYSCTL_OUT(r, p, l) (r->oldfunc)(r, p, l)
int sysctl_handle_int SYSCTL_HANDLER_ARGS;
+int sysctl_handle_long SYSCTL_HANDLER_ARGS;
+int sysctl_handle_intptr SYSCTL_HANDLER_ARGS;
int sysctl_handle_string SYSCTL_HANDLER_ARGS;
int sysctl_handle_opaque SYSCTL_HANDLER_ARGS;
@@ -153,6 +155,16 @@ int sysctl_handle_opaque SYSCTL_HANDLER_ARGS;
SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \
ptr, val, sysctl_handle_int, "I", descr)
+/* This is a long, if ptr is NULL, val is returned */
+#define SYSCTL_LONG(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \
+ ptr, val, sysctl_handle_long, "L", descr)
+
+/* This is a pointer-sized integer, if ptr is NULL, val is returned */
+#define SYSCTL_INTPTR(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \
+ ptr, val, sysctl_handle_intptr, "P", descr)
+
/* This is anything, specified by a pointer and a length */
#define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \
SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|access, \
@@ -448,8 +460,8 @@ extern char machine[];
extern char osrelease[];
extern char ostype[];
-int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, int *retval);
-int userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, int *retval);
+int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval);
+int userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval);
/*
int sysctl_clockrate __P((char *, size_t*));
int sysctl_file __P((char *, size_t*));
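A usage sketch for the new macro (hypothetical, not part of this commit): exporting a long-sized kernel counter through sysctl_handle_long. KERN_FOO_TOTAL and foo_total stand in for whatever MIB number and variable would really be used.

    static long foo_total;                  /* hypothetical statistic */
    SYSCTL_LONG(_kern, KERN_FOO_TOTAL, foo_total, CTLFLAG_RW,
        &foo_total, 0, "Total number of foo operations");

sysctl_handle_long copies sizeof(long) bytes out (and back in for a writable OID), so the backing variable must genuinely be a long; SYSCTL_INTPTR works the same way with sizeof(intptr_t), which is why kern_exec.c now uses it for its pointer-valued variables.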
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 946b6d2..5cdbef4 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.98 1998/07/28 15:30:01 bde Exp $
+ * $Id: swap_pager.c,v 1.99 1998/08/13 08:05:13 dfr Exp $
*/
/*
@@ -1104,7 +1104,7 @@ swap_pager_getpages(object, m, count, reqpage)
if (rv == VM_PAGER_OK) {
for (i = 0; i < count; i++) {
m[i]->dirty = 0;
- m[i]->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m[i], PG_ZERO);
if (i != reqpage) {
/*
* whether or not to leave the page
@@ -1590,12 +1590,10 @@ swap_pager_finish(spc)
PAGE_BWAKEUP(ma[i]);
}
- s = splvm();
- object->paging_in_progress -= spc->spc_count;
- splx(s);
+ vm_object_pip_subtract(object, spc->spc_count);
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
- object->flags &= ~OBJ_PIPWNT;
+ vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
}
@@ -1648,10 +1646,10 @@ swap_pager_iodone(bp)
(bp->b_flags & B_READ) ? "pagein" : "pageout",
(u_long) bp->b_blkno, bp->b_bcount, bp->b_error);
} else {
- object->paging_in_progress -= spc->spc_count;
+ vm_object_pip_subtract(object, spc->spc_count);
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
- object->flags &= ~OBJ_PIPWNT;
+ vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
}
ma = spc->spc_m;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index f074234..8233f11 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.85 1998/07/22 09:38:04 dg Exp $
+ * $Id: vm_fault.c,v 1.86 1998/08/06 08:33:19 dfr Exp $
*/
/*
@@ -291,7 +291,7 @@ RetryFault:;
if ((fs.m->flags & PG_BUSY) ||
(fs.m->busy &&
(fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
- fs.m->flags |= PG_WANTED | PG_REFERENCED;
+ PAGE_SET_FLAG(fs.m, PG_WANTED | PG_REFERENCED);
cnt.v_intrans++;
tsleep(fs.m, PSWP, "vmpfw", 0);
}
@@ -314,7 +314,7 @@ RetryFault:;
goto RetryFault;
}
- fs.m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(fs.m, PG_BUSY);
if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
fs.m->object != kernel_object && fs.m->object != kmem_object) {
goto readrest;
@@ -607,7 +607,7 @@ readrest:
vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
fs.first_m = fs.m;
fs.first_m->dirty = VM_PAGE_BITS_ALL;
- fs.first_m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(fs.first_m, PG_BUSY);
fs.m = NULL;
cnt.v_cow_optim++;
} else {
@@ -705,8 +705,9 @@ readrest:
*/
if (prot & VM_PROT_WRITE) {
- fs.m->flags |= PG_WRITEABLE;
- fs.m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
+ PAGE_SET_FLAG(fs.m, PG_WRITEABLE);
+ vm_object_set_flag(fs.m->object,
+ OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
/*
* If the fault is a write, we know that this page is being
* written NOW. This will save on the pmap_is_modified() calls
@@ -719,14 +720,14 @@ readrest:
unlock_things(&fs);
fs.m->valid = VM_PAGE_BITS_ALL;
- fs.m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(fs.m, PG_ZERO);
pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
pmap_prefault(fs.map->pmap, vaddr, fs.entry);
}
- fs.m->flags |= PG_MAPPED|PG_REFERENCED;
+ PAGE_SET_FLAG(fs.m, PG_MAPPED|PG_REFERENCED);
if (fault_flags & VM_FAULT_HOLD)
vm_page_hold(fs.m);
@@ -966,10 +967,10 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Enter it in the pmap...
*/
- dst_m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(dst_m, PG_ZERO);
pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
prot, FALSE);
- dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
+ PAGE_SET_FLAG(dst_m, PG_WRITEABLE|PG_MAPPED);
/*
* Mark it no longer busy, and put it on the active list.
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 6a71c87..6097257 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.47 1998/06/05 21:48:45 dg Exp $
+ * $Id: vm_kern.c,v 1.48 1998/06/21 14:53:41 bde Exp $
*/
/*
@@ -181,7 +181,7 @@ kmem_alloc(map, size)
VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((mem->flags & PG_ZERO) == 0)
vm_page_zero_fill(mem);
- mem->flags &= ~(PG_BUSY | PG_ZERO);
+ PAGE_CLEAR_FLAG(mem, (PG_BUSY | PG_ZERO));
mem->valid = VM_PAGE_BITS_ALL;
}
@@ -332,7 +332,7 @@ retry:
vm_map_unlock(map);
return (0);
}
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
@@ -361,7 +361,7 @@ retry:
PAGE_WAKEUP(m);
pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
VM_PROT_ALL, 1);
- m->flags |= PG_MAPPED | PG_WRITEABLE | PG_REFERENCED;
+ PAGE_SET_FLAG(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
}
vm_map_unlock(map);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 398a9eb..9205540 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.132 1998/07/14 12:14:58 bde Exp $
+ * $Id: vm_map.c,v 1.133 1998/08/06 08:33:19 dfr Exp $
*/
/*
@@ -542,9 +542,9 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->offset = offset;
if (object) {
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
- object->flags &= ~OBJ_ONEMAPPING;
+ vm_object_clear_flag(object, OBJ_ONEMAPPING);
} else {
- object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(object, OBJ_ONEMAPPING);
}
}
@@ -758,7 +758,7 @@ vm_map_simplify_entry(map, entry)
if (startaddr > entry->start) \
_vm_map_clip_start(map, entry, startaddr); \
else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
- entry->object.vm_object->flags |= OBJ_ONEMAPPING; \
+ vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
}
/*
@@ -808,7 +808,8 @@ _vm_map_clip_start(map, entry, start)
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
if (new_entry->object.vm_object->ref_count == 1)
- new_entry->object.vm_object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(new_entry->object.vm_object,
+ OBJ_ONEMAPPING);
vm_object_reference(new_entry->object.vm_object);
}
}
@@ -826,7 +827,7 @@ _vm_map_clip_start(map, entry, start)
if (endaddr < entry->end) \
_vm_map_clip_end(map, entry, endaddr); \
else if (entry->object.vm_object && (entry->object.vm_object->ref_count == 1)) \
- entry->object.vm_object->flags |= OBJ_ONEMAPPING; \
+ vm_object_set_flag(entry->object.vm_object, OBJ_ONEMAPPING); \
}
/*
@@ -871,7 +872,8 @@ _vm_map_clip_end(map, entry, end)
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
if (new_entry->object.vm_object->ref_count == 1)
- new_entry->object.vm_object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(new_entry->object.vm_object,
+ OBJ_ONEMAPPING);
vm_object_reference(new_entry->object.vm_object);
}
}
@@ -1770,7 +1772,7 @@ vm_map_delete(map, start, end)
entry = first_entry->next;
object = entry->object.vm_object;
if (object && (object->ref_count == 1) && (object->shadow_count == 0))
- object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(object, OBJ_ONEMAPPING);
} else {
entry = first_entry;
vm_map_clip_start(map, entry, start);
@@ -1972,7 +1974,7 @@ vm_map_split(entry)
vm_object_reference(source); /* Referenced by new_object */
TAILQ_INSERT_TAIL(&source->shadow_head,
new_object, shadow_list);
- source->flags &= ~OBJ_ONEMAPPING;
+ vm_object_clear_flag(source, OBJ_ONEMAPPING);
new_object->backing_object_offset =
orig_object->backing_object_offset + offidxstart;
new_object->backing_object = source;
@@ -1988,16 +1990,16 @@ vm_map_split(entry)
if (m == NULL)
continue;
if (m->flags & PG_BUSY) {
- m->flags |= PG_WANTED;
+ PAGE_SET_FLAG(m, PG_WANTED);
tsleep(m, PVM, "spltwt", 0);
goto retry;
}
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
vm_page_protect(m, VM_PROT_NONE);
vm_page_rename(m, new_object, idx);
m->dirty = VM_PAGE_BITS_ALL;
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
}
if (orig_object->type == OBJT_SWAP) {
@@ -2072,7 +2074,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
}
vm_object_reference(src_object);
- src_object->flags &= ~OBJ_ONEMAPPING;
+ vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
dst_entry->object.vm_object = src_object;
src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
@@ -2151,7 +2153,7 @@ vmspace_fork(vm1)
old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
object = old_entry->object.vm_object;
}
- object->flags &= ~OBJ_ONEMAPPING;
+ vm_object_clear_flag(object, OBJ_ONEMAPPING);
/*
* Clone the entry, referencing the sharing map.
@@ -2610,7 +2612,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Set the object optimization hint flag
*/
- srcobject->flags |= OBJ_OPT;
+ vm_object_set_flag(srcobject, OBJ_OPT);
vm_object_reference(srcobject);
entry->object.vm_object = srcobject;
@@ -2668,7 +2670,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
/*
* Set the object optimization hint flag
*/
- srcobject->flags |= OBJ_OPT;
+ vm_object_set_flag(srcobject, OBJ_OPT);
vm_object_reference(srcobject);
if (oldobject) {
@@ -2694,7 +2696,7 @@ vm_uiomove(mapa, srcobject, cp, cnta, uaddra, npages)
*/
} else {
- srcobject->flags |= OBJ_OPT;
+ vm_object_set_flag(srcobject, OBJ_OPT);
vm_object_reference(srcobject);
pmap_remove (map->pmap, uaddr, tend);
@@ -2821,7 +2823,7 @@ m_inretry:
vm_object_deallocate(robject);
}
- object->flags &= ~OBJ_OPT;
+ vm_object_clear_flag(object, OBJ_OPT);
}
#include "opt_ddb.h"
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 6ef1c6d..4879535 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
- * $Id: vm_meter.c,v 1.24 1998/03/28 10:33:27 bde Exp $
+ * $Id: vm_meter.c,v 1.25 1998/03/30 09:56:49 phk Exp $
*/
#include <sys/param.h>
@@ -138,7 +138,7 @@ vmtotal SYSCTL_HANDLER_ARGS
for (object = TAILQ_FIRST(&vm_object_list);
object != NULL;
object = TAILQ_NEXT(object,object_list))
- object->flags &= ~OBJ_ACTIVE;
+ vm_object_clear_flag(object, OBJ_ACTIVE);
/*
* Calculate process statistics.
*/
@@ -181,7 +181,7 @@ vmtotal SYSCTL_HANDLER_ARGS
if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
entry->object.vm_object == NULL)
continue;
- entry->object.vm_object->flags |= OBJ_ACTIVE;
+ vm_object_set_flag(entry->object.vm_object, OBJ_ACTIVE);
paging |= entry->object.vm_object->paging_in_progress;
}
if (paging)
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index b66d5c7..b33fac6 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.80 1998/07/05 11:56:52 dfr Exp $
+ * $Id: vm_mmap.c,v 1.81 1998/07/15 02:32:35 bde Exp $
*/
/*
@@ -747,7 +747,7 @@ mincore(p, uap)
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
- m->flags |= PG_REFERENCED;
+ PAGE_SET_FLAG(m, PG_REFERENCED);
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
}
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1af903f..dc7421f 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.125 1998/07/14 12:26:15 bde Exp $
+ * $Id: vm_object.c,v 1.126 1998/08/06 08:33:19 dfr Exp $
*/
/*
@@ -153,7 +153,7 @@ _vm_object_allocate(type, size, object)
object->flags = 0;
object->id = ++objidnumber;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
- object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(object, OBJ_ONEMAPPING);
object->behavior = OBJ_NORMAL;
object->paging_in_progress = 0;
object->resident_page_count = 0;
@@ -275,7 +275,7 @@ vm_object_vndeallocate(object)
object->ref_count--;
if (object->ref_count == 0) {
vp->v_flag &= ~VTEXT;
- object->flags &= ~OBJ_OPT;
+ vm_object_clear_flag(object, OBJ_OPT);
}
vrele(vp);
}
@@ -317,7 +317,7 @@ vm_object_deallocate(object)
* objects.
*/
if ((object->ref_count == 2) && (object->shadow_count == 0)) {
- object->flags |= OBJ_ONEMAPPING;
+ vm_object_set_flag(object, OBJ_ONEMAPPING);
object->ref_count--;
return;
} else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
@@ -385,7 +385,7 @@ doterm:
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
temp->shadow_count--;
if (temp->ref_count == 0)
- temp->flags &= ~OBJ_OPT;
+ vm_object_clear_flag(temp, OBJ_OPT);
temp->generation++;
object->backing_object = NULL;
}
@@ -411,7 +411,7 @@ vm_object_terminate(object)
/*
* Make sure no one uses us.
*/
- object->flags |= OBJ_DEAD;
+ vm_object_set_flag(object, OBJ_DEAD);
/*
* wait for the pageout daemon to be done with the object
@@ -461,7 +461,7 @@ vm_object_terminate(object)
if (p->busy || (p->flags & PG_BUSY))
printf("vm_object_terminate: freeing busy page\n");
#endif
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
vm_page_free(p);
cnt.v_pfree++;
}
@@ -540,7 +540,7 @@ vm_object_page_clean(object, start, end, flags)
vp = object->handle;
- object->flags |= OBJ_CLEANING;
+ vm_object_set_flag(object, OBJ_CLEANING);
tstart = start;
if (end == 0) {
@@ -550,12 +550,12 @@ vm_object_page_clean(object, start, end, flags)
}
for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
- p->flags |= PG_CLEANCHK;
+ PAGE_SET_FLAG(p, PG_CLEANCHK);
vm_page_protect(p, VM_PROT_READ);
}
if ((tstart == 0) && (tend == object->size)) {
- object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
+ vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
rescan:
@@ -569,19 +569,19 @@ rescan:
(pi < tstart) || (pi >= tend) ||
(p->valid == 0) ||
((p->queue - p->pc) == PQ_CACHE)) {
- p->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
continue;
}
vm_page_test_dirty(p);
if ((p->dirty & p->valid) == 0) {
- p->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
continue;
}
s = splvm();
while ((p->flags & PG_BUSY) || p->busy) {
- p->flags |= PG_WANTED | PG_REFERENCED;
+ PAGE_SET_FLAG(p, PG_WANTED | PG_REFERENCED);
tsleep(p, PVM, "vpcwai", 0);
if (object->generation != curgeneration) {
splx(s);
@@ -597,12 +597,12 @@ rescan:
(tp->busy != 0))
break;
if((tp->queue - tp->pc) == PQ_CACHE) {
- tp->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
break;
}
vm_page_test_dirty(tp);
if ((tp->dirty & tp->valid) == 0) {
- tp->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
break;
}
maf[ i - 1 ] = tp;
@@ -622,12 +622,12 @@ rescan:
(tp->busy != 0))
break;
if((tp->queue - tp->pc) == PQ_CACHE) {
- tp->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
break;
}
vm_page_test_dirty(tp);
if ((tp->dirty & tp->valid) == 0) {
- tp->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(tp, PG_CLEANCHK);
break;
}
mab[ i - 1 ] = tp;
@@ -641,14 +641,14 @@ rescan:
for(i=0;i<maxb;i++) {
int index = (maxb - i) - 1;
ma[index] = mab[i];
- ma[index]->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(ma[index], PG_CLEANCHK);
}
- p->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(p, PG_CLEANCHK);
ma[maxb] = p;
for(i=0;i<maxf;i++) {
int index = (maxb + i) + 1;
ma[index] = maf[i];
- ma[index]->flags &= ~PG_CLEANCHK;
+ PAGE_CLEAR_FLAG(ma[index], PG_CLEANCHK);
}
runlen = maxb + maxf + 1;
@@ -657,7 +657,7 @@ rescan:
for (i = 0; i<runlen; i++) {
if (ma[i]->valid & ma[i]->dirty) {
vm_page_protect(ma[i], VM_PROT_READ);
- ma[i]->flags |= PG_CLEANCHK;
+ PAGE_SET_FLAG(ma[i], PG_CLEANCHK);
}
}
if (object->generation != curgeneration)
@@ -666,7 +666,7 @@ rescan:
VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
- object->flags &= ~OBJ_CLEANING;
+ vm_object_clear_flag(object, OBJ_CLEANING);
return;
}
@@ -719,7 +719,7 @@ vm_object_pmap_copy(object, start, end)
vm_page_protect(p, VM_PROT_READ);
}
- object->flags &= ~OBJ_WRITEABLE;
+ vm_object_clear_flag(object, OBJ_WRITEABLE);
}
/*
@@ -770,7 +770,7 @@ vm_object_pmap_remove(object, start, end)
vm_page_protect(p, VM_PROT_NONE);
}
if ((start == 0) && (object->size == end))
- object->flags &= ~OBJ_WRITEABLE;
+ vm_object_clear_flag(object, OBJ_WRITEABLE);
}
/*
@@ -884,7 +884,7 @@ vm_object_shadow(object, offset, length)
result->backing_object = source;
if (source) {
TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
- source->flags &= ~OBJ_ONEMAPPING;
+ vm_object_clear_flag(source, OBJ_ONEMAPPING);
source->shadow_count++;
source->generation++;
}
@@ -941,7 +941,7 @@ vm_object_qcollapse(object)
p = next;
continue;
}
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
new_pindex = p->pindex - backing_offset_index;
if (p->pindex < backing_offset_index ||
@@ -1053,7 +1053,7 @@ vm_object_collapse(object)
if (backing_object->ref_count == 1) {
- backing_object->flags |= OBJ_DEAD;
+ vm_object_set_flag(backing_object, OBJ_DEAD);
/*
* We can collapse the backing object.
*
@@ -1066,7 +1066,7 @@ vm_object_collapse(object)
while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {
new_pindex = p->pindex - backing_offset_index;
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
/*
* If the parent has a page here, or if this
@@ -1216,7 +1216,7 @@ vm_object_collapse(object)
p = TAILQ_NEXT(p, listq)) {
new_pindex = p->pindex - backing_offset_index;
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
/*
* If the parent has a page here, or if this
@@ -1236,7 +1236,7 @@ vm_object_collapse(object)
return;
}
- pp->flags |= PG_BUSY;
+ PAGE_SET_FLAG(pp, PG_BUSY);
if ((pp->valid == 0) &&
!vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
/*
@@ -1341,7 +1341,7 @@ again:
continue;
}
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
}
@@ -1374,7 +1374,7 @@ again:
}
}
- p->flags |= PG_BUSY;
+ PAGE_SET_FLAG(p, PG_BUSY);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 4855a80..9897393 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.49 1998/05/04 17:12:53 dyson Exp $
+ * $Id: vm_object.h,v 1.50 1998/08/06 08:33:19 dfr Exp $
*/
/*
@@ -72,6 +72,7 @@
#define _VM_OBJECT_
#include <sys/queue.h>
+#include <machine/atomic.h>
enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_DEAD };
typedef enum obj_type objtype_t;
@@ -162,21 +163,35 @@ extern vm_object_t kmem_object;
#ifdef KERNEL
static __inline void
+vm_object_set_flag(vm_object_t object, u_int bits)
+{
+ atomic_set_short(&object->flags, bits);
+}
+
+static __inline void
+vm_object_clear_flag(vm_object_t object, u_int bits)
+{
+ atomic_clear_short(&object->flags, bits);
+}
+
+static __inline void
vm_object_pip_add(vm_object_t object, int i)
{
- int s = splvm();
- object->paging_in_progress += i;
- splx(s);
+ atomic_add_short(&object->paging_in_progress, i);
+}
+
+static __inline void
+vm_object_pip_subtract(vm_object_t object, int i)
+{
+ atomic_subtract_short(&object->paging_in_progress, i);
}
static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
- int s = splvm();
- object->paging_in_progress--;
- splx(s);
+ atomic_subtract_short(&object->paging_in_progress, 1);
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
- object->flags &= ~OBJ_PIPWNT;
+ vm_object_clear_flag(object, OBJ_PIPWNT);
wakeup(object);
}
}
@@ -189,7 +204,7 @@ vm_object_pip_sleep(vm_object_t object, char *waitid)
if (object->paging_in_progress) {
s = splvm();
if (object->paging_in_progress) {
- object->flags |= OBJ_PIPWNT;
+ vm_object_set_flag(object, OBJ_PIPWNT);
tsleep(object, PVM, waitid, 0);
}
splx(s);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index b32229b..03abeec 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.104 1998/07/15 04:17:55 bde Exp $
+ * $Id: vm_page.c,v 1.105 1998/07/26 18:15:20 dfr Exp $
*/
/*
@@ -403,7 +403,7 @@ vm_page_insert(m, object, pindex)
*/
TAILQ_INSERT_TAIL(&object->memq, m, listq);
- m->flags |= PG_TABLED;
+ PAGE_SET_FLAG(m, PG_TABLED);
m->object->page_hint = m;
m->object->generation++;
@@ -446,9 +446,9 @@ vm_page_remove(m)
}
#endif
- m->flags &= ~PG_BUSY;
+ PAGE_CLEAR_FLAG(m, PG_BUSY);
if (m->flags & PG_WANTED) {
- m->flags &= ~PG_WANTED;
+ PAGE_CLEAR_FLAG(m, PG_WANTED);
wakeup(m);
}
@@ -484,7 +484,7 @@ vm_page_remove(m)
object->generation++;
m->object = NULL;
- m->flags &= ~PG_TABLED;
+ PAGE_CLEAR_FLAG(m, PG_TABLED);
}
/*
@@ -940,7 +940,7 @@ vm_page_alloc(object, pindex, page_req)
m->flags = PG_ZERO | PG_BUSY;
} else if (qtype == PQ_CACHE) {
oldobject = m->object;
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@@ -1011,7 +1011,7 @@ vm_page_sleep(vm_page_t m, char *msg, char *busy) {
int s;
s = splvm();
if ((busy && *busy) || (m->flags & PG_BUSY)) {
- m->flags |= PG_WANTED;
+ PAGE_SET_FLAG(m, PG_WANTED);
tsleep(m, PVM, msg, 0);
slept = 1;
}
@@ -1247,7 +1247,7 @@ vm_page_wire(m)
}
(*vm_page_queues[PQ_NONE].lcnt)++;
m->wire_count++;
- m->flags |= PG_MAPPED;
+ PAGE_SET_FLAG(m, PG_MAPPED);
}
/*
@@ -1384,7 +1384,7 @@ retrylookup:
s = splvm();
while ((object->generation == generation) &&
(m->busy || (m->flags & PG_BUSY))) {
- m->flags |= PG_WANTED | PG_REFERENCED;
+ PAGE_SET_FLAG(m, PG_WANTED | PG_REFERENCED);
tsleep(m, PVM, "pgrbwt", 0);
if ((allocflags & VM_ALLOC_RETRY) == 0) {
splx(s);
@@ -1394,7 +1394,7 @@ retrylookup:
splx(s);
goto retrylookup;
} else {
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
return m;
}
}
@@ -1633,7 +1633,7 @@ again1:
pqtype = m->queue - m->pc;
if (pqtype == PQ_CACHE) {
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
vm_page_free(m);
}
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 4bfa8c1..fc6d61c 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.42 1998/06/30 08:01:30 jmg Exp $
+ * $Id: vm_page.h,v 1.43 1998/08/22 15:24:09 mckay Exp $
*/
/*
@@ -74,6 +74,8 @@
#include "opt_vmpage.h"
#include <vm/pmap.h>
+#include <machine/atomic.h>
+
/*
* Management of resident (logical) pages.
*
@@ -279,24 +281,30 @@ extern vm_offset_t last_phys_addr; /* physical address for last_page */
* Functions implemented as macros
*/
+#define PAGE_SET_FLAG(m, bits) atomic_set_short(&(m)->flags, bits)
+
+#define PAGE_CLEAR_FLAG(m, bits) atomic_clear_short(&(m)->flags, bits)
+
#define PAGE_ASSERT_WAIT(m, interruptible) { \
- (m)->flags |= PG_WANTED; \
+ PAGE_SET_FLAG(m, PG_WANTED); \
assert_wait((int) (m), (interruptible)); \
}
#define PAGE_WAKEUP(m) { \
- (m)->flags &= ~PG_BUSY; \
+ PAGE_CLEAR_FLAG(m, PG_BUSY); \
if (((m)->flags & PG_WANTED) && ((m)->busy == 0)) { \
- (m)->flags &= ~PG_WANTED; \
+ PAGE_CLEAR_FLAG(m, PG_WANTED); \
wakeup((m)); \
} \
}
+#define PAGE_BUSY(m) atomic_add_char(&(m)->busy, 1)
+
#define PAGE_BWAKEUP(m) { \
- (m)->busy--; \
+ atomic_subtract_char(&(m)->busy, 1); \
if ((((m)->flags & (PG_WANTED | PG_BUSY)) == PG_WANTED) && \
((m)->busy == 0)) { \
- (m)->flags &= ~PG_WANTED; \
+ PAGE_CLEAR_FLAG(m, PG_WANTED); \
wakeup((m)); \
} \
}
@@ -373,11 +381,11 @@ vm_page_protect(vm_page_t mem, int prot)
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
- mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
+ PAGE_CLEAR_FLAG(mem, PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
- mem->flags &= ~PG_WRITEABLE;
+ PAGE_CLEAR_FLAG(mem, PG_WRITEABLE);
}
}
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index ce39df5..2175622 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.123 1998/07/10 17:58:35 alex Exp $
+ * $Id: vm_pageout.c,v 1.124 1998/08/06 08:33:19 dfr Exp $
*/
/*
@@ -362,7 +362,7 @@ vm_pageout_flush(mc, count, flags)
int i;
for (i = 0; i < count; i++) {
- mc[i]->busy++;
+ PAGE_BUSY(mc[i]);
vm_page_protect(mc[i], VM_PROT_READ);
}
@@ -476,7 +476,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
if (actcount) {
- p->flags |= PG_REFERENCED;
+ PAGE_SET_FLAG(p, PG_REFERENCED);
} else if (p->flags & PG_REFERENCED) {
actcount = 1;
}
@@ -485,7 +485,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
(p->flags & PG_REFERENCED)) {
vm_page_activate(p);
p->act_count += actcount;
- p->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(p, PG_REFERENCED);
} else if (p->queue == PQ_ACTIVE) {
if ((p->flags & PG_REFERENCED) == 0) {
p->act_count -= min(p->act_count, ACT_DECLINE);
@@ -500,7 +500,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
}
} else {
vm_page_activate(p);
- p->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(p, PG_REFERENCED);
if (p->act_count < (ACT_MAX - ACT_ADVANCE))
p->act_count += ACT_ADVANCE;
s = splvm();
@@ -599,7 +599,7 @@ vm_pageout_page_free(vm_page_t m) {
vbusy(vp);
}
- m->flags |= PG_BUSY;
+ PAGE_SET_FLAG(m, PG_BUSY);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
vm_object_deallocate(object);
@@ -683,7 +683,7 @@ rescan0:
* If the object is not being used, we ignore previous references.
*/
if (m->object->ref_count == 0) {
- m->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(m, PG_REFERENCED);
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
/*
@@ -708,7 +708,7 @@ rescan0:
* inactive queue again.
*/
if ((m->flags & PG_REFERENCED) != 0) {
- m->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(m, PG_REFERENCED);
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
m->act_count += (actcount + ACT_ADVANCE + 1);
@@ -906,7 +906,7 @@ rescan0:
/*
* Since we have "tested" this bit, we need to clear it now.
*/
- m->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(m, PG_REFERENCED);
/*
* Only if an object is currently being used, do we use the
@@ -1095,7 +1095,7 @@ vm_pageout_page_stats()
actcount = 0;
if (m->flags & PG_REFERENCED) {
- m->flags &= ~PG_REFERENCED;
+ PAGE_CLEAR_FLAG(m, PG_REFERENCED);
actcount += 1;
}
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index fdda3e3..50b77a0 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.93 1998/07/11 07:46:16 bde Exp $
+ * $Id: vnode_pager.c,v 1.94 1998/07/11 11:30:46 bde Exp $
*/
/*
@@ -442,7 +442,7 @@ vnode_pager_input_smlfs(object, m)
}
vm_pager_unmap_page(kva);
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
if (error) {
return VM_PAGER_ERROR;
}
@@ -506,7 +506,7 @@ vnode_pager_input_old(object, m)
}
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = 0;
- m->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(m, PG_ZERO);
return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
@@ -773,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_set_validclean(mt, 0, nvalid);
}
- mt->flags &= ~PG_ZERO;
+ PAGE_CLEAR_FLAG(mt, PG_ZERO);
if (i != reqpage) {
/*