author     attilio <attilio@FreeBSD.org>  2007-06-04 21:38:48 +0000
committer  attilio <attilio@FreeBSD.org>  2007-06-04 21:38:48 +0000
commit     e333d0ff0eb23a5f94f36fd95b4bbcfda3ccbc8f (patch)
tree       a35bbd71798a97fd11a5f264ff97c562de001111
parent     771efb08f5bfaf22da0498ae91647fdecb3cc6bb (diff)
Rework the PCPU_* (MD) interface:
- Rename PCPU_LAZY_INC into PCPU_INC.
- Add the PCPU_ADD interface, which just does an add on the pcpu member
  given a specific value.

Note that for most architectures PCPU_INC and PCPU_ADD are not safe.
This is a point that needs some discussion/work in the next days.

Reviewed by:	alc, bde
Approved by:	jeff (mentor)
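As a rough illustration of the interface shape being introduced (a standalone
user-space sketch with simplified stand-in names, not the kernel headers
themselves): PCPU_ADD() folds a value into a per-CPU member selected by token
pasting, and PCPU_INC() is defined on top of it.

	/*
	 * Sketch only: struct pcpu and the pc_* members are simplified
	 * stand-ins for the real per-CPU structure and cnt.* counters.
	 */
	#include <stdio.h>

	struct pcpu {
		unsigned long	pc_v_syscall;	/* stands in for cnt.v_syscall */
		unsigned long	pc_v_intr;	/* stands in for cnt.v_intr */
	};

	static struct pcpu pcpu0;
	static struct pcpu *pcpup = &pcpu0;

	/* UP-style, non-atomic definitions, as in the simpler MD headers. */
	#define	PCPU_ADD(member, val)	(pcpup->pc_ ## member += (val))
	#define	PCPU_INC(member)	PCPU_ADD(member, 1)

	int
	main(void)
	{
		PCPU_INC(v_syscall);		/* one more syscall */
		PCPU_ADD(v_intr, 3);		/* fold in a batch of interrupts */
		printf("syscalls=%lu intrs=%lu\n",
		    pcpu0.pc_v_syscall, pcpu0.pc_v_intr);
		return (0);
	}

The MD headers in the diff below supply the real definitions: on amd64 and
i386 the add is issued as a single segment-relative instruction, while the
other architectures fall back to a plain, non-atomic C add for now.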
-rw-r--r--  sys/amd64/amd64/intr_machdep.c |  4
-rw-r--r--  sys/amd64/amd64/trap.c         |  6
-rw-r--r--  sys/amd64/ia32/ia32_syscall.c  |  4
-rw-r--r--  sys/amd64/include/pcpu.h       | 29
-rw-r--r--  sys/arm/arm/intr.c             |  2
-rw-r--r--  sys/arm/arm/trap.c             |  6
-rw-r--r--  sys/arm/arm/undefined.c        |  2
-rw-r--r--  sys/arm/include/pcpu.h         |  3
-rw-r--r--  sys/i386/i386/intr_machdep.c   |  4
-rw-r--r--  sys/i386/i386/trap.c           |  6
-rw-r--r--  sys/i386/include/pcpu.h        | 29
-rw-r--r--  sys/ia64/ia32/ia32_trap.c      |  4
-rw-r--r--  sys/ia64/ia64/interrupt.c      |  2
-rw-r--r--  sys/ia64/ia64/trap.c           |  4
-rw-r--r--  sys/ia64/include/pcpu.h        |  3
-rw-r--r--  sys/kern/kern_intr.c           |  2
-rw-r--r--  sys/powerpc/aim/trap.c         |  4
-rw-r--r--  sys/powerpc/include/pcpu.h     |  3
-rw-r--r--  sys/powerpc/powerpc/trap.c     |  4
-rw-r--r--  sys/sparc64/include/pcpu.h     |  3
-rw-r--r--  sys/sparc64/sparc64/trap.c     |  4
-rw-r--r--  sys/sun4v/include/pcpu.h       |  3
-rw-r--r--  sys/sun4v/sun4v/trap.c         |  4
-rw-r--r--  sys/vm/vm_fault.c              | 12
24 files changed, 99 insertions, 48 deletions
diff --git a/sys/amd64/amd64/intr_machdep.c b/sys/amd64/amd64/intr_machdep.c
index 6ed8c80..3dc1361 100644
--- a/sys/amd64/amd64/intr_machdep.c
+++ b/sys/amd64/amd64/intr_machdep.c
@@ -250,7 +250,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* processed too.
*/
(*isrc->is_count)++;
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
ie = isrc->is_event;
@@ -321,7 +321,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* processed too.
*/
(*isrc->is_count)++;
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
ie = isrc->is_event;
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index 23a30cc8..4bdaa73 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -163,7 +163,7 @@ trap(struct trapframe *frame)
register_t addr = 0;
ksiginfo_t ksi;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
type = frame->tf_trapno;
#ifdef SMP
@@ -737,10 +737,10 @@ syscall(struct trapframe *frame)
ksiginfo_t ksi;
/*
- * note: PCPU_LAZY_INC() can only be used if we can afford
+ * note: PCPU_INC() can only be used if we can afford
* occasional inaccuracy in the count.
*/
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
#ifdef DIAGNOSTIC
if (ISPL(frame->tf_cs) != SEL_UPL) {
diff --git a/sys/amd64/ia32/ia32_syscall.c b/sys/amd64/ia32/ia32_syscall.c
index c51a2be..40ec2e6 100644
--- a/sys/amd64/ia32/ia32_syscall.c
+++ b/sys/amd64/ia32/ia32_syscall.c
@@ -105,10 +105,10 @@ ia32_syscall(struct trapframe *frame)
ksiginfo_t ksi;
/*
- * note: PCPU_LAZY_INC() can only be used if we can afford
+ * note: PCPU_INC() can only be used if we can afford
* occasional inaccuracy in the count.
*/
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
td->td_pticks = 0;
td->td_frame = frame;
diff --git a/sys/amd64/include/pcpu.h b/sys/amd64/include/pcpu.h
index 47d9b0e..9245bbe 100644
--- a/sys/amd64/include/pcpu.h
+++ b/sys/amd64/include/pcpu.h
@@ -56,7 +56,8 @@
extern struct pcpu *pcpup;
#define PCPU_GET(member) (pcpup->pc_ ## member)
-#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
+#define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member, val) (pcpup->pc_ ## member = (val))
@@ -110,10 +111,31 @@ extern struct pcpu *pcpup;
})
/*
+ * Adds the value to the per-cpu counter name. The implementation
+ * must be atomic with respect to interrupts.
+ */
+#define __PCPU_ADD(name, val) do { \
+ __pcpu_type(name) __val; \
+ struct __s { \
+ u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \
+ } __s; \
+ \
+ __val = (val); \
+ if (sizeof(__val) == 1 || sizeof(__val) == 2 || \
+ sizeof(__val) == 4 || sizeof(__val) == 8) { \
+ __s = *(struct __s *)(void *)&__val; \
+ __asm __volatile("add %1,%%gs:%0" \
+ : "=m" (*(struct __s *)(__pcpu_offset(name))) \
+ : "r" (__s)); \
+ } else \
+ *__PCPU_PTR(name) += __val; \
+} while (0)
+
+/*
* Increments the value of the per-cpu counter name. The implementation
* must be atomic with respect to interrupts.
*/
-#define __PCPU_LAZY_INC(name) do { \
+#define __PCPU_INC(name) do { \
CTASSERT(sizeof(__pcpu_type(name)) == 1 || \
sizeof(__pcpu_type(name)) == 2 || \
sizeof(__pcpu_type(name)) == 4 || \
@@ -159,7 +181,8 @@ extern struct pcpu *pcpup;
}
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
-#define PCPU_LAZY_INC(member) __PCPU_LAZY_INC(pc_ ## member)
+#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
+#define PCPU_INC(member) __PCPU_INC(pc_ ## member)
#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)
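The amd64 __PCPU_ADD() above issues the addition as a single %gs-relative add
instruction, so an interrupt taken on the same CPU cannot land between the
load and the store of the counter. A minimal sketch of that distinction,
assuming x86-64 and GCC-style inline asm, and using an ordinary variable
rather than the %gs-based per-CPU area:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t counter;		/* stands in for a pc_ member */

	static void
	add_rmw(uint64_t val)
	{
		counter += val;			/* may compile to load/add/store */
	}

	static void
	add_single_insn(uint64_t val)
	{
		/*
		 * One memory-destination add: it cannot be split by an
		 * interrupt on this CPU (same idea as "add %1,%%gs:%0"
		 * in __PCPU_ADD, minus the per-CPU segment override).
		 */
		__asm__ __volatile__("addq %1,%0" : "+m" (counter) : "r" (val));
	}

	int
	main(void)
	{
		add_rmw(1);
		add_single_insn(41);
		printf("counter = %llu\n", (unsigned long long)counter);
		return (0);
	}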
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
index 8d573dc..58aa1f6 100644
--- a/sys/arm/arm/intr.c
+++ b/sys/arm/arm/intr.c
@@ -106,7 +106,7 @@ arm_handler_execute(struct trapframe *frame, int irqnb)
struct thread *td = curthread;
int i, thread, ret;
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
td->td_intr_nesting_level++;
while ((i = arm_get_next_irq()) != -1) {
arm_mask_irq(i);
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index 45a9e3a..22b2a02 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -253,7 +253,7 @@ data_abort_handler(trapframe_t *tf)
td = curthread;
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
/* Data abort came from user mode? */
user = TRAP_USERMODE(tf);
@@ -725,7 +725,7 @@ prefetch_abort_handler(trapframe_t *tf)
td = curthread;
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
if (TRAP_USERMODE(tf)) {
td->td_frame = tf;
@@ -880,7 +880,7 @@ syscall(struct thread *td, trapframe_t *frame, u_int32_t insn)
register_t *ap, *args, copyargs[MAXARGS];
struct sysent *callp;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
td->td_pticks = 0;
if (td->td_ucred != td->td_proc->p_ucred)
cred_update_thread(td);
diff --git a/sys/arm/arm/undefined.c b/sys/arm/arm/undefined.c
index efd80bb..133134e 100644
--- a/sys/arm/arm/undefined.c
+++ b/sys/arm/arm/undefined.c
@@ -191,7 +191,7 @@ undefinedinstruction(trapframe_t *frame)
enable_interrupts(I32_bit|F32_bit);
frame->tf_pc -= INSN_SIZE;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
fault_pc = frame->tf_pc;
diff --git a/sys/arm/include/pcpu.h b/sys/arm/include/pcpu.h
index 82d9066..a1ff5f1 100644
--- a/sys/arm/include/pcpu.h
+++ b/sys/arm/include/pcpu.h
@@ -57,7 +57,8 @@ extern struct pcpu __pcpu;
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
-#define PCPU_LAZY_INC(member) (++__pcpu.pc_ ## member)
+#define PCPU_ADD(member, value) (__pcpu.pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&__pcpu.pc_ ## member)
#define PCPU_SET(member,value) (__pcpu.pc_ ## member = (value))
diff --git a/sys/i386/i386/intr_machdep.c b/sys/i386/i386/intr_machdep.c
index 65e67e2..a07ee77 100644
--- a/sys/i386/i386/intr_machdep.c
+++ b/sys/i386/i386/intr_machdep.c
@@ -241,7 +241,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* processed too.
*/
(*isrc->is_count)++;
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
ie = isrc->is_event;
@@ -312,7 +312,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
* processed too.
*/
(*isrc->is_count)++;
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
ie = isrc->is_event;
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index 83fe391..e44e427 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -181,7 +181,7 @@ trap(struct trapframe *frame)
static int lastalert = 0;
#endif
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
type = frame->tf_trapno;
#ifdef SMP
@@ -922,10 +922,10 @@ syscall(struct trapframe *frame)
ksiginfo_t ksi;
/*
- * note: PCPU_LAZY_INC() can only be used if we can afford
+ * note: PCPU_INC() can only be used if we can afford
* occasional inaccuracy in the count.
*/
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
#ifdef DIAGNOSTIC
if (ISPL(frame->tf_cs) != SEL_UPL) {
diff --git a/sys/i386/include/pcpu.h b/sys/i386/include/pcpu.h
index e44eea1..67cb530 100644
--- a/sys/i386/include/pcpu.h
+++ b/sys/i386/include/pcpu.h
@@ -62,7 +62,8 @@
extern struct pcpu *pcpup;
#define PCPU_GET(member) (pcpup->pc_ ## member)
-#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
+#define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member, val) (pcpup->pc_ ## member = (val))
@@ -116,10 +117,31 @@ extern struct pcpu *pcpup;
})
/*
+ * Adds the value to the per-cpu counter name. The implementation
+ * must be atomic with respect to interrupts.
+ */
+#define __PCPU_ADD(name, val) do { \
+ __pcpu_type(name) __val; \
+ struct __s { \
+ u_char __b[MIN(sizeof(__pcpu_type(name)), 4)]; \
+ } __s; \
+ \
+ __val = (val); \
+ if (sizeof(__val) == 1 || sizeof(__val) == 2 || \
+ sizeof(__val) == 4) { \
+ __s = *(struct __s *)(void *)&__val; \
+ __asm __volatile("add %1,%%fs:%0" \
+ : "=m" (*(struct __s *)(__pcpu_offset(name))) \
+ : "r" (__s)); \
+ } else \
+ *__PCPU_PTR(name) += __val; \
+} while (0)
+
+/*
* Increments the value of the per-cpu counter name. The implementation
* must be atomic with respect to interrupts.
*/
-#define __PCPU_LAZY_INC(name) do { \
+#define __PCPU_INC(name) do { \
CTASSERT(sizeof(__pcpu_type(name)) == 1 || \
sizeof(__pcpu_type(name)) == 2 || \
sizeof(__pcpu_type(name)) == 4); \
@@ -160,7 +182,8 @@ extern struct pcpu *pcpup;
}
#define PCPU_GET(member) __PCPU_GET(pc_ ## member)
-#define PCPU_LAZY_INC(member) __PCPU_LAZY_INC(pc_ ## member)
+#define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val)
+#define PCPU_INC(member) __PCPU_INC(pc_ ## member)
#define PCPU_PTR(member) __PCPU_PTR(pc_ ## member)
#define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val)
diff --git a/sys/ia64/ia32/ia32_trap.c b/sys/ia64/ia32/ia32_trap.c
index b900d66..ba2bceb 100644
--- a/sys/ia64/ia32/ia32_trap.c
+++ b/sys/ia64/ia32/ia32_trap.c
@@ -64,7 +64,7 @@ ia32_syscall(struct trapframe *tf)
int error, i, narg;
ksiginfo_t ksi;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
td = curthread;
params = (caddr_t)(tf->tf_special.sp & ((1L<<32)-1)) +
@@ -220,7 +220,7 @@ ia32_trap(int vector, struct trapframe *tf)
KASSERT(TRAPF_USERMODE(tf), ("%s: In kernel mode???", __func__));
ia64_set_fpsr(IA64_FPSR_DEFAULT);
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
td = curthread;
td->td_frame = tf;
diff --git a/sys/ia64/ia64/interrupt.c b/sys/ia64/ia64/interrupt.c
index 8cf18b4..424dce6 100644
--- a/sys/ia64/ia64/interrupt.c
+++ b/sys/ia64/ia64/interrupt.c
@@ -154,7 +154,7 @@ interrupt(u_int64_t vector, struct trapframe *tf)
if (vector == CLOCK_VECTOR) {/* clock interrupt */
/* CTR0(KTR_INTR, "clock interrupt"); */
- PCPU_LAZY_INC(cnt.v_intr);
+ PCPU_INC(cnt.v_intr);
#ifdef EVCNT_COUNTERS
clock_intr_evcnt.ev_count++;
#else
diff --git a/sys/ia64/ia64/trap.c b/sys/ia64/ia64/trap.c
index 77b5009..373a46a 100644
--- a/sys/ia64/ia64/trap.c
+++ b/sys/ia64/ia64/trap.c
@@ -363,7 +363,7 @@ trap(int vector, struct trapframe *tf)
user = TRAPF_USERMODE(tf) ? 1 : 0;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
td = curthread;
p = td->td_proc;
@@ -978,7 +978,7 @@ syscall(struct trapframe *tf)
code = tf->tf_scratch.gr15;
args = &tf->tf_scratch.gr16;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
td = curthread;
td->td_frame = tf;
diff --git a/sys/ia64/include/pcpu.h b/sys/ia64/include/pcpu.h
index dfe31ef..d641816 100644
--- a/sys/ia64/include/pcpu.h
+++ b/sys/ia64/include/pcpu.h
@@ -53,7 +53,8 @@ register struct pcpu *pcpup __asm__("r13");
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
-#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
+#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index aa79bfa..a33f968 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -931,7 +931,7 @@ swi_sched(void *cookie, int flags)
atomic_store_rel_int(&ih->ih_need, 1);
if (!(flags & SWI_DELAY)) {
- PCPU_LAZY_INC(cnt.v_soft);
+ PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index 34daa87..1f25b26 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -149,7 +149,7 @@ trap(struct trapframe *frame)
u_int ucode;
ksiginfo_t ksi;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
td = PCPU_GET(curthread);
p = td->td_proc;
@@ -349,7 +349,7 @@ syscall(struct trapframe *frame)
td = PCPU_GET(curthread);
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
#ifdef KSE
if (p->p_flag & P_SA)
diff --git a/sys/powerpc/include/pcpu.h b/sys/powerpc/include/pcpu.h
index 4575bd5..6e95076 100644
--- a/sys/powerpc/include/pcpu.h
+++ b/sys/powerpc/include/pcpu.h
@@ -62,7 +62,8 @@ struct pmap;
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
-#define PCPU_LAZY_INC(member) (++PCPUP->pc_ ## member)
+#define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&PCPUP->pc_ ## member)
#define PCPU_SET(member,value) (PCPUP->pc_ ## member = (value))
diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c
index 34daa87..1f25b26 100644
--- a/sys/powerpc/powerpc/trap.c
+++ b/sys/powerpc/powerpc/trap.c
@@ -149,7 +149,7 @@ trap(struct trapframe *frame)
u_int ucode;
ksiginfo_t ksi;
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
td = PCPU_GET(curthread);
p = td->td_proc;
@@ -349,7 +349,7 @@ syscall(struct trapframe *frame)
td = PCPU_GET(curthread);
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
#ifdef KSE
if (p->p_flag & P_SA)
diff --git a/sys/sparc64/include/pcpu.h b/sys/sparc64/include/pcpu.h
index 9da5775..f3a9f64 100644
--- a/sys/sparc64/include/pcpu.h
+++ b/sys/sparc64/include/pcpu.h
@@ -71,7 +71,8 @@ register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
-#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
+#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
diff --git a/sys/sparc64/sparc64/trap.c b/sys/sparc64/sparc64/trap.c
index 3b00bf7..8d0ea35 100644
--- a/sys/sparc64/sparc64/trap.c
+++ b/sys/sparc64/sparc64/trap.c
@@ -243,7 +243,7 @@ trap(struct trapframe *tf)
trap_msg[tf->tf_type & ~T_KERNEL],
(TRAPF_USERMODE(tf) ? "user" : "kernel"), rdpr(pil));
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
if ((tf->tf_tstate & TSTATE_PRIV) == 0) {
KASSERT(td != NULL, ("trap: curthread NULL"));
@@ -518,7 +518,7 @@ syscall(struct trapframe *tf)
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
narg = 0;
error = 0;
diff --git a/sys/sun4v/include/pcpu.h b/sys/sun4v/include/pcpu.h
index b6d901a..2515031 100644
--- a/sys/sun4v/include/pcpu.h
+++ b/sys/sun4v/include/pcpu.h
@@ -92,7 +92,8 @@ register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));
* XXX The implementation of this operation should be made atomic
* with respect to preemption.
*/
-#define PCPU_LAZY_INC(member) (++pcpup->pc_ ## member)
+#define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value))
+#define PCPU_INC(member) PCPU_ADD(member, 1)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
diff --git a/sys/sun4v/sun4v/trap.c b/sys/sun4v/sun4v/trap.c
index 4fe6edc..fa2de3d 100644
--- a/sys/sun4v/sun4v/trap.c
+++ b/sys/sun4v/sun4v/trap.c
@@ -268,7 +268,7 @@ trap(struct trapframe *tf, int64_t type, uint64_t data)
trap_msg[trapno],
(TRAPF_USERMODE(tf) ? "user" : "kernel"), rdpr(pil));
- PCPU_LAZY_INC(cnt.v_trap);
+ PCPU_INC(cnt.v_trap);
trapno = (type & TRAP_MASK);
ctx = (type >> TRAP_CTX_SHIFT);
@@ -575,7 +575,7 @@ syscall(struct trapframe *tf)
p = td->td_proc;
- PCPU_LAZY_INC(cnt.v_syscall);
+ PCPU_INC(cnt.v_syscall);
narg = 0;
error = 0;
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 614956a..745ce4d 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -219,7 +219,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
hardfault = 0;
growstack = TRUE;
- PCPU_LAZY_INC(cnt.v_vm_faults);
+ PCPU_INC(cnt.v_vm_faults);
RetryFault:;
@@ -394,7 +394,7 @@ RetryFault:;
}
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
- PCPU_LAZY_INC(cnt.v_intrans);
+ PCPU_INC(cnt.v_intrans);
vm_object_deallocate(fs.first_object);
goto RetryFault;
}
@@ -668,9 +668,9 @@ readrest:
if ((fs.m->flags & PG_ZERO) == 0) {
pmap_zero_page(fs.m);
} else {
- PCPU_LAZY_INC(cnt.v_ozfod);
+ PCPU_INC(cnt.v_ozfod);
}
- PCPU_LAZY_INC(cnt.v_zfod);
+ PCPU_INC(cnt.v_zfod);
fs.m->valid = VM_PAGE_BITS_ALL;
break; /* break to PAGE HAS BEEN FOUND */
} else {
@@ -752,7 +752,7 @@ readrest:
vm_page_busy(fs.m);
fs.first_m = fs.m;
fs.m = NULL;
- PCPU_LAZY_INC(cnt.v_cow_optim);
+ PCPU_INC(cnt.v_cow_optim);
} else {
/*
* Oh, well, lets copy it.
@@ -780,7 +780,7 @@ readrest:
fs.m = fs.first_m;
if (!is_first_object_locked)
VM_OBJECT_LOCK(fs.object);
- PCPU_LAZY_INC(cnt.v_cow_faults);
+ PCPU_INC(cnt.v_cow_faults);
} else {
prot &= ~VM_PROT_WRITE;
}