path: root/sys/sys/vmmeter.h
author		jeff <jeff@FreeBSD.org>		2007-05-18 07:10:50 +0000
committer	jeff <jeff@FreeBSD.org>		2007-05-18 07:10:50 +0000
commit		e1996cb9609d2e55a26ee78dddbfce4ba4073b53 (patch)
tree		c94b660d4b9246fed8cbeadf7851932258d8b72a	/sys/sys/vmmeter.h
parent		beb495eff1db0646624feb7071ced7f632ff8869 (diff)
- define and use VMCNT_{GET,SET,ADD,SUB,PTR} macros for manipulating
  vmcnts. This can be used to abstract away pcpu details but also changes
  to use atomics for all counters now. This means sched lock is no longer
  responsible for protecting counts in the switch routines.

Contributed by: Attilio Rao <attilio@FreeBSD.org>
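
As an illustration of the kind of call site this affects, here is a minimal
hypothetical sketch (the real switch-routine code lives in kern_synch.c and
may differ in detail):

	/*
	 * Before: the context-switch counter was only safe because the
	 * caller held sched_lock across the update.
	 *
	 *	mtx_assert(&sched_lock, MA_OWNED);
	 *	cnt.v_swtch++;
	 *
	 * After: the update is a lockless atomic, so sched_lock no longer
	 * needs to cover it.  VMCNT_ADD(swtch, 1) expands to
	 * atomic_add_int(&cnt.v_swtch, 1).
	 */
	VMCNT_ADD(swtch, 1);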
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--	sys/sys/vmmeter.h	33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 793f32d..375a2ce 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -102,7 +102,18 @@ struct vmmeter {
};
#ifdef _KERNEL
-extern struct vmmeter cnt;
+extern volatile struct vmmeter cnt;
+
+#define VMCNT __DEVOLATILE(struct vmmeter *, &cnt)
+#define VMCNT_SET(member, val) \
+ atomic_store_rel_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_ADD(member, val) \
+ atomic_add_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_DEC(member, val) \
+ atomic_subtract_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_GET(member) (__CONCAT(cnt.v_, member))
+#define VMCNT_PTR(member) \
+ __DEVOLATILE(u_int *, __CONCAT(&cnt.v_, member))
/*
* Return TRUE if we are under our reserved low-free-pages threshold
@@ -112,7 +123,8 @@ static __inline
int
vm_page_count_reserved(void)
{
- return (cnt.v_free_reserved > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_reserved) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -126,7 +138,8 @@ static __inline
int
vm_page_count_severe(void)
{
- return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_severe) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -143,7 +156,8 @@ static __inline
int
vm_page_count_min(void)
{
- return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_min) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -155,7 +169,8 @@ static __inline
int
vm_page_count_target(void)
{
- return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_target) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -168,8 +183,8 @@ int
vm_paging_target(void)
{
return (
- (cnt.v_free_target + cnt.v_cache_min) -
- (cnt.v_free_count + cnt.v_cache_count)
+ (VMCNT_GET(free_target) + VMCNT_GET(cache_min)) -
+ (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
);
}
@@ -182,8 +197,8 @@ int
vm_paging_needed(void)
{
return (
- (cnt.v_free_reserved + cnt.v_cache_min) >
- (cnt.v_free_count + cnt.v_cache_count)
+ (VMCNT_GET(free_reserved) + VMCNT_GET(cache_min)) >
+ (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
);
}
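
Note the __DEVOLATILE casts: cnt itself is now volatile-qualified, and the
VMCNT and VMCNT_PTR macros strip that qualifier so the counters can still be
handed to interfaces that expect plain pointers. A hypothetical example of
exporting a counter through sysctl (names assumed for illustration; the
actual export in vm_meter.c is done differently):

	/* Read-only sysctl backed directly by the free-page counter. */
	SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_free_count, CTLFLAG_RD,
	    VMCNT_PTR(free_count), 0, "Pages on the free list");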