author    Andrew Morton <akpm@osdl.org>  2007-02-10 01:44:41 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 10:51:25 -0800
commit    780a065668b1c6ca6a70c7d36b9f6552ea3bb5f5 (patch)
tree      4d61d1d042a1a58b84bdf8f5b4b0a33146271a54 /include
parent    7131b6d167b41593463ce98df17e101e776bf5ec (diff)
[PATCH] count_vm_events-warning-fix
- Prevent things like this:

  block/ll_rw_blk.c: In function 'submit_bio':
  block/ll_rw_blk.c:3222: warning: unused variable 'count'

  inlines are very, very preferable to macros.

- remove unused get_cpu_vm_events() macro

Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
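For context, a minimal standalone sketch of the warning the patch fixes: an empty do-while macro discards its arguments at preprocessing time, so a local passed only to the stub looks unused to gcc, while an empty static inline still references its parameters and is optimized away just the same. This is illustration only, not kernel code; the USE_MACRO_STUBS switch and the fake_sectors() helper are invented for the example.

    /* Illustration only: simplified stand-ins for the kernel definitions. */
    enum vm_event_item { PGPGIN, PGPGOUT, NR_VM_EVENT_ITEMS };

    #ifdef USE_MACRO_STUBS
    /* Old style: the arguments disappear at preprocessing time. */
    #define count_vm_events(e, d)	do { } while (0)
    #else
    /* New style: the arguments are still referenced, so gcc stays quiet,
     * and the empty inline body is optimized away just the same. */
    static inline void count_vm_events(enum vm_event_item item, long delta)
    {
    }
    #endif

    static long fake_sectors(void)		/* hypothetical stand-in for bio_sectors() */
    {
    	return 8;
    }

    int main(void)
    {
    	long count = fake_sectors();

    	/*
    	 * Built with -DUSE_MACRO_STUBS, gcc -Wunused-variable reports
    	 * "unused variable 'count'" here, because the macro threw the
    	 * argument away; with the inline stub there is no warning.
    	 */
    	count_vm_events(PGPGOUT, count);
    	return 0;
    }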
Diffstat (limited to 'include')
-rw-r--r--  include/linux/vmstat.h  |  47
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7ba91f2..acb1f10 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -7,18 +7,6 @@
#include <linux/mmzone.h>
#include <asm/atomic.h>
-#ifdef CONFIG_VM_EVENT_COUNTERS
-/*
- * Light weight per cpu counter implementation.
- *
- * Counters should only be incremented. You need to set EMBEDDED
- * to disable VM_EVENT_COUNTERS. Things like procps (vmstat,
- * top, etc) use /proc/vmstat and depend on these counters.
- *
- * Counters are handled completely inline. On many platforms the code
- * generated will simply be the increment of a global address.
- */
-
#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
@@ -52,6 +40,17 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_VM_EVENT_ITEMS
};
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Light weight per cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
struct vm_event_state {
unsigned long event[NR_VM_EVENT_ITEMS];
};
@@ -92,12 +91,24 @@ static inline void vm_events_fold_cpu(int cpu)
#else
/* Disable counters */
-#define get_cpu_vm_events(e) 0L
-#define count_vm_event(e) do { } while (0)
-#define count_vm_events(e,d) do { } while (0)
-#define __count_vm_event(e) do { } while (0)
-#define __count_vm_events(e,d) do { } while (0)
-#define vm_events_fold_cpu(x) do { } while (0)
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
#endif /* CONFIG_VM_EVENT_COUNTERS */