author     Rusty Russell <rusty@rustcorp.com.au>    2008-11-08 20:24:19 +1100
committer  Ingo Molnar <mingo@elte.hu>              2008-11-09 21:09:54 +0100
commit     984f2f377fdfd098f5ae58d09ee04d5e29e6112b (patch)
tree       6f6ea07057f5680586a8ac6f77700c118f253bcb
parent     cd83e42c6b0413dcbb548c2ead799111ff7e6a13 (diff)
cpumask: introduce new API, without changing anything, v3
Impact: cleanup

Clean up based on feedback from Andrew Morton and others:

  - change to inline functions instead of macros
  - add __init to bootmem method
  - add a missing debug check

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/linux/cpumask.h   58
-rw-r--r--   lib/cpumask.c              3
2 files changed, 54 insertions, 7 deletions
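
For orientation, a minimal sketch of how callers are expected to use the iterator-style API touched by this patch. The helpers count_common_cpus() and pick_other_cpu() are hypothetical, made up purely for illustration; they are not part of the patch or of the kernel.

#include <linux/cpumask.h>

/*
 * Hypothetical helper (not part of this patch): count the CPUs set in
 * both @a and @b.  With for_each_cpu_and() no temporary cpumask is
 * needed on the stack.
 */
static inline unsigned int count_common_cpus(const struct cpumask *a,
					     const struct cpumask *b)
{
	unsigned int cpu, n = 0;

	for_each_cpu_and(cpu, a, b)
		n++;

	/* After the loop, cpu is >= nr_cpu_ids. */
	return n;
}

/*
 * Hypothetical helper (not part of this patch): pick any CPU in @mask
 * other than @self, falling back to @self if it is the only one.
 */
static inline unsigned int pick_other_cpu(const struct cpumask *mask,
					  unsigned int self)
{
	unsigned int cpu = cpumask_any_but(mask, self);

	return cpu < nr_cpu_ids ? cpu : self;
}
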
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 31caa1b..21e1dd4 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -564,12 +564,36 @@ static inline unsigned int cpumask_check(unsigned int cpu)
}
#if NR_CPUS == 1
-/* Uniprocesor. */
-#define cpumask_first(src) ({ (void)(src); 0; })
-#define cpumask_next(n, src) ({ (void)(src); 1; })
-#define cpumask_next_zero(n, src) ({ (void)(src); 1; })
-#define cpumask_next_and(n, srcp, andp) ({ (void)(srcp), (void)(andp); 1; })
-#define cpumask_any_but(mask, cpu) ({ (void)(mask); (void)(cpu); 0; })
+/* Uniprocessor. Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+ const struct cpumask *srcp,
+ const struct cpumask *andp)
+{
+ return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+ unsigned int cpu)
+{
+ return 1;
+}
#define for_each_cpu(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
@@ -620,10 +644,32 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
#define for_each_cpu(cpu, mask) \
for ((cpu) = -1; \
(cpu) = cpumask_next((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask, &and);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
#define for_each_cpu_and(cpu, mask, and) \
for ((cpu) = -1; \
(cpu) = cpumask_next_and((cpu), (mask), (and)), \
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 2ebc3a9..8d03f22 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -67,6 +67,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
+ cpumask_check(cpu);
for_each_cpu(i, mask)
if (i != cpu)
break;
@@ -108,7 +109,7 @@ void free_cpumask_var(cpumask_var_t mask)
}
EXPORT_SYMBOL(free_cpumask_var);
-void free_bootmem_cpumask_var(cpumask_var_t mask)
+void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
free_bootmem((unsigned long)mask, cpumask_size());
}