author     Tejun Heo <tj@kernel.org>    2010-03-24 17:06:43 +0900
committer  Tejun Heo <tj@kernel.org>    2010-03-30 22:02:32 +0900
commit     de380b55f92986c1a84198149cb71b7228d15fbd (patch)
tree       dce7168802e1e65754c9b6455d0527dfa853168c /include
parent     ea5a9f0c3447889abceb7482c391bb977472eab9 (diff)
download   op-kernel-dev-de380b55f92986c1a84198149cb71b7228d15fbd.zip
           op-kernel-dev-de380b55f92986c1a84198149cb71b7228d15fbd.tar.gz
percpu: don't implicitly include slab.h from percpu.h
percpu.h has always been including slab.h to get k[mz]alloc/free() for the UP inline implementation. percpu.h being used by very low level headers including module.h and sched.h, this meant that a lot of files unintentionally got slab.h inclusion.

Lee Schermerhorn was trying to make topology.h use percpu.h and got bitten by this implicit inclusion. The right thing to do is break this ultimately unnecessary dependency. The previous patch added explicit inclusion of either gfp.h or slab.h to the source files using them. This patch updates percpu.h such that slab.h is no longer included from percpu.h.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
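To illustrate what the companion patch ("the previous patch" above) means for callers, here is a minimal hypothetical sketch; the file and the foo_* names are invented for illustration and are not part of this series. A file that only reached slab.h implicitly through percpu.h (for example via sched.h or module.h) must now include slab.h itself for kzalloc()/kfree(), while the per-cpu allocator declarations keep coming from percpu.h on both UP and SMP:

/* Hypothetical caller, not part of this patch series: it previously
 * compiled only because <linux/percpu.h> dragged in <linux/slab.h>. */
#include <linux/slab.h>         /* now needed explicitly for kzalloc()/kfree() */
#include <linux/percpu.h>       /* declares alloc_percpu()/free_percpu() only */
#include <linux/init.h>
#include <linux/errno.h>

struct foo_stats {              /* invented example type */
        unsigned long hits;
};

static struct foo_stats __percpu *foo_counters;
static struct foo_stats *foo_scratch;

static int __init foo_init(void)
{
        /* Per-cpu allocation: after this patch the UP fallback is out of
         * line as well, so the header no longer needs kzalloc()/kfree(). */
        foo_counters = alloc_percpu(struct foo_stats);
        if (!foo_counters)
                return -ENOMEM;

        /* Plain slab allocation: relies on the explicit slab.h include. */
        foo_scratch = kzalloc(sizeof(*foo_scratch), GFP_KERNEL);
        if (!foo_scratch) {
                free_percpu(foo_counters);
                return -ENOMEM;
        }
        return 0;
}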
Diffstat (limited to 'include')
-rw-r--r--  include/linux/percpu.h | 30
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bf..c784513 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,10 +2,10 @@
 #define __LINUX_PERCPU_H

 #include <linux/preempt.h>
-#include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/pfn.h>
+#include <linux/init.h>

 #include <asm/percpu.h>

@@ -135,9 +135,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -147,27 +144,6 @@ extern void __init setup_per_cpu_areas(void);

 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

-static inline void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-        /*
-         * Can't easily make larger alignment work with kmalloc. WARN
-         * on it. Larger alignment should only be used for module
-         * percpu sections on SMP for which this path isn't used.
-         */
-        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-        return kzalloc(size, GFP_KERNEL);
-}
-
-static inline void free_percpu(void __percpu *p)
-{
-        kfree(p);
-}
-
-static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
-{
-        return __pa(addr);
-}
-
 static inline void __init setup_per_cpu_areas(void) { }

 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -177,6 +153,10 @@ static inline void *pcpu_lpage_remapped(void *kaddr)

 #endif /* CONFIG_SMP */

+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 #define alloc_percpu(type) \
         (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))