author     Alexey Dobriyan <adobriyan@gmail.com>  2017-11-15 17:32:18 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 18:21:01 -0800
commit     d50112edde1d0c621520e53747044009f11c656b (patch)
tree       ca4092f2126ac85a63647a48e43ecbf34bb69782 /mm
parent     a3ba074447824625d3a267a5fffd2ea21556ebf4 (diff)
slab, slub, slob: add slab_flags_t
Add sparse-checked slab_flags_t for struct kmem_cache::flags (SLAB_POISON, etc).

SLAB is bloated temporarily by switching to "unsigned long", but only temporarily.

Link: http://lkml.kernel.org/r/20171021100225.GA22428@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
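For context, the sparse checking works by annotating the typedef with __bitwise, so slab_flags_t becomes a distinct type under sparse, and by requiring a __force cast wherever a raw integer is turned into a flag value — exactly the pattern in the CFLGS_* and __OBJECT_POISON hunks below. A minimal standalone sketch of that pattern (assuming an unsigned long base type and a made-up SLAB_POISON_EXAMPLE value; the real typedef is added to include/linux/types.h, which is outside this mm-only diffstat):

/*
 * Standalone sketch, not the kernel's actual header.  __CHECKER__ is
 * defined when sparse runs; a normal compiler sees plain macros/typedefs.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long __bitwise slab_flags_t;

/* Flag values need an explicit __force cast, as CFLGS_OFF_SLAB does below. */
#define SLAB_POISON_EXAMPLE	((slab_flags_t __force)0x00000800UL)

slab_flags_t add_poison(slab_flags_t flags)
{
	/* fine: both operands already have type slab_flags_t */
	return flags | SLAB_POISON_EXAMPLE;
}

/*
 * sparse would warn on the following, because a plain integer is mixed
 * with a __bitwise type without a __force cast:
 *
 *	slab_flags_t bad = 0x800UL;
 */

The same __force/__bitwise mechanism is already used for gfp_t, so misuse of flag values shows up as a sparse warning without changing the generated code.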
Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/kasan.c    2
-rw-r--r--  mm/slab.c          23
-rw-r--r--  mm/slab.h          26
-rw-r--r--  mm/slab_common.c   16
-rw-r--r--  mm/slob.c           2
-rw-r--r--  mm/slub.c          26
6 files changed, 48 insertions, 47 deletions
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6f319fb..405bba4 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -337,7 +337,7 @@ static size_t optimal_redzone(size_t object_size)
}
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags)
+ slab_flags_t *flags)
{
int redzone_adjust;
int orig_size = *size;
diff --git a/mm/slab.c b/mm/slab.c
index 0c6468c..19b1b9f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -252,8 +252,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
} while (0)
-#define CFLGS_OBJFREELIST_SLAB (0x40000000UL)
-#define CFLGS_OFF_SLAB (0x80000000UL)
+#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000UL)
+#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000UL)
#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
@@ -441,7 +441,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
- unsigned long flags, size_t *left_over)
+ slab_flags_t flags, size_t *left_over)
{
unsigned int num;
size_t slab_size = PAGE_SIZE << gfporder;
@@ -1759,7 +1759,7 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
* towards high-order requests, this should be changed.
*/
static size_t calculate_slab_order(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left_over = 0;
int gfporder;
@@ -1886,8 +1886,8 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
return 0;
}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -1895,7 +1895,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *cachep;
@@ -1913,7 +1913,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
}
static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -1936,7 +1936,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
}
static bool set_off_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -1970,7 +1970,7 @@ static bool set_off_slab_cache(struct kmem_cache *cachep,
}
static bool set_on_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -2006,8 +2006,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-int
-__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
diff --git a/mm/slab.h b/mm/slab.h
index 45c586c..e192556 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -21,7 +21,7 @@ struct kmem_cache {
unsigned int object_size;/* The original size of the object */
unsigned int size; /* The aligned/padded/added on size */
unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
+ slab_flags_t flags; /* Active flags on the slab */
const char *name; /* Slab name for sysfs */
int refcount; /* Use counter */
void (*ctor)(void *); /* Called on object slot creation */
@@ -79,13 +79,13 @@ extern const struct kmalloc_info_struct {
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size);
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
-void create_kmalloc_caches(unsigned long);
+void create_kmalloc_caches(slab_flags_t);
/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
@@ -93,32 +93,32 @@ struct kmem_cache *kmalloc_slab(size_t, gfp_t);
/* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags);
+ slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
- size_t size, unsigned long flags);
+ size_t size, slab_flags_t flags);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *));
+ slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *));
+ slab_flags_t flags, void (*ctor)(void *));
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8f7f9f7..175e866 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -291,7 +291,7 @@ int slab_unmergeable(struct kmem_cache *s)
}
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *))
+ slab_flags_t flags, const char *name, void (*ctor)(void *))
{
struct kmem_cache *s;
@@ -341,7 +341,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
*/
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size)
{
/*
@@ -366,7 +366,7 @@ unsigned long calculate_alignment(unsigned long flags,
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *),
+ slab_flags_t flags, void (*ctor)(void *),
struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
struct kmem_cache *s;
@@ -431,7 +431,7 @@ out_free_cache:
*/
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s = NULL;
const char *cache_name;
@@ -879,7 +879,7 @@ bool slab_is_available(void)
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
int err;
@@ -899,7 +899,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
@@ -1057,7 +1057,7 @@ void __init setup_kmalloc_cache_index_table(void)
}
}
-static void __init new_kmalloc_cache(int idx, unsigned long flags)
+static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
{
kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
kmalloc_info[idx].size, flags);
@@ -1068,7 +1068,7 @@ static void __init new_kmalloc_cache(int idx, unsigned long flags)
* may already have been created because they were needed to
* enable allocations for slab creation.
*/
-void __init create_kmalloc_caches(unsigned long flags)
+void __init create_kmalloc_caches(slab_flags_t flags)
{
int i;
diff --git a/mm/slob.c b/mm/slob.c
index 3451eca..623e8a5 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -524,7 +524,7 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
if (flags & SLAB_TYPESAFE_BY_RCU) {
/* leave room for rcu footer at the end of object */
diff --git a/mm/slub.c b/mm/slub.c
index 025bbb5..482d1da 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -193,8 +193,10 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000UL /* Poison object */
-#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
+/* Poison object */
+#define __OBJECT_POISON ((slab_flags_t __force)0x80000000UL)
+/* Use cmpxchg_double */
+#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000UL)
/*
* Tracking user of a slab.
@@ -485,9 +487,9 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
-static int slub_debug;
+static slab_flags_t slub_debug;
#endif
static char *slub_debug_slabs;
@@ -1289,8 +1291,8 @@ out:
__setup("slub_debug", setup_slub_debug);
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
/*
@@ -1322,8 +1324,8 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -3477,7 +3479,7 @@ static void set_cpu_partial(struct kmem_cache *s)
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
- unsigned long flags = s->flags;
+ slab_flags_t flags = s->flags;
size_t size = s->object_size;
int order;
@@ -3593,7 +3595,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
return !!oo_objects(s->oo);
}
-static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
+static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
@@ -4245,7 +4247,7 @@ void __init kmem_cache_init_late(void)
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s, *c;
@@ -4275,7 +4277,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
int err;