Diffstat (limited to 'include')
-rw-r--r--  include/linux/fault-inject.h |  5
-rw-r--r--  include/linux/slab.h         |  5
-rw-r--r--  include/linux/slub_def.h     | 27
3 files changed, 20 insertions, 17 deletions
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 06ca9b2..7b64ad4 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
#endif /* CONFIG_FAULT_INJECTION */
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
#else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+ unsigned long flags)
{
return false;
}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2da8372..4884462 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -70,6 +70,11 @@
#else
# define SLAB_NOTRACK 0x00000000UL
#endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+#else
+# define SLAB_FAILSLAB 0x00000000UL
+#endif
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
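With SLAB_FAILSLAB defined, an individual cache can opt into fault injection. A hedged example, with a made-up cache name and object size, of passing the flag at creation time:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *widget_cache;	/* name and size are illustrative */

static int __init widget_cache_init(void)
{
	/* Mark this cache as a fault-injection target via SLAB_FAILSLAB. */
	widget_cache = kmem_cache_create("widget", 128, 0, SLAB_FAILSLAB, NULL);
	return widget_cache ? 0 : -ENOMEM;
}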
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 1e14beb..0249d41 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
void **freelist; /* Pointer to first free per cpu object */
struct page *page; /* The slab from which we are allocating */
int node; /* The node of the page (or -1 for debug) */
- unsigned int offset; /* Freepointer offset (in word units) */
- unsigned int objsize; /* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
* Slab cache management.
*/
struct kmem_cache {
+ struct kmem_cache_cpu *cpu_slab;
/* Used for retriving partial slabs etc */
unsigned long flags;
int size; /* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
int remote_node_defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#endif
-#ifdef CONFIG_SMP
- struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
- struct kmem_cache_cpu cpu_slab;
-#endif
};
/*
@@ -135,11 +129,21 @@ struct kmem_cache {
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
return &kmalloc_caches[index];
}
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
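The slub_def.h changes replace the NR_CPUS-sized cpu_slab array (and the per-cpu copies of offset/objsize) with a single percpu pointer at the head of struct kmem_cache, and grow kmalloc_caches to KMALLOC_CACHES so DMA caches can be carved out of the reserved slots. A sketch, not part of this diff, of how the per-cpu state would now be reached; get_cpu_slab() is an illustrative name:

#include <linux/percpu.h>
#include <linux/slab.h>

/* Illustrative accessor: the percpu pointer replaces s->cpu_slab[cpu]. */
static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s)
{
	return this_cpu_ptr(s->cpu_slab);
}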