author	Pekka Enberg <penberg@kernel.org>	2011-03-20 18:12:03 +0200
committer	Pekka Enberg <penberg@kernel.org>	2011-03-20 18:12:03 +0200
commit	c53badd0801728feedfcccae04239410b52b0d03 (patch)
tree	016421ec2a618767f01df8cfbf765a4dab3b3629 /mm/slub.c
parent	521cb40b0c44418a4fd36dc633f575813d59a43d (diff)
parent	865d794d1f144d0f93796840d01696cd70647a8a (diff)
Merge branch 'slab/next' into for-linus
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	132
1 file changed, 81 insertions, 51 deletions
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f..e841d89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,11 +281,40 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->objsize;
+
+#endif
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+ return ((PAGE_SIZE << order) - reserved) / size;
+}
+
static inline struct kmem_cache_order_objects oo_make(int order,
- unsigned long size)
+ unsigned long size, int reserved)
{
struct kmem_cache_order_objects x = {
- (order << OO_SHIFT) + (PAGE_SIZE << order) / size
+ (order << OO_SHIFT) + order_objects(order, size, reserved)
};
return x;
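The new slab_ksize() centralizes the usable-object-size decision that ksize() used to open-code (see the hunk near the end of this diff), and order_objects() factors out the objects-per-slab computation so the reserved tail is accounted for everywhere it matters. A minimal userspace sketch of both helpers, with stand-in flag values and a stand-in cache struct (the real SLAB_* constants and struct kmem_cache layout differ):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-ins for the kernel's SLAB_* flag bits. */
#define SLAB_RED_ZONE        0x1UL
#define SLAB_POISON          0x2UL
#define SLAB_DESTROY_BY_RCU  0x4UL
#define SLAB_STORE_USER      0x8UL

struct cache {
	unsigned long flags;
	unsigned long objsize;   /* size the caller asked for */
	unsigned long inuse;     /* offset of metadata behind the object */
	unsigned long size;      /* full per-object slot size */
	int reserved;            /* bytes held back at the end of each slab */
};

/* Mirrors slab_ksize(): how many bytes of a slot the caller may use. */
static unsigned long cache_ksize(const struct cache *s)
{
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->objsize;   /* debugging needs the padding intact */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;     /* metadata sits right after the object */
	return s->size;              /* the whole slot is fair game */
}

/* Mirrors order_objects(): objects fitting in 2^order pages minus the
 * reserved tail. */
static int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
	struct cache s = { SLAB_DESTROY_BY_RCU, 120, 120, 128, 16 };

	printf("usable bytes: %lu\n", cache_ksize(&s));   /* 120 */
	/* (4096 - 16) / 128 = 31, one fewer than the 32 that would fit
	 * with no reserved tail. */
	printf("objects per order-0 slab: %d\n",
	       order_objects(0, s.size, s.reserved));     /* 31 */
	return 0;
}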
@@ -617,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
return 1;
start = page_address(page);
- length = (PAGE_SIZE << compound_order(page));
+ length = (PAGE_SIZE << compound_order(page)) - s->reserved;
end = start + length;
remainder = length % s->size;
if (!remainder)
@@ -698,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
return 0;
}
- maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+ maxobj = order_objects(compound_order(page), s->size, s->reserved);
if (page->objects > maxobj) {
slab_err(s, page, "objects %u > max %u",
s->name, page->objects, maxobj);
@@ -748,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
nr++;
}
- max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+ max_objects = order_objects(compound_order(page), s->size, s->reserved);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
@@ -800,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
flags &= gfp_allowed_mask;
- kmemcheck_slab_alloc(s, flags, object, s->objsize);
+ kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}
@@ -1249,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_pages(page, order);
}
+#define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page;
- page = container_of((struct list_head *)h, struct page, lru);
+ if (need_reserve_slab_rcu)
+ page = virt_to_head_page(h);
+ else
+ page = container_of((struct list_head *)h, struct page, lru);
+
__free_slab(page->slab, page);
}
static void free_slab(struct kmem_cache *s, struct page *page)
{
if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
- /*
- * RCU free overloads the RCU head over the LRU
- */
- struct rcu_head *head = (void *)&page->lru;
+ struct rcu_head *head;
+
+ if (need_reserve_slab_rcu) {
+ int order = compound_order(page);
+ int offset = (PAGE_SIZE << order) - s->reserved;
+
+ VM_BUG_ON(s->reserved != sizeof(*head));
+ head = page_address(page) + offset;
+ } else {
+ /*
+ * RCU free overloads the RCU head over the LRU
+ */
+ head = (void *)&page->lru;
+ }
call_rcu(head, rcu_free_slab);
} else
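Overlaying the rcu_head on page->lru only works while the rcu_head actually fits; need_reserve_slab_rcu is a compile-time check for that, and when it fails the head is parked in the reserved bytes at the very end of the slab instead, where rcu_free_slab() can recover the page with virt_to_head_page(). A rough userspace model of both the size check and the offset arithmetic, with stand-in struct definitions (real kernel layouts can differ, e.g. under debug-RCU configs):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-ins: list_head is two pointers; a plain rcu_head is also two
 * pointer-sized fields, so on typical builds nothing is reserved. */
struct list_head { void *next, *prev; };
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

int main(void)
{
	int need_reserve = sizeof(struct list_head) < sizeof(struct rcu_head);
	int reserved = need_reserve ? (int)sizeof(struct rcu_head) : 0;
	int order = 1;                           /* 2^1 = 2 pages */
	unsigned long slab_bytes = PAGE_SIZE << order;

	printf("need_reserve_slab_rcu = %d, reserved = %d\n",
	       need_reserve, reserved);

	/* When reservation is needed, the rcu_head goes at the tail of
	 * the slab, past the last object: */
	if (need_reserve)
		printf("rcu_head offset: %lu of %lu bytes\n",
		       slab_bytes - reserved, slab_bytes);
	return 0;
}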
@@ -1988,13 +2034,13 @@ static int slub_nomerge;
* the smallest order which will fit the object.
*/
static inline int slab_order(int size, int min_objects,
- int max_order, int fract_leftover)
+ int max_order, int fract_leftover, int reserved)
{
int order;
int rem;
int min_order = slub_min_order;
- if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+ if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
for (order = max(min_order,
@@ -2003,10 +2049,10 @@ static inline int slab_order(int size, int min_objects,
unsigned long slab_size = PAGE_SIZE << order;
- if (slab_size < min_objects * size)
+ if (slab_size < min_objects * size + reserved)
continue;
- rem = slab_size % size;
+ rem = (slab_size - reserved) % size;
if (rem <= slab_size / fract_leftover)
break;
@@ -2016,7 +2062,7 @@ static inline int slab_order(int size, int min_objects,
return order;
}
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
{
int order;
int min_objects;
@@ -2034,14 +2080,14 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
- max_objects = (PAGE_SIZE << slub_max_order)/size;
+ max_objects = order_objects(slub_max_order, size, reserved);
min_objects = min(min_objects, max_objects);
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
- slub_max_order, fraction);
+ slub_max_order, fraction, reserved);
if (order <= slub_max_order)
return order;
fraction /= 2;
@@ -2053,14 +2099,14 @@ static inline int calculate_order(int size)
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
- order = slab_order(size, 1, slub_max_order, 1);
+ order = slab_order(size, 1, slub_max_order, 1, reserved);
if (order <= slub_max_order)
return order;
/*
* Doh this slab cannot be placed using slub_max_order.
*/
- order = slab_order(size, 1, MAX_ORDER, 1);
+ order = slab_order(size, 1, MAX_ORDER, 1, reserved);
if (order < MAX_ORDER)
return order;
return -ENOSYS;
@@ -2311,7 +2357,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
if (forced_order >= 0)
order = forced_order;
else
- order = calculate_order(size);
+ order = calculate_order(size, s->reserved);
if (order < 0)
return 0;
@@ -2329,8 +2375,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* Determine the number of objects per slab
*/
- s->oo = oo_make(order, size);
- s->min = oo_make(get_order(size), size);
+ s->oo = oo_make(order, size, s->reserved);
+ s->min = oo_make(get_order(size), size, s->reserved);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
@@ -2349,6 +2395,10 @@ static int kmem_cache_open(struct kmem_cache *s,
s->objsize = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
+ s->reserved = 0;
+
+ if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+ s->reserved = sizeof(struct rcu_head);
if (!calculate_sizes(s, -1))
goto error;
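Note the ordering: s->reserved is set before calculate_sizes() runs, since the order and object-count math above all consume it, and only when both need_reserve_slab_rcu and SLAB_DESTROY_BY_RCU hold. Cache creators see no API change; a cache created with SLAB_DESTROY_BY_RCU picks the reservation up automatically. A kernel-style sketch (struct my_obj and the cache name are hypothetical; error handling elided):

#include <linux/init.h>
#include <linux/slab.h>

struct my_obj {
	int data;
};

static struct kmem_cache *my_cache;

static int __init my_init(void)
{
	/* On configs where rcu_head outgrows page->lru, this cache gets
	 * sizeof(struct rcu_head) reserved at the end of each slab;
	 * otherwise reserved stays 0. */
	my_cache = kmem_cache_create("my_rcu_cache", sizeof(struct my_obj),
				     0, SLAB_DESTROY_BY_RCU, NULL);
	return my_cache ? 0 : -ENOMEM;
}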
@@ -2399,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *s)
-{
- return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
@@ -2696,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
size_t ksize(const void *object)
{
struct page *page;
- struct kmem_cache *s;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
@@ -2707,28 +2750,8 @@ size_t ksize(const void *object)
WARN_ON(!PageCompound(page));
return PAGE_SIZE << compound_order(page);
}
- s = page->slab;
-
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->objsize;
-#endif
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
+ return slab_ksize(page->slab);
}
EXPORT_SYMBOL(ksize);
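One visible consequence of routing ksize() through slab_ksize(): the value reflects the cache's actual usable slot, so callers may get more than they asked for. A short kernel-style illustration (exact values depend on the running kernel's kmalloc caches):

#include <linux/kernel.h>
#include <linux/slab.h>

static void ksize_demo(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (p) {
		/* Served from a kmalloc-128 cache, this typically reports
		 * 128, not the 100 bytes that were requested. */
		pr_info("usable size: %zu\n", ksize(p));
		kfree(p);
	}
}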
@@ -4017,6 +4040,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(destroy_by_rcu);
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
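The new attribute makes the reservation visible from userspace; with the standard SLUB sysfs layout it should appear as /sys/kernel/slab/<cache>/reserved, reporting 0 on builds where no reservation is needed.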
#ifdef CONFIG_SLUB_DEBUG
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
@@ -4303,6 +4332,7 @@ static struct attribute *slab_attrs[] = {
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
&shrink_attr.attr,
+ &reserved_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
&slabs_attr.attr,