path: root/include/linux/vmalloc.h
author    Chris Wilson <chris@chris-wilson.co.uk>    2016-05-20 16:57:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-05-20 17:58:30 -0700
commit    80c4bd7a5e4368b680e0aeb57050a1b06eb573d8 (patch)
tree      248bfb6e975a03f0a164e38de844f48053980158 /include/linux/vmalloc.h
parent    f705ac4b39f30a6a5f8411a42114758f4d4655bc (diff)
mm/vmalloc: keep a separate lazy-free list
When mixing lots of vmallocs and set_memory_*() (which calls
vm_unmap_aliases()) I encountered situations where the performance
degraded severely due to the walking of the entire vmap_area list each
invocation.

One simple improvement is to add the lazily freed vmap_area to a
separate lockless free list, such that we then avoid having to walk the
full list on each purge.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Roman Pen <r.peniaev@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Roman Pen <r.peniaev@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/vmalloc.h')
-rw-r--r--  include/linux/vmalloc.h  |  3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d1f1d33..957adb7 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
@@ -44,7 +45,7 @@ struct vmap_area {
unsigned long flags;
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
- struct list_head purge_list; /* "lazy purge" list */
+ struct llist_node purge_list; /* "lazy purge" list */
struct vm_struct *vm;
struct rcu_head rcu_head;
};
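
The header diff above only swaps the list_head for an llist_node; the bookkeeping that uses it lives in the companion mm/vmalloc.c change (not shown here). The sketch below is illustrative only: it shows how a lockless lazy-free list built on the new purge_list field can be driven with the standard <linux/llist.h> primitives (llist_add(), llist_del_all(), llist_for_each_entry_safe()). The list head name vmap_purge_list follows the patch; lazy_free_vmap_area(), purge_lazy_vmap_areas() and free_one_vmap_area() are simplified stand-ins for the real free/purge paths, not the exact kernel helpers.

/*
 * Sketch (not part of the patch): driving a lockless lazy-free list
 * from the new llist_node field in struct vmap_area.
 */
#include <linux/llist.h>
#include <linux/vmalloc.h>

static LLIST_HEAD(vmap_purge_list);	/* pending lazily freed areas */

/* Producer: defer freeing of a vmap_area; llist_add() needs no lock. */
static void lazy_free_vmap_area(struct vmap_area *va)
{
	llist_add(&va->purge_list, &vmap_purge_list);
}

/*
 * Consumer: detach the whole pending list atomically and free it,
 * walking only the lazily freed areas instead of the full vmap_area list.
 */
static void purge_lazy_vmap_areas(void)
{
	struct llist_node *pending = llist_del_all(&vmap_purge_list);
	struct vmap_area *va, *n_va;

	llist_for_each_entry_safe(va, n_va, pending, purge_list)
		free_one_vmap_area(va);	/* hypothetical per-area free helper */
}

The llist API is what makes this cheap: llist_add() is a cmpxchg loop, so any number of unmappers can queue areas concurrently without taking a lock, and llist_del_all() hands the purger the entire batch in a single atomic exchange.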