path: root/lib
author      Davidlohr Bueso <dave@stgolabs.net>    2017-09-08 16:14:36 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2017-09-08 18:26:48 -0700
commit      cd9e61ed1eebbcd5dfad59475d41ec58d9b64b6a (patch)
tree        c1d649fce5c7a7be138856e172c2304424c1a2e9 /lib
parent      c32ee3d9abd284b4fcaacc250b101f93829c7bae (diff)
rbtree: cache leftmost node internally
Patch series "rbtree: Cache leftmost node internally", v4.

A series extending rbtrees to internally cache the leftmost node, so that we can have a fast overlap-check optimization for all interval tree users[1]. The benefits of this series are:

(i) Unify users that do their own internal leftmost-node caching.
(ii) Optimize all interval tree users.
(iii) Convert at least two new users (epoll and procfs) to the new interface.

This patch (of 16):

Red-black tree semantics imply that nodes with smaller or greater (or equal, for duplicates) keys are always to the left and right, respectively. For the kernel this is most evident in our rb_first() semantics. Enabling O(1) lookups of the smallest node in the tree can save a good chunk of cycles by not having to walk down the tree each time. A few core users already do this explicitly, such as the scheduler and rtmutexes. Interval trees would also benefit from this optimization, allowing faster overlap checking.

This patch introduces a new 'struct rb_root_cached', which is just the root with a cached pointer to the leftmost node. The reason the regular rb_root was not extended, instead of adding a new structure, is that this gives the user a choice between memory footprint and actual tree performance. The new wrappers on top of the regular rb_root calls are:

- rb_first_cached(cached_root) -- a fast replacement for rb_first.
- rb_insert_color_cached(node, cached_root, new)
- rb_erase_cached(node, cached_root)

In addition, augmented cached interfaces are also added for basic insertion and deletion operations, which becomes important for the interval tree changes.

With the exception of the inserts, which add a bool for updating the new leftmost, the interfaces are kept the same. Porting rb users to the cached version therefore becomes trivial, and keeping current rbtree semantics for users that don't care about the optimization incurs zero overhead.

Link: http://lkml.kernel.org/r/20170719014603.19029-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
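For readers new to the interface, a minimal usage sketch (not part of the patch) of how a tree user might adopt the cached root. The names my_node, key, my_tree, my_insert and my_first are illustrative only; RB_ROOT_CACHED and rb_first_cached() come from the include/linux/rbtree.h side of this patch, which the 'lib'-only diffstat below does not show.

#include <linux/rbtree.h>

struct my_node {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root_cached my_tree = RB_ROOT_CACHED;

static void my_insert(struct my_node *new)
{
	struct rb_node **link = &my_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	/* Usual rbtree descent to find the link to hang the new node on. */
	while (*link) {
		struct my_node *cur = rb_entry(*link, struct my_node, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* descended right at least once */
		}
	}

	rb_link_node(&new->node, parent, link);
	/* The bool tells the core whether to update root->rb_leftmost. */
	rb_insert_color_cached(&new->node, &my_tree, leftmost);
}

static struct my_node *my_first(void)
{
	struct rb_node *n = rb_first_cached(&my_tree);	/* O(1): just the cached pointer */

	return n ? rb_entry(n, struct my_node, node) : NULL;
}

Erasing is symmetric: rb_erase_cached(&some->node, &my_tree) keeps rb_leftmost up to date, as the rb_erase_cached() hunk in the diff below shows.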
Diffstat (limited to 'lib')
-rw-r--r--    lib/rbtree.c    34
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4ba2828..d102d9d 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -95,10 +95,14 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+ if (newleft)
+ *leftmost = node;
+
while (true) {
/*
* Loop invariant: node is red
@@ -434,19 +438,38 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, dummy_rotate);
+ __rb_insert(node, root, false, NULL, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root,
+ NULL, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);
+void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool leftmost)
+{
+ __rb_insert(node, &root->rb_root, leftmost,
+ &root->rb_leftmost, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_insert_color_cached);
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_erase_cached);
+
/*
* Augmented rbtree manipulation functions.
*
@@ -455,9 +478,10 @@ EXPORT_SYMBOL(rb_erase);
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, augment_rotate);
+ __rb_insert(node, root, newleft, leftmost, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);
@@ -502,7 +526,7 @@ struct rb_node *rb_next(const struct rb_node *node)
* as we can.
*/
if (node->rb_right) {
- node = node->rb_right;
+ node = node->rb_right;
while (node->rb_left)
node=node->rb_left;
return (struct rb_node *)node;
@@ -534,7 +558,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
* as we can.
*/
if (node->rb_left) {
- node = node->rb_left;
+ node = node->rb_left;
while (node->rb_right)
node=node->rb_right;
return (struct rb_node *)node;
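Note that rb_erase_cached() above only hands &root->rb_leftmost down to __rb_erase_augmented(); the actual leftmost update on erase lives in rbtree_augmented.h, which this 'lib'-only diffstat does not show. The idea, sketched here rather than quoted verbatim from the patch, is that when the erased node is the cached leftmost, its in-order successor becomes the new leftmost:

#include <linux/rbtree.h>

/*
 * Sketch only -- the equivalent check sits at the top of
 * __rb_erase_augmented() in the rbtree_augmented.h part of this series.
 */
static inline void update_leftmost_on_erase(struct rb_node *node,
					    struct rb_node **leftmost)
{
	if (leftmost && node == *leftmost)
		*leftmost = rb_next(node);	/* successor walk, O(log n) on this path only */
}

Non-cached callers, such as rb_erase() in the hunk above, simply pass NULL for the leftmost pointer and skip this work entirely, which is how existing users keep zero overhead.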