/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>
  (C) 2002  David Woodhouse <dwmw2@infradead.org>
  (C) 2012  Michel Lespinasse <walken@google.com>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  linux/lib/rbtree.c
*/

#include <linux/rbtree_augmented.h>
#include <linux/export.h>

/*
 * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
 *
 *  1) A node is either red or black
 *  2) The root is black
 *  3) All leaves (NULL) are black
 *  4) Both children of every red node are black
 *  5) Every simple path from root to leaves contains the same number
 *     of black nodes.
 *
 *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
 *  consecutive red nodes in a path and every red node is therefore followed by
 *  a black. So if B is the number of black nodes on every simple path (as per
 *  5), then the longest possible path due to 4 is 2B.
 *
 *  We shall indicate color with case, where black nodes are uppercase and red
 *  nodes will be lowercase. Unknown color nodes shall be drawn as red within
 *  parentheses and have some accompanying text comment.
 */
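
/*
 * To make the O(log n) bound concrete: with B black nodes on every
 * root-to-leaf path (per 5), the tree has at least 2^B - 1 nodes, so
 * B <= log2(n + 1) for an n-node tree and, by the 2B argument above,
 * no root-to-leaf path is longer than 2 * log2(n + 1).
 */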

/*
 * Notes on lockless lookups:
 *
 * All stores to the tree structure (rb_left and rb_right) must be done using
 * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
 * tree structure as seen in program order.
 *
 * These two requirements will allow lockless iteration of the tree -- not
 * correct iteration mind you, tree rotations are not atomic so a lookup might
 * miss entire subtrees.
 *
 * But they do guarantee that any such traversal will only see valid elements
 * and that it will indeed complete -- does not get stuck in a loop.
 *
 * It also guarantees that if the lookup returns an element it is the 'correct'
 * one. But not returning an element does _NOT_ mean it's not present.
 *
 * NOTE:
 *
 * Stores to __rb_parent_color are not important for simple lookups so those
 * are left undone as of now. Nor did I check for loops involving parent
 * pointers.
 */
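
/*
 * A minimal reader-side sketch of such a lockless lookup, assuming a
 * hypothetical struct mytype { struct rb_node node; int key; }; the
 * READ_ONCE() loads pair with the WRITE_ONCE() stores below.  Callers
 * would typically run this under rcu_read_lock() and, as explained
 * above, be prepared for false negatives:
 *
 *	struct mytype *lockless_lookup(struct rb_root *root, int key)
 *	{
 *		struct rb_node *node = READ_ONCE(root->rb_node);
 *
 *		while (node) {
 *			struct mytype *m = rb_entry(node, struct mytype, node);
 *
 *			if (key < m->key)
 *				node = READ_ONCE(node->rb_left);
 *			else if (key > m->key)
 *				node = READ_ONCE(node->rb_right);
 *			else
 *				return m;
 *		}
 *		return NULL;
 *	}
 */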

static inline void rb_set_black(struct rb_node *rb)
{
        rb->__rb_parent_color |= RB_BLACK;
}

static inline struct rb_node *rb_red_parent(struct rb_node *red)
{
        return (struct rb_node *)red->__rb_parent_color;
}

/*
 * Helper function for rotations:
 * - old's parent and color get assigned to new
 * - old gets assigned new as a parent and 'color' as a color.
 */
static inline void
__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
                        struct rb_root *root, int color)
{
        struct rb_node *parent = rb_parent(old);
        new->__rb_parent_color = old->__rb_parent_color;
        rb_set_parent_color(old, new, color);
        __rb_change_child(old, new, parent, root);
}

static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
            bool newleft, struct rb_node **leftmost,
            void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
        struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;

        if (newleft)
                *leftmost = node;

        while (true) {
                /*
                 * Loop invariant: node is red.
                 */
                if (unlikely(!parent)) {
                        /*
                         * The inserted node is root. Either this is the
                         * first node, or we recursed at Case 1 below and
                         * are no longer violating 4).
                         */
                        rb_set_parent_color(node, NULL, RB_BLACK);
                        break;
                }

                /*
                 * If there is a black parent, we are done.
                 * Otherwise, take some corrective action as,
                 * per 4), we don't want a red root or two
                 * consecutive red nodes.
                 */
                if (rb_is_black(parent))
                        break;

                gparent = rb_red_parent(parent);

                tmp = gparent->rb_right;
                if (parent != tmp) {    /* parent == gparent->rb_left */
                        if (tmp && rb_is_red(tmp)) {
                                /*
                                 * Case 1 - node's uncle is red (color flips).
                                 *
                                 *       G            g
                                 *      / \          / \
                                 *     p   u  -->   P   U
                                 *    /            /
                                 *   n            n
                                 *
                                 * However, since g's parent might be red, and
                                 * 4) does not allow this, we need to recurse
                                 * at g.
                                 */
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                                rb_set_parent_color(parent, gparent, RB_BLACK);
                                node = gparent;
                                parent = rb_parent(node);
                                rb_set_parent_color(node, parent, RB_RED);
                                continue;
                        }

                        tmp = parent->rb_right;
                        if (node == tmp) {
                                /*
                                 * Case 2 - node's uncle is black and node is
                                 * the parent's right child (left rotate at parent).
                                 *
                                 *      G             G
                                 *     / \           / \
                                 *    p   U  -->    n   U
                                 *     \           /
                                 *      n         p
                                 *
                                 * This still leaves us in violation of 4), the
                                 * continuation into Case 3 will fix that.
                                 */
                                tmp = node->rb_left;
                                WRITE_ONCE(parent->rb_right, tmp);
                                WRITE_ONCE(node->rb_left, parent);
                                if (tmp)
                                        rb_set_parent_color(tmp, parent,
                                                            RB_BLACK);
                                rb_set_parent_color(parent, node, RB_RED);
                                augment_rotate(parent, node);
                                parent = node;
                                tmp = node->rb_right;
                        }

                        /*
                         * Case 3 - node's uncle is black and node is
                         * the parent's left child (right rotate at gparent).
                         *
                         *        G           P
                         *       / \         / \
                         *      p   U  -->  n   g
                         *     /                 \
                         *    n                   U
                         */
                        WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
                        WRITE_ONCE(parent->rb_right, gparent);
                        if (tmp)
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                        __rb_rotate_set_parents(gparent, parent, root, RB_RED);
                        augment_rotate(gparent, parent);
                        break;
                } else {
                        tmp = gparent->rb_left;
                        if (tmp && rb_is_red(tmp)) {
                                /* Case 1 - color flips */
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                                rb_set_parent_color(parent, gparent, RB_BLACK);
                                node = gparent;
                                parent = rb_parent(node);
                                rb_set_parent_color(node, parent, RB_RED);
                                continue;
                        }

                        tmp = parent->rb_left;
                        if (node == tmp) {
                                /* Case 2 - right rotate at parent */
                                tmp = node->rb_right;
                                WRITE_ONCE(parent->rb_left, tmp);
                                WRITE_ONCE(node->rb_right, parent);
                                if (tmp)
                                        rb_set_parent_color(tmp, parent,
                                                            RB_BLACK);
                                rb_set_parent_color(parent, node, RB_RED);
                                augment_rotate(parent, node);
                                parent = node;
                                tmp = node->rb_left;
                        }

                        /* Case 3 - left rotate at gparent */
                        WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
                        WRITE_ONCE(parent->rb_left, gparent);
                        if (tmp)
                                rb_set_parent_color(tmp, gparent, RB_BLACK);
                        __rb_rotate_set_parents(gparent, parent, root, RB_RED);
                        augment_rotate(gparent, parent);
                        break;
                }
        }
}

/*
 * Inline version for rb_erase() use - we want to be able to inline
 * and eliminate the dummy_rotate callback there
 */
static __always_inline void
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
        struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;

        while (true) {
                /*
                 * Loop invariants:
                 * - node is black (or NULL on first iteration)
                 * - node is not the root (parent is not NULL)
                 * - All leaf paths going through parent and node have a
                 *   black node count that is 1 lower than other leaf paths.
                 */
                sibling = parent->rb_right;
                if (node != sibling) {  /* node == parent->rb_left */
                        if (rb_is_red(sibling)) {
                                /*
                                 * Case 1 - left rotate at parent
                                 *
                                 *     P               S
                                 *    / \             / \
                                 *   N   s    -->    p   Sr
                                 *      / \         / \
                                 *     Sl  Sr      N   Sl
                                 */
                                tmp1 = sibling->rb_left;
                                WRITE_ONCE(parent->rb_right, tmp1);
                                WRITE_ONCE(sibling->rb_left, parent);
                                rb_set_parent_color(tmp1, parent, RB_BLACK);
                                __rb_rotate_set_parents(parent, sibling, root,
                                                        RB_RED);
                                augment_rotate(parent, sibling);
                                sibling = tmp1;
                        }
                        tmp1 = sibling->rb_right;
                        if (!tmp1 || rb_is_black(tmp1)) {
                                tmp2 = sibling->rb_left;
                                if (!tmp2 || rb_is_black(tmp2)) {
                                        /*
                                         * Case 2 - sibling color flip
                                         * (p could be either color here)
                                         *
                                         *    (p)           (p)
                                         *    / \           / \
                                         *   N   S    -->  N   s
                                         *      / \           / \
                                         *     Sl  Sr        Sl  Sr
                                         *
                                         * This leaves us violating 5) which
                                         * can be fixed by flipping p to black
                                         * if it was red, or by recursing at p.
                                         * p is red when coming from Case 1.
                                         */
                                        rb_set_parent_color(sibling, parent,
                                                            RB_RED);
                                        if (rb_is_red(parent))
                                                rb_set_black(parent);
                                        else {
                                                node = parent;
                                                parent = rb_parent(node);
                                                if (parent)
                                                        continue;
                                        }
                                        break;
                                }
                                /*
                                 * Case 3 - right rotate at sibling
                                 * (p could be either color here)
                                 *
                                 *   (p)           (p)
                                 *   / \           / \
                                 *  N   S    -->  N   sl
                                 *     / \             \
                                 *    sl  Sr            S
                                 *                       \
                                 *                        Sr
                                 *
                                 * Note: p might be red, and then both
                                 * p and sl are red after the rotation (which
                                 * breaks property 4). This is fixed in
                                 * Case 4 (in __rb_rotate_set_parents(),
                                 *         which sets sl to the color of p
                                 *         and sets p RB_BLACK)
                                 *
                                 *   (p)            (sl)
                                 *   / \            /  \
                                 *  N   sl   -->   P    S
                                 *       \        /      \
                                 *        S      N        Sr
                                 *         \
                                 *          Sr
                                 */
                                tmp1 = tmp2->rb_right;
                                WRITE_ONCE(sibling->rb_left, tmp1);
                                WRITE_ONCE(tmp2->rb_right, sibling);
                                WRITE_ONCE(parent->rb_right, tmp2);
                                if (tmp1)
                                        rb_set_parent_color(tmp1, sibling,
                                                            RB_BLACK);
                                augment_rotate(sibling, tmp2);
                                tmp1 = sibling;
                                sibling = tmp2;
                        }
                        /*
                         * Case 4 - left rotate at parent + color flips
                         * (p and sl could be either color here.
                         *  After rotation, p becomes black, s acquires
                         *  p's color, and sl keeps its color)
                         *
                         *      (p)             (s)
                         *      / \             / \
                         *     N   S     -->   P   Sr
                         *        / \         / \
                         *      (sl) sr      N  (sl)
                         */
                        tmp2 = sibling->rb_left;
                        WRITE_ONCE(parent->rb_right, tmp2);
                        WRITE_ONCE(sibling->rb_left, parent);
                        rb_set_parent_color(tmp1, sibling, RB_BLACK);
                        if (tmp2)
                                rb_set_parent(tmp2, parent);
                        __rb_rotate_set_parents(parent, sibling, root,
                                                RB_BLACK);
                        augment_rotate(parent, sibling);
                        break;
                } else {
                        sibling = parent->rb_left;
                        if (rb_is_red(sibling)) {
                                /* Case 1 - right rotate at parent */
                                tmp1 = sibling->rb_right;
                                WRITE_ONCE(parent->rb_left, tmp1);
                                WRITE_ONCE(sibling->rb_right, parent);
                                rb_set_parent_color(tmp1, parent, RB_BLACK);
                                __rb_rotate_set_parents(parent, sibling, root,
                                                        RB_RED);
                                augment_rotate(parent, sibling);
                                sibling = tmp1;
                        }
                        tmp1 = sibling->rb_left;
                        if (!tmp1 || rb_is_black(tmp1)) {
                                tmp2 = sibling->rb_right;
                                if (!tmp2 || rb_is_black(tmp2)) {
                                        /* Case 2 - sibling color flip */
                                        rb_set_parent_color(sibling, parent,
                                                            RB_RED);
                                        if (rb_is_red(parent))
                                                rb_set_black(parent);
                                        else {
                                                node = parent;
                                                parent = rb_parent(node);
                                                if (parent)
                                                        continue;
                                        }
                                        break;
                                }
                                /* Case 3 - left rotate at sibling */
                                tmp1 = tmp2->rb_left;
                                WRITE_ONCE(sibling->rb_right, tmp1);
                                WRITE_ONCE(tmp2->rb_left, sibling);
                                WRITE_ONCE(parent->rb_left, tmp2);
                                if (tmp1)
                                        rb_set_parent_color(tmp1, sibling,
                                                            RB_BLACK);
                                augment_rotate(sibling, tmp2);
                                tmp1 = sibling;
                                sibling = tmp2;
                        }
                        /* Case 4 - right rotate at parent + color flips */
                        tmp2 = sibling->rb_right;
                        WRITE_ONCE(parent->rb_left, tmp2);
                        WRITE_ONCE(sibling->rb_right, parent);
                        rb_set_parent_color(tmp1, sibling, RB_BLACK);
                        if (tmp2)
                                rb_set_parent(tmp2, parent);
                        __rb_rotate_set_parents(parent, sibling, root,
                                                RB_BLACK);
                        augment_rotate(parent, sibling);
                        break;
                }
        }
}

/* Non-inline version for rb_erase_augmented() use */
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
        ____rb_erase_color(parent, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_erase_color);

/*
 * Non-augmented rbtree manipulation functions.
 *
 * We use dummy augmented callbacks here, and have the compiler optimize them
 * out of the rb_insert_color() and rb_erase() function definitions.
 */

static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}

static const struct rb_augment_callbacks dummy_callbacks = {
        .propagate = dummy_propagate,
        .copy = dummy_copy,
        .rotate = dummy_rotate
};

void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
        __rb_insert(node, root, false, NULL, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);
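
/*
 * Typical caller-side usage (a minimal sketch, with a hypothetical
 * struct mytype { struct rb_node node; int key; }): the caller walks
 * down the tree itself to find the insertion point, links the new node
 * with rb_link_node(), and then lets rb_insert_color() rebalance:
 *
 *	int my_insert(struct rb_root *root, struct mytype *data)
 *	{
 *		struct rb_node **new = &root->rb_node, *parent = NULL;
 *
 *		while (*new) {
 *			struct mytype *this = rb_entry(*new, struct mytype, node);
 *
 *			parent = *new;
 *			if (data->key < this->key)
 *				new = &(*new)->rb_left;
 *			else if (data->key > this->key)
 *				new = &(*new)->rb_right;
 *			else
 *				return -EEXIST;
 *		}
 *
 *		rb_link_node(&data->node, parent, new);
 *		rb_insert_color(&data->node, root);
 *		return 0;
 *	}
 */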

void rb_erase(struct rb_node *node, struct rb_root *root)
{
        struct rb_node *rebalance;
        rebalance = __rb_erase_augmented(node, root,
                                         NULL, &dummy_callbacks);
        if (rebalance)
                ____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);

void rb_insert_color_cached(struct rb_node *node,
                            struct rb_root_cached *root, bool leftmost)
{
        __rb_insert(node, &root->rb_root, leftmost,
                    &root->rb_leftmost, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color_cached);

void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
        struct rb_node *rebalance;
        rebalance = __rb_erase_augmented(node, &root->rb_root,
                                         &root->rb_leftmost, &dummy_callbacks);
        if (rebalance)
                ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase_cached);
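
/*
 * The cached variants keep root->rb_leftmost up to date so that
 * rb_first_cached() is O(1).  A minimal sketch of caller-side usage,
 * reusing the hypothetical struct mytype from the sketch above; the
 * caller must tell the insert whether the new node became the leftmost:
 *
 *	struct rb_root_cached mycache = RB_ROOT_CACHED;
 *
 *	void my_insert_cached(struct rb_root_cached *root, struct mytype *data)
 *	{
 *		struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
 *		bool leftmost = true;
 *
 *		while (*new) {
 *			struct mytype *this = rb_entry(*new, struct mytype, node);
 *
 *			parent = *new;
 *			if (data->key < this->key) {
 *				new = &(*new)->rb_left;
 *			} else {
 *				new = &(*new)->rb_right;
 *				leftmost = false;
 *			}
 *		}
 *
 *		rb_link_node(&data->node, parent, new);
 *		rb_insert_color_cached(&data->node, root, leftmost);
 *	}
 *
 * Removal is then rb_erase_cached(&data->node, &mycache), and the
 * smallest element is rb_first_cached(&mycache).
 */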

/*
 * Augmented rbtree manipulation functions.
 *
 * This instantiates the same __always_inline functions as in the non-augmented
 * case, but this time with user-defined callbacks.
 */

void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
                           bool newleft, struct rb_node **leftmost,
                           void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
        __rb_insert(node, root, newleft, leftmost, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);

/*
 * This function returns the first node (in sort order) of the tree.
 */
struct rb_node *rb_first(const struct rb_root *root)
{
        struct rb_node *n;

        n = root->rb_node;
        if (!n)
                return NULL;
        while (n->rb_left)
                n = n->rb_left;
        return n;
}
EXPORT_SYMBOL(rb_first);

struct rb_node *rb_last(const struct rb_root *root)
{
        struct rb_node *n;

        n = root->rb_node;
        if (!n)
                return NULL;
        while (n->rb_right)
                n = n->rb_right;
        return n;
}
EXPORT_SYMBOL(rb_last);

struct rb_node *rb_next(const struct rb_node *node)
{
        struct rb_node *parent;

        if (RB_EMPTY_NODE(node))
                return NULL;

        /*
         * If we have a right-hand child, go down and then left as far
         * as we can.
         */
        if (node->rb_right) {
                node = node->rb_right;
                while (node->rb_left)
                        node = node->rb_left;
                return (struct rb_node *)node;
        }

        /*
         * No right-hand children. Everything down and left is smaller than us,
         * so any 'next' node must be in the general direction of our parent.
         * Go up the tree; any time the ancestor is a right-hand child of its
         * parent, keep going up. First time it's a left-hand child of its
         * parent, said parent is our 'next' node.
         */
        while ((parent = rb_parent(node)) && node == parent->rb_right)
                node = parent;

        return parent;
}
EXPORT_SYMBOL(rb_next);
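
/*
 * rb_first()/rb_next() together give an in-order (sorted) walk over the
 * whole tree.  A short usage sketch, again with the hypothetical
 * struct mytype from above:
 *
 *	struct rb_node *p;
 *
 *	for (p = rb_first(root); p; p = rb_next(p)) {
 *		struct mytype *m = rb_entry(p, struct mytype, node);
 *
 *		pr_info("key=%d\n", m->key);
 *	}
 */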

struct rb_node *rb_prev(const struct rb_node *node)
{
        struct rb_node *parent;

        if (RB_EMPTY_NODE(node))
                return NULL;

        /*
         * If we have a left-hand child, go down and then right as far
         * as we can.
         */
        if (node->rb_left) {
                node = node->rb_left;
                while (node->rb_right)
                        node = node->rb_right;
                return (struct rb_node *)node;
        }

        /*
         * No left-hand children. Go up till we find an ancestor which
         * is a right-hand child of its parent.
         */
        while ((parent = rb_parent(node)) && node == parent->rb_left)
                node = parent;

        return parent;
}
EXPORT_SYMBOL(rb_prev);

void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                     struct rb_root *root)
{
        struct rb_node *parent = rb_parent(victim);

        /* Copy the pointers/colour from the victim to the replacement */
        *new = *victim;

        /* Set the surrounding nodes to point to the replacement */
        if (victim->rb_left)
                rb_set_parent(victim->rb_left, new);
        if (victim->rb_right)
                rb_set_parent(victim->rb_right, new);
        __rb_change_child(victim, new, parent, root);
}
EXPORT_SYMBOL(rb_replace_node);

void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
                            struct rb_root_cached *root)
{
        rb_replace_node(victim, new, &root->rb_root);

        if (root->rb_leftmost == victim)
                root->rb_leftmost = new;
}
EXPORT_SYMBOL(rb_replace_node_cached);

void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
                         struct rb_root *root)
{
        struct rb_node *parent = rb_parent(victim);

        /* Copy the pointers/colour from the victim to the replacement */
        *new = *victim;

        /* Set the surrounding nodes to point to the replacement */
        if (victim->rb_left)
                rb_set_parent(victim->rb_left, new);
        if (victim->rb_right)
                rb_set_parent(victim->rb_right, new);

        /* Set the parent's pointer to the new node last after an RCU barrier
         * so that the pointers onwards are seen to be set correctly when doing
         * an RCU walk over the tree.
         */
        __rb_change_child_rcu(victim, new, parent, root);
}
EXPORT_SYMBOL(rb_replace_node_rcu);

static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
        for (;;) {
                if (node->rb_left)
                        node = node->rb_left;
                else if (node->rb_right)
                        node = node->rb_right;
                else
                        return (struct rb_node *)node;
        }
}

struct rb_node *rb_next_postorder(const struct rb_node *node)
{
        const struct rb_node *parent;
        if (!node)
                return NULL;
        parent = rb_parent(node);

        /* If we're sitting on node, we've already seen our children */
        if (parent && node == parent->rb_left && parent->rb_right) {
                /* If we are the parent's left node, go to the parent's right
                 * node then all the way down to the left */
                return rb_left_deepest_node(parent->rb_right);
        } else
                /* Otherwise we are the parent's right node, and the parent
                 * should be next */
                return (struct rb_node *)parent;
}
EXPORT_SYMBOL(rb_next_postorder);

struct rb_node *rb_first_postorder(const struct rb_root *root)
{
        if (!root->rb_node)
                return NULL;

        return rb_left_deepest_node(root->rb_node);
}
EXPORT_SYMBOL(rb_first_postorder);
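
/*
 * Because the postorder walk visits children before their parent, it is
 * safe to free each entry as it is visited.  A minimal teardown sketch,
 * again with the hypothetical struct mytype; the
 * rbtree_postorder_for_each_entry_safe() helper in <linux/rbtree.h> wraps
 * rb_first_postorder()/rb_next_postorder() for exactly this pattern:
 *
 *	struct mytype *pos, *n;
 *
 *	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
 *		kfree(pos);
 *	*root = RB_ROOT;
 */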