author    jasone <jasone@FreeBSD.org>  2008-05-01 17:24:37 +0000
committer jasone <jasone@FreeBSD.org>  2008-05-01 17:24:37 +0000
commit    57d95000d8f3c31d1f1144a55e7daae057bae44c (patch)
tree      7e14b915aab3ae4f115ce8b135a81404dcb5580a /lib/libc
parent    0107e432459ee23ed66e8bbbf6639956e7aace3b (diff)
Add rb_wrap(), which creates C function wrappers for most rb_*() macros.

Add rb_foreach_next() and rb_foreach_reverse_prev(), which make it possible
to re-synchronize tree iteration after the tree has been modified.

Rename rb_tree_new() to rb_new().
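For context, a minimal sketch of how the new interface fits together. The
node type, tree type, comparison function, and extree_ prefix below are
hypothetical illustrations, not part of this commit:

    /* Hypothetical intrusive node type built on the rb.h macros. */
    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        int key;
        rb_node(ex_node_t) link;    /* Embedded tree linkage. */
    };
    typedef rb_tree(ex_node_t) ex_tree_t;

    static int
    ex_cmp(ex_node_t *a, ex_node_t *b)
    {
        return ((a->key > b->key) - (a->key < b->key));
    }

    /* Generates static extree_new(), extree_insert(), extree_search(), etc. */
    rb_wrap(static, extree_, ex_tree_t, ex_node_t, link, ex_cmp)

    static void
    example(void)
    {
        ex_tree_t t;
        ex_node_t n, key, *res;

        extree_new(&t);                 /* Wraps the renamed rb_new(). */
        n.key = 42;
        extree_insert(&t, &n);
        key.key = 42;
        res = extree_search(&t, &key);  /* res == &n */
    }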
Diffstat (limited to 'lib/libc')
-rw-r--r--  lib/libc/stdlib/rb.h  |  230
 1 file changed, 194 insertions(+), 36 deletions(-)
diff --git a/lib/libc/stdlib/rb.h b/lib/libc/stdlib/rb.h
index 7978534..10f94af 100644
--- a/lib/libc/stdlib/rb.h
+++ b/lib/libc/stdlib/rb.h
@@ -60,69 +60,69 @@ __FBSDID("$FreeBSD$");
#include <assert.h>
/* Node structure. */
-#define rb_node(a_type) \
+#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
/* Root structure. */
-#define rb_tree(a_type) \
+#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}
/* Left accessors. */
-#define rbp_left_get(a_type, a_field, a_node) \
+#define rbp_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
-#define rbp_left_set(a_type, a_field, a_node, a_left) do { \
+#define rbp_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
/* Right accessors. */
-#define rbp_right_get(a_type, a_field, a_node) \
+#define rbp_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
-#define rbp_right_set(a_type, a_field, a_node, a_right) do { \
+#define rbp_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
-#define rbp_red_get(a_type, a_field, a_node) \
+#define rbp_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
-#define rbp_color_set(a_type, a_field, a_node, a_red) do { \
+#define rbp_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
-#define rbp_red_set(a_type, a_field, a_node) do { \
+#define rbp_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
-#define rbp_black_set(a_type, a_field, a_node) do { \
+#define rbp_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
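The right and color accessors above pack the node's color into bit 0 of
rbn_right_red, relying on node alignment to guarantee that real child
pointers have that bit clear. A standalone sketch of the same
pointer-tagging trick, with illustrative names that are not part of this
header:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct demo_s demo_t;
    struct demo_s {
        demo_t *right_red;  /* Right child pointer; bit 0 is the red flag. */
    };

    static demo_t *
    demo_right_get(const demo_t *node)
    {
        /* Mask off bit 0 to recover the actual child pointer. */
        return ((demo_t *)((uintptr_t)node->right_red & ~(uintptr_t)1));
    }

    static bool
    demo_red_get(const demo_t *node)
    {
        return (((uintptr_t)node->right_red & 1) != 0);
    }

    static void
    demo_right_red_set(demo_t *node, demo_t *right, bool red)
    {
        /* Alignment assumption: real pointers never have bit 0 set. */
        assert(((uintptr_t)right & 1) == 0);
        node->right_red = (demo_t *)((uintptr_t)right | (uintptr_t)red);
    }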
/* Node initializer. */
-#define rbp_node_new(a_type, a_field, a_tree, a_node) do { \
+#define rbp_node_new(a_type, a_field, a_tree, a_node) do { \
rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
rbp_red_set(a_type, a_field, (a_node)); \
} while (0)
/* Tree initializer. */
-#define rb_tree_new(a_type, a_field, a_tree) do { \
+#define rb_new(a_type, a_field, a_tree) do { \
(a_tree)->rbt_root = &(a_tree)->rbt_nil; \
rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil); \
rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil); \
} while (0)
/* Tree operations. */
-#define rbp_black_height(a_type, a_field, a_tree, r_height) do { \
+#define rbp_black_height(a_type, a_field, a_tree, r_height) do { \
a_type *rbp_bh_t; \
for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0; \
rbp_bh_t != &(a_tree)->rbt_nil; \
@@ -133,21 +133,21 @@ struct { \
} \
} while (0)
-#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do { \
+#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do { \
for ((r_node) = (a_root); \
rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
(r_node) = rbp_left_get(a_type, a_field, (r_node))) { \
} \
} while (0)
-#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do { \
+#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do { \
for ((r_node) = (a_root); \
rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
(r_node) = rbp_right_get(a_type, a_field, (r_node))) { \
} \
} while (0)
-#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
if (rbp_right_get(a_type, a_field, (a_node)) \
!= &(a_tree)->rbt_nil) { \
rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type, \
@@ -171,7 +171,7 @@ struct { \
} \
} while (0)
-#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type, \
a_field, (a_node)), (r_node)); \
@@ -194,35 +194,35 @@ struct { \
} \
} while (0)
-#define rb_first(a_type, a_field, a_tree, r_node) do { \
+#define rb_first(a_type, a_field, a_tree, r_node) do { \
rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node)); \
if ((r_node) == &(a_tree)->rbt_nil) { \
(r_node) = NULL; \
} \
} while (0)
-#define rb_last(a_type, a_field, a_tree, r_node) do { \
+#define rb_last(a_type, a_field, a_tree, r_node) do { \
rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node); \
if ((r_node) == &(a_tree)->rbt_nil) { \
(r_node) = NULL; \
} \
} while (0)
-#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
if ((r_node) == &(a_tree)->rbt_nil) { \
(r_node) = NULL; \
} \
} while (0)
-#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
if ((r_node) == &(a_tree)->rbt_nil) { \
(r_node) = NULL; \
} \
} while (0)
-#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
int rbp_se_cmp; \
(r_node) = (a_tree)->rbt_root; \
while ((r_node) != &(a_tree)->rbt_nil \
@@ -242,7 +242,7 @@ struct { \
* Find a match if it exists. Otherwise, find the next greater node, if one
* exists.
*/
-#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
a_type *rbp_ns_t = (a_tree)->rbt_root; \
(r_node) = NULL; \
while (rbp_ns_t != &(a_tree)->rbt_nil) { \
@@ -263,7 +263,7 @@ struct { \
* Find a match if it exists. Otherwise, find the previous lesser node, if one
* exists.
*/
-#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
a_type *rbp_ps_t = (a_tree)->rbt_root; \
(r_node) = NULL; \
while (rbp_ps_t != &(a_tree)->rbt_nil) { \
@@ -280,21 +280,21 @@ struct { \
} \
} while (0)
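To make the nsearch/psearch semantics concrete, suppose the hypothetical
extree_ wrappers sketched after the commit message manage a tree holding
keys 10, 20, and 30:

    ex_node_t key, *res;

    key.key = 20;
    res = extree_nsearch(&t, &key); /* Exact match: node with key 20. */
    key.key = 25;
    res = extree_nsearch(&t, &key); /* No match: next greater node, key 30. */
    res = extree_psearch(&t, &key); /* No match: previous lesser node, key 20. */
    key.key = 35;
    res = extree_nsearch(&t, &key); /* No match and nothing greater: NULL. */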
-#define rbp_rotate_left(a_type, a_field, a_node, r_node) do { \
+#define rbp_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbp_right_get(a_type, a_field, (a_node)); \
rbp_right_set(a_type, a_field, (a_node), \
rbp_left_get(a_type, a_field, (r_node))); \
rbp_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
-#define rbp_rotate_right(a_type, a_field, a_node, r_node) do { \
+#define rbp_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbp_left_get(a_type, a_field, (a_node)); \
rbp_left_set(a_type, a_field, (a_node), \
rbp_right_get(a_type, a_field, (r_node))); \
rbp_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
-#define rbp_lean_left(a_type, a_field, a_node, r_node) do { \
+#define rbp_lean_left(a_type, a_field, a_node, r_node) do { \
bool rbp_ll_red; \
rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
rbp_ll_red = rbp_red_get(a_type, a_field, (a_node)); \
@@ -302,7 +302,7 @@ struct { \
rbp_red_set(a_type, a_field, (a_node)); \
} while (0)
-#define rbp_lean_right(a_type, a_field, a_node, r_node) do { \
+#define rbp_lean_right(a_type, a_field, a_node, r_node) do { \
bool rbp_lr_red; \
rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
rbp_lr_red = rbp_red_get(a_type, a_field, (a_node)); \
@@ -310,7 +310,7 @@ struct { \
rbp_red_set(a_type, a_field, (a_node)); \
} while (0)
-#define rbp_move_red_left(a_type, a_field, a_node, r_node) do { \
+#define rbp_move_red_left(a_type, a_field, a_node, r_node) do { \
a_type *rbp_mrl_t, *rbp_mrl_u; \
rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node)); \
rbp_red_set(a_type, a_field, rbp_mrl_t); \
@@ -335,7 +335,7 @@ struct { \
} \
} while (0)
-#define rbp_move_red_right(a_type, a_field, a_node, r_node) do { \
+#define rbp_move_red_right(a_type, a_field, a_node, r_node) do { \
a_type *rbp_mrr_t; \
rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node)); \
if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
@@ -374,7 +374,7 @@ struct { \
} \
} while (0)
-#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do { \
+#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do { \
a_type rbp_i_s; \
a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u; \
int rbp_i_cmp = 0; \
@@ -459,7 +459,7 @@ struct { \
rbp_black_set(a_type, a_field, (a_tree)->rbt_root); \
} while (0)
-#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do { \
+#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do { \
a_type rbp_r_s; \
a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u; \
int rbp_r_cmp; \
@@ -665,13 +665,98 @@ struct { \
} while (0)
/*
+ * The rb_wrap() macro provides a convenient way to wrap functions around the
+ * cpp macros. The main benefits of wrapping are that 1) repeated macro
+ * expansion can cause code bloat, especially for rb_{insert,remove}(), and
+ * 2) type, linkage, comparison functions, etc. need not be specified at every
+ * call point.
+ */
+
+#define rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_tree_type *tree) { \
+ rb_new(a_type, a_field, tree); \
+} \
+a_attr a_type * \
+a_prefix##first(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_first(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##last(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_last(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##next(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_next(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##prev(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_prev(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##search(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_search(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##nsearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_nsearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##psearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_psearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##insert(a_tree_type *tree, a_type *node) { \
+ rb_insert(a_type, a_field, a_cmp, tree, node); \
+} \
+a_attr void \
+a_prefix##remove(a_tree_type *tree, a_type *node) { \
+ rb_remove(a_type, a_field, a_cmp, tree, node); \
+}
+
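To make the second benefit concrete, compare a raw macro call with its
rb_wrap()-generated counterpart; this reuses the hypothetical extree_
example sketched after the commit message:

    ex_node_t *n;

    /*
     * Direct macro use: the type, field, and comparator must be restated,
     * and the full macro body is expanded inline at every call point.
     */
    rb_search(ex_node_t, link, ex_cmp, &t, &key, n);

    /* Generated wrapper: a single ordinary function call. */
    n = extree_search(&t, &key);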
+/*
* The iterators simulate recursion via an array of pointers that store the
* current path. This is critical to performance, since a series of calls to
* rb_{next,prev}() would require time proportional to (n lg n), whereas this
* implementation only requires time proportional to (n).
+ *
+ * Since the iterators cache a path down the tree, any tree modification may
+ * cause the cached path to become invalid. In order to continue iteration,
+ * use something like the following sequence:
+ *
+ * {
+ * a_type *node, *tnode;
+ *
+ * rb_foreach_begin(a_type, a_field, a_tree, node) {
+ * ...
+ * rb_prev(a_type, a_field, a_cmp, a_tree, node, tnode);
+ * rb_remove(a_type, a_field, a_cmp, a_tree, node);
+ * rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
+ * ...
+ * } rb_foreach_end(a_type, a_field, a_tree, node)
+ * }
+ *
+ * Note that this idiom is not advised if every iteration modifies the tree,
+ * since in that case there is no algorithmic complexity improvement over a
+ * series of rb_{next,prev}() calls, thus making the setup overhead wasted
+ * effort.
*/
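As a slightly more concrete (and still hypothetical) version of the
sequence above, here is how the extree_ example types might prune matching
nodes during a forward traversal:

    static bool
    should_remove(ex_node_t *node)
    {
        return (node->key < 0); /* Example predicate. */
    }

    static void
    prune(ex_tree_t *tree)
    {
        ex_node_t *node, *tnode;

        rb_foreach_begin(ex_node_t, link, tree, node) {
            if (should_remove(node)) {
                /* Capture the predecessor before the tree changes... */
                rb_prev(ex_node_t, link, ex_cmp, tree, node, tnode);
                rb_remove(ex_node_t, link, ex_cmp, tree, node);
                /*
                 * ...then rebuild the cached path to it.  If the removed
                 * node was the tree's first, tnode is NULL and the
                 * traversal ends.
                 */
                rb_foreach_next(ex_node_t, link, ex_cmp, tree, tnode);
            }
        } rb_foreach_end(ex_node_t, link, tree, node)
    }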
-#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
+#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
/* Compute the maximum possible tree depth (3X the black height). */\
unsigned rbp_f_height; \
rbp_black_height(a_type, a_field, a_tree, rbp_f_height); \
@@ -680,6 +765,7 @@ struct { \
/* Initialize the path to contain the left spine. */\
a_type *rbp_f_path[rbp_f_height]; \
a_type *rbp_f_node; \
+ bool rbp_f_synced = false; \
unsigned rbp_f_depth = 0; \
if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
@@ -694,7 +780,40 @@ struct { \
while (rbp_f_depth > 0) { \
(a_var) = rbp_f_path[rbp_f_depth-1];
-#define rb_foreach_end(a_type, a_field, a_tree, a_var) \
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_f_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
+ rbp_f_depth++; \
+ rbp_f_node = rbp_f_path[0]; \
+ while (true) { \
+ int rbp_f_cmp = (a_cmp)((a_node), \
+ rbp_f_path[rbp_f_depth-1]); \
+ if (rbp_f_cmp < 0) { \
+ rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else if (rbp_f_cmp > 0) { \
+ rbp_f_node = rbp_right_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_f_node != &(a_tree)->rbt_nil); \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } \
+ } \
+ rbp_f_synced = true;
+
+#define rb_foreach_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_f_synced) { \
+ rbp_f_synced = false; \
+ continue; \
+ } \
/* Find the successor. */\
if ((rbp_f_node = rbp_right_get(a_type, a_field, \
rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
@@ -723,7 +842,7 @@ struct { \
} \
}
-#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { \
+#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { \
/* Compute the maximum possible tree depth (3X the black height). */\
unsigned rbp_fr_height; \
rbp_black_height(a_type, a_field, a_tree, rbp_fr_height); \
@@ -732,6 +851,7 @@ struct { \
/* Initialize the path to contain the right spine. */\
a_type *rbp_fr_path[rbp_fr_height]; \
a_type *rbp_fr_node; \
+ bool rbp_fr_synced = false; \
unsigned rbp_fr_depth = 0; \
if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
@@ -746,7 +866,45 @@ struct { \
while (rbp_fr_depth > 0) { \
(a_var) = rbp_fr_path[rbp_fr_depth-1];
-#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var) \
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_fr_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
+ rbp_fr_depth++; \
+ rbp_fr_node = rbp_fr_path[0]; \
+ while (true) { \
+ int rbp_fr_cmp = (a_cmp)((a_node), \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ if (rbp_fr_cmp < 0) { \
+ rbp_fr_node = rbp_left_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else if (rbp_fr_cmp > 0) { \
+ rbp_fr_node = rbp_right_get(a_type, a_field,\
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_fr_node != &(a_tree)->rbt_nil); \
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } \
+ } \
+ rbp_fr_synced = true;
+
+#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_fr_synced) { \
+ rbp_fr_synced = false; \
+ continue; \
+ } \
+ if (rbp_fr_depth == 0) { \
+ /* rb_foreach_reverse_prev() was called with a NULL */\
+ /* a_node. */\
+ break; \
+ } \
/* Find the predecessor. */\
if ((rbp_fr_node = rbp_left_get(a_type, a_field, \
rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \