path: root/rbtree.h
author     Josef Bacik <jbacik@fb.com>      2014-10-10 16:57:09 -0400
committer  David Sterba <dsterba@suse.cz>   2014-10-14 10:39:40 +0200
commit     2ba12173d59af0b3f8885494eae9efbe15d143d6 (patch)
tree       a9ebce261161de1825204df3e9397f153b84ab1f /rbtree.h
parent     cdb9e22e292275237cbd93b9c4326382daff70f1 (diff)
Btrfs-progs: update rbtree libs
While debugging a broken fs we were seeing hangs in the rb_erase loops. The rbtree was simple and wasn't corrupted, so it appeared to be a bug in our rbtree library. Updating to the kernel's latest rbtree code made the infinite loop go away, so pull it back. Thanks,

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: David Sterba <dsterba@suse.cz>
Diffstat (limited to 'rbtree.h')
-rw-r--r--  rbtree.h   142
1 file changed, 46 insertions, 96 deletions
diff --git a/rbtree.h b/rbtree.h
index 3add4245..03c06d8e 100644
--- a/rbtree.h
+++ b/rbtree.h
@@ -23,72 +23,7 @@
I know it's not the cleaner way, but in C (not in C++) to get
performances and genericity...
- Some example of insert and search follows here. The search is a plain
- normal search over an ordered tree. The insert instead must be implemented
- int two steps: as first thing the code must insert the element in
- order as a red leaf in the tree, then the support library function
- rb_insert_color() must be called. Such function will do the
- not trivial work to rebalance the rbtree if necessary.
-
------------------------------------------------------------------------
-static inline struct page * rb_search_page_cache(struct inode * inode,
- unsigned long offset)
-{
- struct rb_node * n = inode->i_rb_page_cache.rb_node;
- struct page * page;
-
- while (n)
- {
- page = rb_entry(n, struct page, rb_page_cache);
-
- if (offset < page->offset)
- n = n->rb_left;
- else if (offset > page->offset)
- n = n->rb_right;
- else
- return page;
- }
- return NULL;
-}
-
-static inline struct page * __rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
- struct rb_node * parent = NULL;
- struct page * page;
-
- while (*p)
- {
- parent = *p;
- page = rb_entry(parent, struct page, rb_page_cache);
-
- if (offset < page->offset)
- p = &(*p)->rb_left;
- else if (offset > page->offset)
- p = &(*p)->rb_right;
- else
- return page;
- }
-
- rb_link_node(node, parent, p);
-
- return NULL;
-}
-
-static inline struct page * rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- struct rb_node * node)
-{
- struct page * ret;
- if ((ret = __rb_insert_page_cache(inode, offset, node)))
- goto out;
- rb_insert_color(node, &inode->i_rb_page_cache);
- out:
- return ret;
-}
------------------------------------------------------------------------
+ See Documentation/rbtree.txt for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
@@ -98,63 +33,78 @@ static inline struct page * rb_insert_page_cache(struct inode * inode,
#else
#include <btrfs/kerncompat.h>
#endif /* BTRFS_FLAT_INCLUDES */
-struct rb_node
-{
- unsigned long rb_parent_color;
-#define RB_RED 0
-#define RB_BLACK 1
+
+struct rb_node {
+ unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
-struct rb_root
-{
+struct rb_root {
struct rb_node *rb_node;
};
-#define rb_parent(r) ((struct rb_node *)((r)->rb_parent_color & ~3))
-#define rb_color(r) ((r)->rb_parent_color & 1)
-#define rb_is_red(r) (!rb_color(r))
-#define rb_is_black(r) rb_color(r)
-#define rb_set_red(r) do { (r)->rb_parent_color &= ~1; } while (0)
-#define rb_set_black(r) do { (r)->rb_parent_color |= 1; } while (0)
-static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
-{
- rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
-}
-static inline void rb_set_color(struct rb_node *rb, int color)
-{
- rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
-}
+#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
-#define RB_EMPTY_NODE(node) (rb_parent(node) == node)
-#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node) \
+ ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node) \
+ ((node)->__rb_parent_color = (unsigned long)(node))
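For illustration only (not part of the patch), a minimal sketch of how the new
'empty node' encoding is typically used: clear a node before it is first
inserted and again after rb_erase(), so RB_EMPTY_NODE() can later tell whether
the node is linked into a tree. The struct item type and its fields are
hypothetical.
-----------------------------------------------------------------------
/* Hypothetical container type, only for illustration. */
struct item {
	struct rb_node rb;
	unsigned long key;
};

static inline void item_init(struct item *item, unsigned long key)
{
	item->key = key;
	RB_CLEAR_NODE(&item->rb);		/* mark as "not in any tree" */
}

static inline void item_remove(struct rb_root *root, struct item *item)
{
	if (!RB_EMPTY_NODE(&item->rb)) {	/* only erase linked nodes */
		rb_erase(&item->rb, root);
		RB_CLEAR_NODE(&item->rb);
	}
}
-----------------------------------------------------------------------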
+
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
+
/* Find logical next and previous nodes in a tree */
-extern struct rb_node *rb_next(struct rb_node *);
-extern struct rb_node *rb_prev(struct rb_node *);
-extern struct rb_node *rb_first(struct rb_root *);
-extern struct rb_node *rb_last(struct rb_root *);
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
-extern void rb_replace_node(struct rb_node *victim, struct rb_node *xnew,
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
- node->rb_parent_color = (unsigned long )parent;
+ node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
*rb_link = node;
}
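The insert pattern described by the removed header comment still applies: walk
down to the insertion point, attach the node as a red leaf with rb_link_node(),
then let rb_insert_color() rebalance the tree. A minimal sketch (not part of the
patch), reusing the hypothetical struct item from above:
-----------------------------------------------------------------------
static inline struct item *item_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rb);

		parent = *p;
		if (new->key < cur->key)
			p = &(*p)->rb_left;
		else if (new->key > cur->key)
			p = &(*p)->rb_right;
		else
			return cur;		/* key already present */
	}

	rb_link_node(&new->rb, parent, p);	/* step 1: link as red leaf */
	rb_insert_color(&new->rb, root);	/* step 2: rebalance */
	return NULL;
}
-----------------------------------------------------------------------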
+
+#define rb_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
#endif /* _LINUX_RBTREE_H */
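
A hedged sketch of how the newly added postorder iterator might be used to free
an entire tree in one pass, with no per-node rebalancing; struct item and the
call to free() are assumptions for illustration, not part of the patch.
-----------------------------------------------------------------------
#include <stdlib.h>

static void item_tree_free(struct rb_root *root)
{
	struct item *item, *next;

	/*
	 * Children are always visited before their parent, so it is safe
	 * to free 'item' while the iteration continues.
	 */
	rbtree_postorder_for_each_entry_safe(item, next, root, rb)
		free(item);

	root->rb_node = NULL;			/* the tree is now empty */
}
-----------------------------------------------------------------------
Because nothing is erased node by node, the root must be reset by hand before
the tree is reused, which is why rb_node is cleared at the end.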