[PATCH v5 25/25] rbtree.h: (optional?) Add RB_INSERT_DUPE_RIGHT flag

2012-09-25 Thread Daniel Santos
I'm not sure if this is needed anywhere in the kernel. If not, it will
cause inserts run on older compilers to slow down very slightly.

This flag affects the behavior of inserts in trees where duplicate keys
are allowed.  It's generally assumed that the new node will be added to
the head of a group of nodes with the same key value.  However, this
behavior may not always be desired and this flag offers the option to
choose them to be inserted at the tail of such a group.

Signed-off-by: Daniel Santos 
---
 include/linux/rbtree.h |   79 ---
 1 files changed, 60 insertions(+), 19 deletions(-)

diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f1fbdea..2ca553b 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -264,6 +264,11 @@ static inline void rb_link_node(struct rb_node * node, 
struct rb_node * parent,
  * @RB_INSERT_REPLACES:When set, the rb_insert() will replace a value 
if it
  * matches the supplied one (valid only when
  * RB_UNIQUE_KEYS is set).
+ * @RB_INSERT_DUPE_RIGHT: Normally, when inserting a duplicate value into a
+ * tree with non-unique keys, the new value is inserted at
+ * the head of the group of same-value objects.  This
+ * flag causes inserts in such cases to put the new value
+ * at the tail of the group.
  * @RB_IS_AUGMENTED:   is an augmented tree
  * @RB_VERIFY_USAGE:   Perform checks upon node insertion for a small run-time
  * overhead. This has other usage restrictions, so read
@@ -300,12 +305,14 @@ enum rb_flags {
RB_HAS_COUNT= 0x0004,
RB_UNIQUE_KEYS  = 0x0008,
RB_INSERT_REPLACES  = 0x0010,
+   RB_INSERT_DUPE_RIGHT= 0x0020,
RB_IS_AUGMENTED = 0x0040,
RB_VERIFY_USAGE = 0x0080 & __RB_DEBUG_ENABLE_MASK,
RB_VERIFY_INTEGRITY = 0x0100 & __RB_DEBUG_ENABLE_MASK,
RB_ALL_FLAGS= RB_HAS_LEFTMOST | RB_HAS_RIGHTMOST
| RB_HAS_COUNT | RB_UNIQUE_KEYS
| RB_INSERT_REPLACES | RB_IS_AUGMENTED
+   | RB_INSERT_DUPE_RIGHT
| RB_VERIFY_USAGE | RB_VERIFY_INTEGRITY,
 };
 
@@ -504,15 +511,19 @@ void __rb_assert_good_rel(const struct rb_relationship 
*rel)
/* Due to a bug in versions of gcc prior to 4.6, the following
 * expressions are always evaluated at run-time:
 *
+* ((rel->flags & RB_UNIQUE_KEYS) && (rel->flags & 
RB_INSERT_DUPE_RIGHT))
 * (!(rel->flags & RB_UNIQUE_KEYS) && (rel->flags & RB_INSERT_REPLACES))
 *
 * The work-around for this bug is to separate each bitwise AND test using
 * an if/else construct and evaluate only the last test with the
 * BUILD_BUG_ON macro.
 */
-
-   if (rel->flags & RB_INSERT_REPLACES)
-   BUILD_BUG_ON42(!(rel->flags & RB_UNIQUE_KEYS));
+   if (rel->flags & RB_UNIQUE_KEYS)
+   /* only with non-unique keys */
+   BUILD_BUG_ON42(rel->flags & RB_INSERT_DUPE_RIGHT);
+   else
+   /* only with unique keys */
+   BUILD_BUG_ON42(rel->flags & RB_INSERT_REPLACES);
 }
 
 
@@ -840,6 +851,7 @@ struct rb_node *__rb_find_subtree(
 */
break;
else if (!diff) {
+   /* FIXME: non-unique keys broken here on 
inserts */
/* exact match */
*matched = go_left
 ? RB_MATCH_LEFT
@@ -1022,16 +1034,34 @@ struct rb_node *rb_insert(
 
parent = *p;
 
-   if (diff > 0) {
-   p = &(*p)->rb_right;
-   if (rel->flags & RB_HAS_LEFTMOST)
-   leftmost = 0;
-   } else if (!(rel->flags & RB_UNIQUE_KEYS) || diff < 0) {
-   p = &(*p)->rb_left;
-   if (rel->flags & RB_HAS_RIGHTMOST)
-   rightmost = 0;
-   } else
-   break;
+   /* when using non-unique keys, a new same-key object is
+* inserted at the head of any existing same-key objects unless
+* RB_INSERT_DUPE_RIGHT is specified, which causes them to be
+* inserted at the tail.
+*/
+   if (rel->flags & RB_INSERT_DUPE_RIGHT) {
+   if (diff < 0) {
+   p = &(*p)->rb_left;
+   if (rel->flags & RB_HAS_RIGHTMOST)
+   rightmost = 0;
+   } else if (!(rel->flags & RB_UNIQUE_KEYS) || diff > 0) {
+   

[PATCH v5 25/25] rbtree.h: (optional?) Add RB_INSERT_DUPE_RIGHT flag

2012-09-25 Thread Daniel Santos
I'm not sure if this is needed anywhere in the kernel. If not, it will
cause inserts run on older compilers to slow down very slightly.

This flag affects the behavior of inserts in trees where duplicate keys
are allowed.  It's generally assumed that the new node will be added to
the head of a group of nodes with the same key value.  However, this
behavior may not always be desired and this flag offers the option to
choose them to be inserted at the tail of such a group.

Signed-off-by: Daniel Santos daniel.san...@pobox.com
---
 include/linux/rbtree.h |   79 ---
 1 files changed, 60 insertions(+), 19 deletions(-)

diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f1fbdea..2ca553b 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -264,6 +264,11 @@ static inline void rb_link_node(struct rb_node * node, 
struct rb_node * parent,
  * @RB_INSERT_REPLACES:When set, the rb_insert() will replace a value 
if it
  * matches the supplied one (valid only when
  * RB_UNIQUE_KEYS is set).
+ * @RB_INSERT_DUPE_RIGHT: Normally, when inserting a duplicate value into a
+ * tree with non-unique keys, the new value is inserted at
+ * the head of the group of same-value objects.  This
+ * flag causes inserts in such cases to put the new value
+ * at the tail of the group.
  * @RB_IS_AUGMENTED:   is an augmented tree
  * @RB_VERIFY_USAGE:   Perform checks upon node insertion for a small run-time
  * overhead. This has other usage restrictions, so read
@@ -300,12 +305,14 @@ enum rb_flags {
RB_HAS_COUNT= 0x0004,
RB_UNIQUE_KEYS  = 0x0008,
RB_INSERT_REPLACES  = 0x0010,
+   RB_INSERT_DUPE_RIGHT= 0x0020,
RB_IS_AUGMENTED = 0x0040,
RB_VERIFY_USAGE = 0x0080 & __RB_DEBUG_ENABLE_MASK,
RB_VERIFY_INTEGRITY = 0x0100 & __RB_DEBUG_ENABLE_MASK,
RB_ALL_FLAGS= RB_HAS_LEFTMOST | RB_HAS_RIGHTMOST
| RB_HAS_COUNT | RB_UNIQUE_KEYS
| RB_INSERT_REPLACES | RB_IS_AUGMENTED
+   | RB_INSERT_DUPE_RIGHT
| RB_VERIFY_USAGE | RB_VERIFY_INTEGRITY,
 };
 
@@ -504,15 +511,19 @@ void __rb_assert_good_rel(const struct rb_relationship 
*rel)
/* Due to a bug in versions of gcc prior to 4.6, the following
 * expressions are always evaluated at run-time:
 *
+* ((rel->flags & RB_UNIQUE_KEYS) && (rel->flags & RB_INSERT_DUPE_RIGHT))
 * (!(rel->flags & RB_UNIQUE_KEYS) && (rel->flags & RB_INSERT_REPLACES))
 *
 * The work-around for this bug is to separate each bitwise AND test using
 * an if/else construct and evaluate only the last test with the
 * BUILD_BUG_ON macro.
 */
-
-   if (rel->flags & RB_INSERT_REPLACES)
-   BUILD_BUG_ON42(!(rel->flags & RB_UNIQUE_KEYS));
+   if (rel->flags & RB_UNIQUE_KEYS)
+   /* only with non-unique keys */
+   BUILD_BUG_ON42(rel->flags & RB_INSERT_DUPE_RIGHT);
+   else
+   /* only with unique keys */
+   BUILD_BUG_ON42(rel->flags & RB_INSERT_REPLACES);
 }
 
 
@@ -840,6 +851,7 @@ struct rb_node *__rb_find_subtree(
 */
break;
else if (!diff) {
+   /* FIXME: non-unique keys broken here on 
inserts */
/* exact match */
*matched = go_left
 ? RB_MATCH_LEFT
@@ -1022,16 +1034,34 @@ struct rb_node *rb_insert(
 
parent = *p;
 
-   if (diff > 0) {
-   p = &(*p)->rb_right;
-   if (rel->flags & RB_HAS_LEFTMOST)
-   leftmost = 0;
-   } else if (!(rel->flags & RB_UNIQUE_KEYS) || diff < 0) {
-   p = &(*p)->rb_left;
-   if (rel->flags & RB_HAS_RIGHTMOST)
-   rightmost = 0;
-   } else
-   break;
+   /* when using non-unique keys, a new same-key object is
+* inserted at the head of any existing same-key objects unless
+* RB_INSERT_DUPE_RIGHT is specified, which causes them to be
+* inserted at the tail.
+*/
+   if (rel->flags & RB_INSERT_DUPE_RIGHT) {
+   if (diff < 0) {
+   p = &(*p)->rb_left;
+   if (rel->flags & RB_HAS_RIGHTMOST)
+   rightmost = 0;
+   } else if (!(rel->flags & RB_UNIQUE_KEYS) || diff > 0) {
+   p = &(*p)->rb_right;
+