When terminating or aborting a RIB dump the system needs to unlock and
clean up any rib entry or prefix which was locked. This happens for example
when running `bgpctl show rib | head`. This will abort the rib_dump run
and leave a locked rib entry behind. Since locked entries are not removed
from the tree, this is a memory leak.
Noticed because in debug mode the shutdown code was complaining about
non-empty RIB trees. Fix is simple: remove possible cached elements before
freeing the context.

I had to move some code around and also made the lock and unlock functions
return the element they work on so the code becomes a bit more compact and
resembles the refcnt functions a bit more.

OK? 
-- 
:wq Claudio


Index: rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
retrieving revision 1.200
diff -u -p -r1.200 rde_rib.c
--- rde_rib.c   23 Jul 2019 14:19:17 -0000      1.200
+++ rde_rib.c   24 Jul 2019 13:45:55 -0000
@@ -97,6 +97,36 @@ re_is_locked(struct rib_entry *re)
        return (re->lock != 0);
 }
 
+static inline struct prefix *
+prefix_lock(struct prefix *p)
+{
+       if (p->flags & PREFIX_FLAG_LOCKED)
+               fatalx("%s: locking locked prefix", __func__);
+       p->flags |= PREFIX_FLAG_LOCKED;
+       return p;
+}
+
+static inline struct prefix *
+prefix_unlock(struct prefix *p)
+{
+       if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
+               fatalx("%s: unlocking unlocked prefix", __func__);
+       p->flags &= ~PREFIX_FLAG_LOCKED;
+       return p;
+}
+
+static inline int
+prefix_is_locked(struct prefix *p)
+{
+       return (p->flags & PREFIX_FLAG_LOCKED) != 0;
+}
+
+static inline int
+prefix_is_dead(struct prefix *p)
+{
+       return (p->flags & PREFIX_FLAG_DEAD) != 0;
+}
+
 static inline struct rib_tree *
 rib_tree(struct rib *rib)
 {
@@ -349,8 +379,7 @@ rib_restart(struct rib_context *ctx)
 {
        struct rib_entry *re;
 
-       re = ctx->ctx_re;
-       re_unlock(re);
+       re = re_unlock(ctx->ctx_re);
 
        /* find first non empty element */
        while (re && rib_empty(re))
@@ -390,8 +419,7 @@ rib_dump_r(struct rib_context *ctx)
                if (ctx->ctx_count && i++ >= ctx->ctx_count &&
                    !re_is_locked(re)) {
                        /* store and lock last element */
-                       ctx->ctx_re = re;
-                       re_lock(re);
+                       ctx->ctx_re = re_lock(re);
                        return;
                }
                ctx->ctx_rib_call(re, ctx->ctx_arg);
@@ -442,6 +470,29 @@ rib_dump_abort(u_int16_t id)
                        continue;
                if (ctx->ctx_done)
                        ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
+               if (ctx->ctx_re && rib_empty(re_unlock(ctx->ctx_re)))
+                       rib_remove(ctx->ctx_re);
+               if (ctx->ctx_p && prefix_is_dead(prefix_unlock(ctx->ctx_p)))
+                       prefix_adjout_destroy(ctx->ctx_p);
+               LIST_REMOVE(ctx, entry);
+               free(ctx);
+       }
+}
+
+void
+rib_dump_terminate(void *arg)
+{
+       struct rib_context *ctx, *next;
+
+       LIST_FOREACH_SAFE(ctx, &rib_dumps, entry, next) {
+               if (ctx->ctx_arg != arg)
+                       continue;
+               if (ctx->ctx_done)
+                       ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
+               if (ctx->ctx_re && rib_empty(re_unlock(ctx->ctx_re)))
+                       rib_remove(ctx->ctx_re);
+               if (ctx->ctx_p && prefix_is_dead(prefix_unlock(ctx->ctx_p)))
+                       prefix_adjout_destroy(ctx->ctx_p);
                LIST_REMOVE(ctx, entry);
                free(ctx);
        }
@@ -473,21 +524,6 @@ rib_dump_new(u_int16_t id, u_int8_t aid,
        return 0;
 }
 
-void
-rib_dump_terminate(void *arg)
-{
-       struct rib_context *ctx, *next;
-
-       LIST_FOREACH_SAFE(ctx, &rib_dumps, entry, next) {
-               if (ctx->ctx_arg != arg)
-                       continue;
-               if (ctx->ctx_done)
-                       ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
-               LIST_REMOVE(ctx, entry);
-               free(ctx);
-       }
-}
-
 /* path specific functions */
 
 static struct rde_aspath *path_lookup(struct rde_aspath *);
@@ -1208,41 +1244,12 @@ prefix_withdraw(struct rde_peer *peer, s
        return (1);
 }
 
-static inline void
-prefix_lock(struct prefix *p)
-{
-       if (p->flags & PREFIX_FLAG_LOCKED)
-               fatalx("%s: locking locked prefix", __func__);
-       p->flags |= PREFIX_FLAG_LOCKED;
-}
-
-static inline void
-prefix_unlock(struct prefix *p)
-{
-       if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
-               fatalx("%s: unlocking unlocked prefix", __func__);
-       p->flags &= ~PREFIX_FLAG_LOCKED;
-}
-
-static inline int
-prefix_is_locked(struct prefix *p)
-{
-       return (p->flags & PREFIX_FLAG_LOCKED) != 0;
-}
-
-static inline int
-prefix_is_dead(struct prefix *p)
-{
-       return (p->flags & PREFIX_FLAG_DEAD) != 0;
-}
-
 static struct prefix *
 prefix_restart(struct rib_context *ctx)
 {
        struct prefix *p;
 
-       p = ctx->ctx_p;
-       prefix_unlock(p);
+       p = prefix_unlock(ctx->ctx_p);
 
        if (prefix_is_dead(p)) {
                struct prefix *next;
@@ -1251,6 +1258,7 @@ prefix_restart(struct rib_context *ctx)
                prefix_adjout_destroy(p);
                p = next;
        }
+       ctx->ctx_p = NULL;
        return p;
 }
 
@@ -1322,8 +1330,7 @@ prefix_dump_r(struct rib_context *ctx)
                if (ctx->ctx_count && i++ >= ctx->ctx_count &&
                    !prefix_is_locked(p)) {
                        /* store and lock last element */
-                       ctx->ctx_p = p;
-                       prefix_lock(p);
+                       ctx->ctx_p = prefix_lock(p);
                        return;
                }
                ctx->ctx_prefix_call(p, ctx->ctx_arg);

Reply via email to