Re: bgpd change attr cache to use RB tree

2022-08-31 Thread Sebastian Benoit
Claudio Jeker(cje...@diehard.n-r-g.com) on 2022.08.31 13:56:18 +0200:
> Like all other hash tables use an RB tree instead.
> Again the calculation of the hash can be skipped because the compare
> function is probably fast enough.

that sentence does parse, but i am semantically challenged by it.

diff is ok :)

> 
> -- 
> :wq Claudio
> 
> ? ktrace.out
> ? obj
> Index: rde.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
> retrieving revision 1.571
> diff -u -p -r1.571 rde.c
> --- rde.c 31 Aug 2022 11:25:36 -  1.571
> +++ rde.c 31 Aug 2022 11:27:54 -
> @@ -197,7 +197,6 @@ rde_main(int debug, int verbose)
>  
>   /* initialize the RIB structures */
>   pt_init();
> - attr_init(attrhashsize);
>   peer_init(peerhashsize);
>  
>   /* make sure the default RIBs are setup */
> @@ -360,7 +359,6 @@ rde_dispatch_imsg_session(struct imsgbuf
> struct session_up sup;
>   struct rde_peer *peer;
>   struct rde_aspath   *asp;
> - struct rde_hashstats rdehash;
>   struct filter_set   *s;
>   struct as_set   *aset;
> struct rde_prefixset *pset;
> @@ -628,9 +626,6 @@ badnetdel:
>   case IMSG_CTL_SHOW_RIB_MEM:
>   imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0,
> imsg.hdr.pid, -1, &rdemem, sizeof(rdemem));
> - attr_hash_stats(&rdehash);
> - imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
> - imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
>   imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
>   -1, NULL, 0);
>   break;
> Index: rde.h
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
> retrieving revision 1.267
> diff -u -p -r1.267 rde.h
> --- rde.h 30 Aug 2022 18:50:21 -  1.267
> +++ rde.h 31 Aug 2022 11:27:54 -
> @@ -162,9 +162,8 @@ enum attrtypes {
>  #define ATTR_WELL_KNOWN  ATTR_TRANSITIVE
>  
>  struct attr {
> - LIST_ENTRY(attr) entry;
> + RB_ENTRY(attr)   entry;
>   u_char  *data;
> - uint64_t hash;
>   int  refcnt;
>   uint16_t len;
>   uint8_t  flags;
> @@ -429,9 +428,7 @@ int attr_write(void *, uint16_t, uint8
>   uint16_t);
>  int   attr_writebuf(struct ibuf *, uint8_t, uint8_t, void *,
>   uint16_t);
> -void  attr_init(uint32_t);
>  void  attr_shutdown(void);
> -void  attr_hash_stats(struct rde_hashstats *);
>  int   attr_optadd(struct rde_aspath *, uint8_t, uint8_t,
>   void *, uint16_t);
>  struct attr  *attr_optget(const struct rde_aspath *, uint8_t);
> Index: rde_attr.c
> ===================================================================
> RCS file: /cvs/src/usr.sbin/bgpd/rde_attr.c,v
> retrieving revision 1.129
> diff -u -p -r1.129 rde_attr.c
> --- rde_attr.c29 Aug 2022 18:18:55 -  1.129
> +++ rde_attr.c31 Aug 2022 11:27:54 -
> @@ -92,74 +92,21 @@ attr_writebuf(struct ibuf *buf, uint8_t 
>  }
>  
>  /* optional attribute specific functions */
> -int   attr_diff(struct attr *, struct attr *);
> -struct attr  *attr_alloc(uint8_t, uint8_t, const void *, uint16_t);
> -struct attr  *attr_lookup(uint8_t, uint8_t, const void *, uint16_t);
> +struct attr  *attr_alloc(uint8_t, uint8_t, void *, uint16_t);
> +struct attr  *attr_lookup(uint8_t, uint8_t, void *, uint16_t);
>  void  attr_put(struct attr *);
>  
> -struct attr_table {
> - struct attr_list *hashtbl;
> - uint64_t hashmask;
> -} attrtable;
> +static inline int attr_diff(struct attr *, struct attr *);
>  
> -SIPHASH_KEY attrtablekey;
> +RB_HEAD(attr_tree, attr) attrtable = RB_INITIALIZER();
> +RB_GENERATE_STATIC(attr_tree, attr, entry, attr_diff);
>  
> -#define ATTR_HASH(x) \
> - &attrtable.hashtbl[(x) & attrtable.hashmask]
> -
> -void
> -attr_init(uint32_t hashsize)
> -{
> - uint32_t hs, i;
> -
> - arc4random_buf(&attrtablekey, sizeof(attrtablekey));
> - for (hs = 1; hs < hashsize; hs <<= 1)
> - ;
> - attrtable.hashtbl = calloc(hs, sizeof(struct attr_list));
> - if (attrtable.hashtbl == NULL)
> - fatal("%s", __func__);
> -
> - for (i = 0; i < hs; i++)
> - LIST_INIT(&attrtable.hashtbl[i]);
> -
> - attrtable.hashmask = hs - 1;
> -}
>  
>  void
>  attr_shutdown(void)
>  {
> - uint64_t i;
> -
> - for (i = 0; i <= attrtable.hashmask; i++)
> - if (!LIST_EMPTY(&attrtable.hashtbl[i]))
> - log_warnx("%s: free non-free table", __func__);
> -
> - free(attrtable.hashtbl);
> -}
> -
> 

Re: bgpd change attr cache to use RB tree

2022-08-31 Thread Theo Buehler
On Wed, Aug 31, 2022 at 01:56:18PM +0200, Claudio Jeker wrote:
> Like all other hash tables use an RB tree instead.
> Again the calculation of the hash can be skipped because the compare
> function is probably fast enough.

Reads fine

ok tb



bgpd change attr cache to use RB tree

2022-08-31 Thread Claudio Jeker
Like all other hash tables use an RB tree instead.
Again the calculation of the hash can be skipped because the compare
function is probably fast enough.
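
The compare function itself is outside the hunk context below. The rough idea
is that it orders attrs by flags, type, len and then a memcmp of the data, so
lookups never need a hash value at all. A sketch of such a comparator
(illustrative only, not necessarily the committed attr_diff):

static inline int
attr_diff(struct attr *a, struct attr *b)
{
	int	r;

	/* total order over flags, type, len, then the raw attribute data */
	if (a->flags != b->flags)
		return (a->flags > b->flags ? 1 : -1);
	if (a->type != b->type)
		return (a->type > b->type ? 1 : -1);
	if (a->len != b->len)
		return (a->len > b->len ? 1 : -1);
	r = memcmp(a->data, b->data, a->len);
	if (r != 0)
		return (r > 0 ? 1 : -1);
	return (0);
}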

-- 
:wq Claudio

? ktrace.out
? obj
Index: rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
retrieving revision 1.571
diff -u -p -r1.571 rde.c
--- rde.c   31 Aug 2022 11:25:36 -  1.571
+++ rde.c   31 Aug 2022 11:27:54 -
@@ -197,7 +197,6 @@ rde_main(int debug, int verbose)
 
/* initialize the RIB structures */
pt_init();
-   attr_init(attrhashsize);
peer_init(peerhashsize);
 
/* make sure the default RIBs are setup */
@@ -360,7 +359,6 @@ rde_dispatch_imsg_session(struct imsgbuf
	struct session_up sup;
struct rde_peer *peer;
struct rde_aspath   *asp;
-   struct rde_hashstats rdehash;
struct filter_set   *s;
struct as_set   *aset;
	struct rde_prefixset *pset;
@@ -628,9 +626,6 @@ badnetdel:
case IMSG_CTL_SHOW_RIB_MEM:
imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0,
	imsg.hdr.pid, -1, &rdemem, sizeof(rdemem));
-   attr_hash_stats(&rdehash);
-   imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_HASH, 0,
-   imsg.hdr.pid, -1, &rdehash, sizeof(rdehash));
imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, imsg.hdr.pid,
-1, NULL, 0);
break;
Index: rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
retrieving revision 1.267
diff -u -p -r1.267 rde.h
--- rde.h   30 Aug 2022 18:50:21 -  1.267
+++ rde.h   31 Aug 2022 11:27:54 -
@@ -162,9 +162,8 @@ enum attrtypes {
 #define ATTR_WELL_KNOWN  ATTR_TRANSITIVE
 
 struct attr {
-   LIST_ENTRY(attr) entry;
+   RB_ENTRY(attr)   entry;
u_char  *data;
-   uint64_t hash;
int  refcnt;
uint16_t len;
uint8_t  flags;
@@ -429,9 +428,7 @@ int  attr_write(void *, uint16_t, uint8
uint16_t);
 int attr_writebuf(struct ibuf *, uint8_t, uint8_t, void *,
uint16_t);
-void  attr_init(uint32_t);
 void  attr_shutdown(void);
-void  attr_hash_stats(struct rde_hashstats *);
 int attr_optadd(struct rde_aspath *, uint8_t, uint8_t,
void *, uint16_t);
 struct attr  *attr_optget(const struct rde_aspath *, uint8_t);
Index: rde_attr.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_attr.c,v
retrieving revision 1.129
diff -u -p -r1.129 rde_attr.c
--- rde_attr.c  29 Aug 2022 18:18:55 -  1.129
+++ rde_attr.c  31 Aug 2022 11:27:54 -
@@ -92,74 +92,21 @@ attr_writebuf(struct ibuf *buf, uint8_t 
 }
 
 /* optional attribute specific functions */
-int attr_diff(struct attr *, struct attr *);
-struct attr  *attr_alloc(uint8_t, uint8_t, const void *, uint16_t);
-struct attr  *attr_lookup(uint8_t, uint8_t, const void *, uint16_t);
+struct attr  *attr_alloc(uint8_t, uint8_t, void *, uint16_t);
+struct attr  *attr_lookup(uint8_t, uint8_t, void *, uint16_t);
 void  attr_put(struct attr *);
 
-struct attr_table {
-   struct attr_list *hashtbl;
-   uint64_t hashmask;
-} attrtable;
+static inline int   attr_diff(struct attr *, struct attr *);
 
-SIPHASH_KEY attrtablekey;
+RB_HEAD(attr_tree, attr)   attrtable = RB_INITIALIZER();
+RB_GENERATE_STATIC(attr_tree, attr, entry, attr_diff);
 
-#define ATTR_HASH(x)   \
-   &attrtable.hashtbl[(x) & attrtable.hashmask]
-
-void
-attr_init(uint32_t hashsize)
-{
-   uint32_t hs, i;
-
-   arc4random_buf(&attrtablekey, sizeof(attrtablekey));
-   for (hs = 1; hs < hashsize; hs <<= 1)
-   ;
-   attrtable.hashtbl = calloc(hs, sizeof(struct attr_list));
-   if (attrtable.hashtbl == NULL)
-   fatal("%s", __func__);
-
-   for (i = 0; i < hs; i++)
-   LIST_INIT(&attrtable.hashtbl[i]);
-
-   attrtable.hashmask = hs - 1;
-}
 
 void
 attr_shutdown(void)
 {
-   uint64_t i;
-
-   for (i = 0; i <= attrtable.hashmask; i++)
-   if (!LIST_EMPTY(&attrtable.hashtbl[i]))
-   log_warnx("%s: free non-free table", __func__);
-
-   free(attrtable.hashtbl);
-}
-
-void
-attr_hash_stats(struct rde_hashstats *hs)
-{
-   struct attr *a;
-   uint64_t i;
-   int64_t n;
-
-   memset(hs, 0, sizeof(*hs));
-   strlcpy(hs->name, "attr hash", sizeof(hs->name));
-   hs->min = LLONG_MAX;
-   hs->num
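
The new lookup path is likewise outside the hunks shown above. With the tree,
attr_lookup can fill a throwaway struct attr on the stack with the search key
and hand it to RB_FIND, which walks the tree using the comparator; no SipHash
and no bucket walk. A sketch under that assumption (not the committed code):

struct attr *
attr_lookup(uint8_t flags, uint8_t type, void *data, uint16_t len)
{
	struct attr	needle;

	/* only the fields the comparator inspects need to be filled in */
	needle.flags = flags;
	needle.type = type;
	needle.len = len;
	needle.data = data;

	return (RB_FIND(attr_tree, &attrtable, &needle));
}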