Re: radix lookup w/o KERNEL_LOCK()
On Fri, Jun 09, 2017 at 04:40:18PM +0200, Martin Pieuchot wrote: > Now with the correct diff: OK bluhm@ > > Index: net/radix.c > === > RCS file: /cvs/src/sys/net/radix.c,v > retrieving revision 1.56 > diff -u -p -r1.56 radix.c > --- net/radix.c 24 Jan 2017 10:08:30 - 1.56 > +++ net/radix.c 9 Jun 2017 14:22:16 - > @@ -48,24 +48,32 @@ > > #include > > -static unsigned int max_keylen; > -struct radix_node_head *mask_rnhead; > -static char *addmask_key; > -static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, > -1}; > -static char *rn_zeros, *rn_ones; > +#define SALEN(sa)(*(u_char *)(sa)) > + > +/* > + * Read-only variables, allocated & filled during rn_init(). > + */ > +static char *rn_zeros; /* array of 0s */ > +static char *rn_ones; /* array of 1s */ > +static unsigned int max_keylen;/* size of the above arrays */ > +#define KEYLEN_LIMIT 64 /* maximum allowed keylen */ > > -struct pool rtmask_pool; /* pool for radix_mask structures */ > > -#define rn_masktop (mask_rnhead->rnh_treetop) > +struct radix_node_head *mask_rnhead; /* head of shared mask tree */ > +struct pool rtmask_pool; /* pool for radix_mask structures */ > > static inline int rn_satisfies_leaf(char *, struct radix_node *, int); > static inline int rn_lexobetter(void *, void *); > static inline struct radix_mask *rn_new_radix_mask(struct radix_node *, > struct radix_mask *); > > +int rn_refines(void *, void *); > +int rn_inithead0(struct radix_node_head *, int); > +struct radix_node *rn_addmask(void *, int, int); > struct radix_node *rn_insert(void *, struct radix_node_head *, int *, > struct radix_node [2]); > struct radix_node *rn_newpair(void *, int, struct radix_node[2]); > +void rn_link_dupedkey(struct radix_node *, struct radix_node *, int); > > static inline struct radix_node *rn_search(void *, struct radix_node *); > struct radix_node *rn_search_m(void *, struct radix_node *, void *); > @@ -205,11 +213,11 @@ rn_satisfies_leaf(char *trial, struct ra > char *cplim; > int 
length; > > - length = min(*(u_char *)cp, *(u_char *)cp2); > + length = min(SALEN(cp), SALEN(cp2)); > if (cp3 == NULL) > cp3 = rn_ones; > else > - length = min(length, *(u_char *)cp3); > + length = min(length, SALEN(cp3)); > cplim = cp + length; > cp += skip; > cp2 += skip; > @@ -246,9 +254,9 @@ rn_match(void *v_arg, struct radix_node_ >* are probably the most common case... >*/ > if (t->rn_mask) > - vlen = *(u_char *)t->rn_mask; > + vlen = SALEN(t->rn_mask); > else > - vlen = *(u_char *)v; > + vlen = SALEN(v); > cp = v + off; > cp2 = t->rn_key + off; > cplim = v + vlen; > @@ -361,7 +369,7 @@ rn_insert(void *v_arg, struct radix_node > caddr_t cp, cp2, cplim; > int vlen, cmp_res; > > - vlen = *(u_char *)v; > + vlen = SALEN(v); > cp = v + off; > cp2 = t->rn_key + off; > cplim = v + vlen; > @@ -413,9 +421,9 @@ rn_addmask(void *n_arg, int search, int > caddr_t cp, cplim; > int b = 0, mlen, j; > int maskduplicated, m0, isnormal; > - static int last_zeroed = 0; > + char addmask_key[KEYLEN_LIMIT]; > > - if ((mlen = *(u_char *)netmask) > max_keylen) > + if ((mlen = SALEN(netmask)) > max_keylen) > mlen = max_keylen; > if (skip == 0) > skip = 1; > @@ -431,15 +439,11 @@ rn_addmask(void *n_arg, int search, int > for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) > cp--; > mlen = cp - addmask_key; > - if (mlen <= skip) { > - if (m0 >= last_zeroed) > - last_zeroed = mlen; > + if (mlen <= skip) > return (mask_rnhead->rnh_nodes); > - } > - if (m0 < last_zeroed) > - memset(addmask_key + m0, 0, last_zeroed - m0); > - *addmask_key = last_zeroed = mlen; > - tm = rn_search(addmask_key, rn_masktop); > + memset(addmask_key + m0, 0, max_keylen - m0); > + SALEN(addmask_key) = mlen; > + tm = rn_search(addmask_key, mask_rnhead->rnh_treetop); > if (memcmp(addmask_key, tm->rn_key, mlen) != 0) > tm = NULL; > if (tm || search) > @@ -452,7 +456,7 @@ rn_addmask(void *n_arg, int search, int > memcpy(cp, addmask_key, mlen); > tm = rn_insert(cp, mask_rnhead, &maskduplicated, tm); > if 
(maskduplicated) { > - log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); > + log(LOG_ERR, "%s: mask impossibly already in tree\n", __func__); > free(saved_tm, M_RTABLE, 0); > return (tm); > } > @@ -464,6 +468,9 @@ rn_addmask(void *n_arg, int search, int > for (cp
Re: radix lookup w/o KERNEL_LOCK()
On 09/06/17(Fri) 16:23, Martin Pieuchot wrote: > On 09/06/17(Fri) 15:46, Alexander Bluhm wrote: > > On Fri, Jun 09, 2017 at 03:11:05PM +0200, Martin Pieuchot wrote: > > > > The static variable last_zeroed does not look MP safe. If I get > > > > this code correctly it is only an optimization to avoid multiple > > > > zeroing in addmask_key. This does not work anyway with addmask_key > > > > on the stack. > > > > > > You're right, I removed the 'static'. > > > > As last_zeroed is always 0 now, there is dead code left over. > > I think this variable should be killed. > > I agree. > > > > @@ -438,8 +447,8 @@ rn_addmask(void *n_arg, int search, int > > > } > > > if (m0 < last_zeroed) > > > memset(addmask_key + m0, 0, last_zeroed - m0); > > > - *addmask_key = last_zeroed = mlen; > > > - tm = rn_search(addmask_key, rn_masktop); > > > + SALEN(addmask_key) = mlen; > > > > This assumes that addmask_key has been zeroed in rn_init(). > > I think now it must be > > memset(addmask_key + m0, 0, sizeof(addmask_key) - m0); > > Maybe the sizeof(addmask_key) could be optimized by using mlen > > or max_keylen instead. > > I couldn't convince myself that rn_search() do not check bits > after 'mlen' so I added the memset() as you suggested. Now with the correct diff: Index: net/radix.c === RCS file: /cvs/src/sys/net/radix.c,v retrieving revision 1.56 diff -u -p -r1.56 radix.c --- net/radix.c 24 Jan 2017 10:08:30 - 1.56 +++ net/radix.c 9 Jun 2017 14:22:16 - @@ -48,24 +48,32 @@ #include -static unsigned int max_keylen; -struct radix_node_head *mask_rnhead; -static char *addmask_key; -static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; -static char *rn_zeros, *rn_ones; +#define SALEN(sa) (*(u_char *)(sa)) + +/* + * Read-only variables, allocated & filled during rn_init(). 
+ */ +static char*rn_zeros; /* array of 0s */ +static char*rn_ones; /* array of 1s */ +static unsigned int max_keylen;/* size of the above arrays */ +#define KEYLEN_LIMIT64 /* maximum allowed keylen */ -struct pool rtmask_pool; /* pool for radix_mask structures */ -#define rn_masktop (mask_rnhead->rnh_treetop) +struct radix_node_head *mask_rnhead; /* head of shared mask tree */ +struct pool rtmask_pool; /* pool for radix_mask structures */ static inline int rn_satisfies_leaf(char *, struct radix_node *, int); static inline int rn_lexobetter(void *, void *); static inline struct radix_mask *rn_new_radix_mask(struct radix_node *, struct radix_mask *); +int rn_refines(void *, void *); +int rn_inithead0(struct radix_node_head *, int); +struct radix_node *rn_addmask(void *, int, int); struct radix_node *rn_insert(void *, struct radix_node_head *, int *, struct radix_node [2]); struct radix_node *rn_newpair(void *, int, struct radix_node[2]); +void rn_link_dupedkey(struct radix_node *, struct radix_node *, int); static inline struct radix_node *rn_search(void *, struct radix_node *); struct radix_node *rn_search_m(void *, struct radix_node *, void *); @@ -205,11 +213,11 @@ rn_satisfies_leaf(char *trial, struct ra char *cplim; int length; - length = min(*(u_char *)cp, *(u_char *)cp2); + length = min(SALEN(cp), SALEN(cp2)); if (cp3 == NULL) cp3 = rn_ones; else - length = min(length, *(u_char *)cp3); + length = min(length, SALEN(cp3)); cplim = cp + length; cp += skip; cp2 += skip; @@ -246,9 +254,9 @@ rn_match(void *v_arg, struct radix_node_ * are probably the most common case... 
*/ if (t->rn_mask) - vlen = *(u_char *)t->rn_mask; + vlen = SALEN(t->rn_mask); else - vlen = *(u_char *)v; + vlen = SALEN(v); cp = v + off; cp2 = t->rn_key + off; cplim = v + vlen; @@ -361,7 +369,7 @@ rn_insert(void *v_arg, struct radix_node caddr_t cp, cp2, cplim; int vlen, cmp_res; - vlen = *(u_char *)v; + vlen = SALEN(v); cp = v + off; cp2 = t->rn_key + off; cplim = v + vlen; @@ -413,9 +421,9 @@ rn_addmask(void *n_arg, int search, int caddr_t cp, cplim; int b = 0, mlen, j; int maskduplicated, m0, isnormal; - static int last_zeroed = 0; + char addmask_key[KEYLEN_LIMIT]; - if ((mlen = *(u_char *)netmask) > max_keylen) + if ((mlen = SALEN(netmask)) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; @@ -431,15 +439,11 @@ rn_addmask(void *n_arg, int search, int for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) cp--; mlen = cp - addmask_key; - if (mlen <= skip) { - if (m0 >= last_zeroed) -
Re: radix lookup w/o KERNEL_LOCK()
On Fri, Jun 09, 2017 at 04:23:13PM +0200, Martin Pieuchot wrote: > I couldn't convince myself that rn_search() do not check bits > after 'mlen' so I added the memset() as you suggested. It looks like you forgot to put the memset() in the diff. > @@ -432,14 +440,10 @@ rn_addmask(void *n_arg, int search, int > cp--; > mlen = cp - addmask_key; > if (mlen <= skip) { > - if (m0 >= last_zeroed) > - last_zeroed = mlen; > return (mask_rnhead->rnh_nodes); > } > - if (m0 < last_zeroed) > - memset(addmask_key + m0, 0, last_zeroed - m0); > - *addmask_key = last_zeroed = mlen; > - tm = rn_search(addmask_key, rn_masktop); > + SALEN(addmask_key) = mlen; > + tm = rn_search(addmask_key, mask_rnhead->rnh_treetop); > if (memcmp(addmask_key, tm->rn_key, mlen) != 0) > tm = NULL; > if (tm || search) With memset() added, OK bluhm@
Re: radix lookup w/o KERNEL_LOCK()
On 09/06/17(Fri) 15:46, Alexander Bluhm wrote: > On Fri, Jun 09, 2017 at 03:11:05PM +0200, Martin Pieuchot wrote: > > > The static variable last_zeroed does not look MP safe. If I get > > > this code correctly it is only an optimization to avoid multiple > > > zeroing in addmask_key. This does not work anyway with addmask_key > > > on the stack. > > > > You're right, I removed the 'static'. > > As last_zeroed is always 0 now, there is dead code left over. > I think this variable should be killed. I agree. > > @@ -438,8 +447,8 @@ rn_addmask(void *n_arg, int search, int > > } > > if (m0 < last_zeroed) > > memset(addmask_key + m0, 0, last_zeroed - m0); > > - *addmask_key = last_zeroed = mlen; > > - tm = rn_search(addmask_key, rn_masktop); > > + SALEN(addmask_key) = mlen; > > This assumes that addmask_key has been zeroed in rn_init(). > I think now it must be > memset(addmask_key + m0, 0, sizeof(addmask_key) - m0); > Maybe the sizeof(addmask_key) could be optimized by using mlen > or max_keylen instead. I couldn't convince myself that rn_search() do not check bits after 'mlen' so I added the memset() as you suggested. Index: net/radix.c === RCS file: /cvs/src/sys/net/radix.c,v retrieving revision 1.56 diff -u -p -r1.56 radix.c --- net/radix.c 24 Jan 2017 10:08:30 - 1.56 +++ net/radix.c 9 Jun 2017 14:04:12 - @@ -48,24 +48,32 @@ #include -static unsigned int max_keylen; -struct radix_node_head *mask_rnhead; -static char *addmask_key; -static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; -static char *rn_zeros, *rn_ones; +#define SALEN(sa) (*(u_char *)(sa)) -struct pool rtmask_pool; /* pool for radix_mask structures */ +/* + * Read-only variables, allocated & filled during rn_init(). 
+ */ +static char*rn_zeros; /* array of 0s */ +static char*rn_ones; /* array of 1s */ +static unsigned int max_keylen;/* size of the above arrays */ +#define KEYLEN_LIMIT64 /* maximum allowed keylen */ -#define rn_masktop (mask_rnhead->rnh_treetop) + +struct radix_node_head *mask_rnhead; /* head of shared mask tree */ +struct pool rtmask_pool; /* pool for radix_mask structures */ static inline int rn_satisfies_leaf(char *, struct radix_node *, int); static inline int rn_lexobetter(void *, void *); static inline struct radix_mask *rn_new_radix_mask(struct radix_node *, struct radix_mask *); +int rn_refines(void *, void *); +int rn_inithead0(struct radix_node_head *, int); +struct radix_node *rn_addmask(void *, int, int); struct radix_node *rn_insert(void *, struct radix_node_head *, int *, struct radix_node [2]); struct radix_node *rn_newpair(void *, int, struct radix_node[2]); +void rn_link_dupedkey(struct radix_node *, struct radix_node *, int); static inline struct radix_node *rn_search(void *, struct radix_node *); struct radix_node *rn_search_m(void *, struct radix_node *, void *); @@ -413,9 +421,9 @@ rn_addmask(void *n_arg, int search, int caddr_t cp, cplim; int b = 0, mlen, j; int maskduplicated, m0, isnormal; - static int last_zeroed = 0; + char addmask_key[KEYLEN_LIMIT]; - if ((mlen = *(u_char *)netmask) > max_keylen) + if ((mlen = SALEN(netmask)) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; @@ -432,14 +440,10 @@ rn_addmask(void *n_arg, int search, int cp--; mlen = cp - addmask_key; if (mlen <= skip) { - if (m0 >= last_zeroed) - last_zeroed = mlen; return (mask_rnhead->rnh_nodes); } - if (m0 < last_zeroed) - memset(addmask_key + m0, 0, last_zeroed - m0); - *addmask_key = last_zeroed = mlen; - tm = rn_search(addmask_key, rn_masktop); + SALEN(addmask_key) = mlen; + tm = rn_search(addmask_key, mask_rnhead->rnh_treetop); if (memcmp(addmask_key, tm->rn_key, mlen) != 0) tm = NULL; if (tm || search) @@ -452,7 +456,7 @@ rn_addmask(void *n_arg, int 
search, int memcpy(cp, addmask_key, mlen); tm = rn_insert(cp, mask_rnhead, &maskduplicated, tm); if (maskduplicated) { - log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); + log(LOG_ERR, "%s: mask impossibly already in tree\n", __func__); free(saved_tm, M_RTABLE, 0); return (tm); } @@ -464,6 +468,9 @@ rn_addmask(void *n_arg, int search, int for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) cp++; if (cp != cplim) { + static const char normal_chars[] = { + 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1 + }; for (j = 0x80; (j & *cp) != 0; j >>= 1)
Re: radix lookup w/o KERNEL_LOCK()
On Fri, Jun 09, 2017 at 03:11:05PM +0200, Martin Pieuchot wrote: > > The static variable last_zeroed does not look MP safe. If I get > > this code correctly it is only an optimization to avoid multiple > > zeroing in addmask_key. This does not work anyway with addmask_key > > on the stack. > > You're right, I removed the 'static'. As last_zeroed is always 0 now, there is dead code left over. I think this variable should be killed. > @@ -438,8 +447,8 @@ rn_addmask(void *n_arg, int search, int > } > if (m0 < last_zeroed) > memset(addmask_key + m0, 0, last_zeroed - m0); > - *addmask_key = last_zeroed = mlen; > - tm = rn_search(addmask_key, rn_masktop); > + SALEN(addmask_key) = mlen; This assumes that addmask_key has been zeroed in rn_init(). I think now it must be memset(addmask_key + m0, 0, sizeof(addmask_key) - m0); Maybe the sizeof(addmask_key) could be optimized by using mlen or max_keylen instead. bluhm
Re: radix lookup w/o KERNEL_LOCK()
On 08/06/17(Thu) 23:46, Alexander Bluhm wrote: > On Tue, Jun 06, 2017 at 03:36:12PM +0200, Martin Pieuchot wrote: > > +#define SALEN(sa) (*(u_char *)sa) > > Put () around macro arguments. > #define SALEN(sa) (*(u_char *)(sa)) Done. > > -int > > +static int > > rn_refines(void *m_arg, void *n_arg) > > > -int > > +static int > > rn_inithead0(struct radix_node_head *rnh, int offset) > > Could you keep these kernel funtions global for debugging? Sure. > > static int last_zeroed = 0; > > + char addmask_key[KEYLEN_LIMIT]; > > The static variable last_zeroed does not look MP safe. If I get > this code correctly it is only an optimization to avoid multiple > zeroing in addmask_key. This does not work anyway with addmask_key > on the stack. You're right, I removed the 'static'. > > *addmask_key = last_zeroed = mlen; > > Should this be SALEN(addmask_key)? Yes and we can drop 'last_zeroed' since it isn't used afterward. Index: net/radix.c === RCS file: /cvs/src/sys/net/radix.c,v retrieving revision 1.56 diff -u -p -r1.56 radix.c --- net/radix.c 24 Jan 2017 10:08:30 - 1.56 +++ net/radix.c 9 Jun 2017 13:08:50 - @@ -48,24 +48,32 @@ #include -static unsigned int max_keylen; -struct radix_node_head *mask_rnhead; -static char *addmask_key; -static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; -static char *rn_zeros, *rn_ones; +#define SALEN(sa) (*(u_char *)(sa)) -struct pool rtmask_pool; /* pool for radix_mask structures */ +/* + * Read-only variables, allocated & filled during rn_init(). 
+ */ +static char*rn_zeros; /* array of 0s */ +static char*rn_ones; /* array of 1s */ +static unsigned int max_keylen;/* size of the above arrays */ +#define KEYLEN_LIMIT64 /* maximum allowed keylen */ -#define rn_masktop (mask_rnhead->rnh_treetop) + +struct radix_node_head *mask_rnhead; /* head of shared mask tree */ +struct pool rtmask_pool; /* pool for radix_mask structures */ static inline int rn_satisfies_leaf(char *, struct radix_node *, int); static inline int rn_lexobetter(void *, void *); static inline struct radix_mask *rn_new_radix_mask(struct radix_node *, struct radix_mask *); +int rn_refines(void *, void *); +int rn_inithead0(struct radix_node_head *, int); +struct radix_node *rn_addmask(void *, int, int); struct radix_node *rn_insert(void *, struct radix_node_head *, int *, struct radix_node [2]); struct radix_node *rn_newpair(void *, int, struct radix_node[2]); +void rn_link_dupedkey(struct radix_node *, struct radix_node *, int); static inline struct radix_node *rn_search(void *, struct radix_node *); struct radix_node *rn_search_m(void *, struct radix_node *, void *); @@ -413,9 +421,10 @@ rn_addmask(void *n_arg, int search, int caddr_t cp, cplim; int b = 0, mlen, j; int maskduplicated, m0, isnormal; - static int last_zeroed = 0; + int last_zeroed = 0; + char addmask_key[KEYLEN_LIMIT]; - if ((mlen = *(u_char *)netmask) > max_keylen) + if ((mlen = SALEN(netmask)) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; @@ -438,8 +447,8 @@ rn_addmask(void *n_arg, int search, int } if (m0 < last_zeroed) memset(addmask_key + m0, 0, last_zeroed - m0); - *addmask_key = last_zeroed = mlen; - tm = rn_search(addmask_key, rn_masktop); + SALEN(addmask_key) = mlen; + tm = rn_search(addmask_key, mask_rnhead->rnh_treetop); if (memcmp(addmask_key, tm->rn_key, mlen) != 0) tm = NULL; if (tm || search) @@ -452,7 +461,7 @@ rn_addmask(void *n_arg, int search, int memcpy(cp, addmask_key, mlen); tm = rn_insert(cp, mask_rnhead, &maskduplicated, tm); if (maskduplicated) 
{ - log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); + log(LOG_ERR, "%s: mask impossibly already in tree\n", __func__); free(saved_tm, M_RTABLE, 0); return (tm); } @@ -464,6 +473,9 @@ rn_addmask(void *n_arg, int search, int for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) cp++; if (cp != cplim) { + static const char normal_chars[] = { + 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1 + }; for (j = 0x80; (j & *cp) != 0; j >>= 1) b++; if (*cp != normal_chars[b] || cp != (cplim - 1)) @@ -488,9 +500,9 @@ rn_lexobetter(void *m_arg, void *n_arg) * first. The netmasks were normalized before calling this function and * don't have unneeded trailing zeros. */ - if (*mp > *np) + if (SALEN(mp) > SALEN(np)) return 1; - if (*mp < *np) +
Re: radix lookup w/o KERNEL_LOCK()
On Tue, Jun 06, 2017 at 03:36:12PM +0200, Martin Pieuchot wrote: > +#define SALEN(sa)(*(u_char *)sa) Put () around macro arguments. #define SALEN(sa) (*(u_char *)(sa)) > -int > +static int > rn_refines(void *m_arg, void *n_arg) > -int > +static int > rn_inithead0(struct radix_node_head *rnh, int offset) Could you keep these kernel functions global for debugging? > @@ -414,8 +422,9 @@ rn_addmask(void *n_arg, int search, int > static int last_zeroed = 0; > + char addmask_key[KEYLEN_LIMIT]; The static variable last_zeroed does not look MP safe. If I get this code correctly it is only an optimization to avoid multiple zeroing in addmask_key. This does not work anyway with addmask_key on the stack. > *addmask_key = last_zeroed = mlen; Should this be SALEN(addmask_key)? bluhm
radix lookup w/o KERNEL_LOCK()
One of the reasons IPsec still requires the KERNEL_LOCK() in the forwarding path is that it uses the radix tree for the SPD lookup. Since the radix code is used by other subsystems (NFS export list, PIPEX, PF) we want it to be able to run on parallel, at least when callers aren't manipulating the same tree. That's what the diff below does by getting rid of the globals used in rn_addmask(). For that we now use a buffer on the stack of the caller instead of the global ``addmask_key''. While here I documented other globals, moved local prototypes to the C file and added a SALEN() macro for readability purpose. ok? Index: net/radix.c === RCS file: /cvs/src/sys/net/radix.c,v retrieving revision 1.56 diff -u -p -r1.56 radix.c --- net/radix.c 24 Jan 2017 10:08:30 - 1.56 +++ net/radix.c 6 Jun 2017 13:23:19 - @@ -48,24 +48,32 @@ #include -static unsigned int max_keylen; -struct radix_node_head *mask_rnhead; -static char *addmask_key; -static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; -static char *rn_zeros, *rn_ones; +#define SALEN(sa) (*(u_char *)sa) + +/* + * Read-only variables, allocated & filled during rn_init(). 
+ */ +static char*rn_zeros; /* array of 0s */ +static char*rn_ones; /* array of 1s */ +static unsigned int max_keylen;/* size of the above arrays */ +#define KEYLEN_LIMIT64 /* maximum allowed keylen */ -struct pool rtmask_pool; /* pool for radix_mask structures */ -#define rn_masktop (mask_rnhead->rnh_treetop) +struct radix_node_head *mask_rnhead; /* head of shared mask tree */ +struct pool rtmask_pool; /* pool for radix_mask structures */ +static int rn_refines(void *, void *); +static int rn_inithead0(struct radix_node_head *, int); static inline int rn_satisfies_leaf(char *, struct radix_node *, int); static inline int rn_lexobetter(void *, void *); static inline struct radix_mask *rn_new_radix_mask(struct radix_node *, struct radix_mask *); +struct radix_node *rn_addmask(void *, int, int); struct radix_node *rn_insert(void *, struct radix_node_head *, int *, struct radix_node [2]); struct radix_node *rn_newpair(void *, int, struct radix_node[2]); +void rn_link_dupedkey(struct radix_node *, struct radix_node *, int); static inline struct radix_node *rn_search(void *, struct radix_node *); struct radix_node *rn_search_m(void *, struct radix_node *, void *); @@ -143,7 +151,7 @@ rn_search_m(void *v_arg, struct radix_no return x; } -int +static int rn_refines(void *m_arg, void *n_arg) { caddr_t m = m_arg; @@ -414,8 +422,9 @@ rn_addmask(void *n_arg, int search, int int b = 0, mlen, j; int maskduplicated, m0, isnormal; static int last_zeroed = 0; + char addmask_key[KEYLEN_LIMIT]; - if ((mlen = *(u_char *)netmask) > max_keylen) + if ((mlen = SALEN(netmask)) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; @@ -439,7 +448,7 @@ rn_addmask(void *n_arg, int search, int if (m0 < last_zeroed) memset(addmask_key + m0, 0, last_zeroed - m0); *addmask_key = last_zeroed = mlen; - tm = rn_search(addmask_key, rn_masktop); + tm = rn_search(addmask_key, mask_rnhead->rnh_treetop); if (memcmp(addmask_key, tm->rn_key, mlen) != 0) tm = NULL; if (tm || search) @@ -452,7 +461,7 @@ 
rn_addmask(void *n_arg, int search, int memcpy(cp, addmask_key, mlen); tm = rn_insert(cp, mask_rnhead, &maskduplicated, tm); if (maskduplicated) { - log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); + log(LOG_ERR, "%s: mask impossibly already in tree\n", __func__); free(saved_tm, M_RTABLE, 0); return (tm); } @@ -464,6 +473,9 @@ rn_addmask(void *n_arg, int search, int for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) cp++; if (cp != cplim) { + static const char normal_chars[] = { + 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1 + }; for (j = 0x80; (j & *cp) != 0; j >>= 1) b++; if (*cp != normal_chars[b] || cp != (cplim - 1)) @@ -488,9 +500,9 @@ rn_lexobetter(void *m_arg, void *n_arg) * first. The netmasks were normalized before calling this function and * don't have unneeded trailing zeros. */ - if (*mp > *np) + if (SALEN(mp) > SALEN(np)) return 1; - if (*mp < *np) + if (SALEN(mp) < SALEN(np)) return 0; /* * Must return the first difference between the masks @@ -756,6 +768,8 @@ rn_addroute(void *v_arg, void *n_arg, st struct radix_node *tt, *saved_tt, *tm = NULL; i