When running on busy bgpd servers with many clients the function pathid_assign() consumes a lot of CPU time. The code does a lookup which often fails and then walks the list of prefixes. In the end this results in two list walks.
This complicated dance is only needed for peers that use add-path but regular peers can skip this by assigning a per peer path_id instead. To make this work, even path_id_txs are used for regular peers while the add-path peers use odd numbers. This should help route collectors and route reflectors that receive the same route many times from different sources. At least as long as these peers don't use add-path. -- :wq Claudio Index: rde.c =================================================================== RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v retrieving revision 1.576 diff -u -p -r1.576 rde.c --- rde.c 12 Sep 2022 10:03:17 -0000 1.576 +++ rde.c 19 Sep 2022 15:29:19 -0000 @@ -1607,28 +1607,38 @@ pathid_conflict(struct rib_entry *re, ui return 0; } +/* + * Assign a send side path_id to all paths. + */ static uint32_t pathid_assign(struct rde_peer *peer, uint32_t path_id, struct bgpd_addr *prefix, uint8_t prefixlen) { struct rib_entry *re; - struct prefix *p = NULL; uint32_t path_id_tx; - /* - * Assign a send side path_id to all paths. - */ + /* If peer has no add-path use the per peer path_id */ + if (!peer_has_add_path(peer, prefix->aid, CAPA_AP_RECV)) + return peer->path_id_tx; + + /* peer uses add-path, therefore new path_ids need to be assigned */ re = rib_get(rib_byid(RIB_ADJ_IN), prefix, prefixlen); - if (re != NULL) + if (re != NULL) { + struct prefix *p; + p = prefix_bypeer(re, peer, path_id); - if (p != NULL) - path_id_tx = p->path_id_tx; - else { - do { - /* assign new local path_id */ - path_id_tx = arc4random(); - } while (pathid_conflict(re, path_id_tx)); + if (p != NULL) + return p->path_id_tx; } + + do { + /* + * Assign new local path_id, must be an odd number. + * Even numbers are used by the per peer path_id_tx. 
+ */ + path_id_tx = (arc4random() << 1) | 1; + } while (pathid_conflict(re, path_id_tx)); + return path_id_tx; } Index: rde.h =================================================================== RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v retrieving revision 1.271 diff -u -p -r1.271 rde.h --- rde.h 12 Sep 2022 10:03:17 -0000 1.271 +++ rde.h 19 Sep 2022 15:15:12 -0000 @@ -99,6 +99,7 @@ struct rde_peer { uint32_t remote_bgpid; /* host byte order! */ uint32_t up_nlricnt; uint32_t up_wcnt; + uint32_t path_id_tx; enum peer_state state; enum export_type export_type; uint16_t loc_rib_id; Index: rde_peer.c =================================================================== RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v retrieving revision 1.23 diff -u -p -r1.23 rde_peer.c --- rde_peer.c 1 Sep 2022 13:23:24 -0000 1.23 +++ rde_peer.c 19 Sep 2022 15:26:34 -0000 @@ -168,6 +168,22 @@ peer_add(uint32_t id, struct peer_config peer->flags = peer->conf.flags; SIMPLEQ_INIT(&peer->imsg_queue); + /* assign random unique transmit path id */ + do { + struct rde_peer *p; + int conflict = 0; + + peer->path_id_tx = arc4random() << 1; + RB_FOREACH(p, peer_tree, &peertable) { + if (p->path_id_tx == peer->path_id_tx) { + conflict = 1; + break; + } + } + if (!conflict) + break; + } while (1); + if (RB_INSERT(peer_tree, &peertable, peer) != NULL) fatalx("rde peer table corrupted");