On Thu, Jan 12, 2017 at 4:28 PM, Tobias Klauser <tklau...@distanz.ch> wrote: > Thanks for the patch and sorry for the delay in reply. > > I'm not too familiar with the RCU list interface, but from a quick > glance this looks OK in general. A few remarks below. > > Daniel, if you have time could you maybe have a quick look? > > On 2017-01-09 at 07:26:07 +0100, Vadim Kochan <vadi...@gmail.com> wrote: >> list.h provides generic Linux-like linked list API which also supports >> RCU list operations. >> >> Also additionally was removed the spinlock which is not needed for >> RCU-list operations, for the list_del_rcu(...) case it is needed >> additionally call synchronize_rcu(...) before free the flow entry. >> >> Because of full RCU support now flows are freed after grace-period via >> calling synchronize_rcu() and these removed-and-ready-to-free entries >> are kept in separate struct list_head. > > Do we really need the separate free list? Couldn't we add a struct > rcu_head to struct flow_entry instead and then use something along the > lines of: > > static void flow_entry_xfree_rcu(struct rcu_head *rcu) > { > struct flow_entry *entry = caa_container_of(rcu, struct flow_entry, > rcu_head); > flow_entry_free(entry); > } > > call_rcu(&entry->rcu_head, flow_entry_xfree_rcu); > > to free the entries after the RCU grace period?
Hm, OK I will look at this, for me it was simpler to use separate list head:) > >> Signed-off-by: Vadim Kochan <vadi...@gmail.com> > >> --- >> flowtop.c | 121 >> +++++++++++++++++++++++--------------------------------------- >> 1 file changed, 44 insertions(+), 77 deletions(-) >> >> diff --git a/flowtop.c b/flowtop.c >> index f48f5c8..a5ac2bb 100644 >> --- a/flowtop.c >>+++ b/flowtop.c > > The #include "locking.h" should be removed as none of the symbols defined > there are used anymore in flowtop.c > >> @@ -51,6 +51,9 @@ >> #endif >> >> struct flow_entry { >> + struct list_head entry; >> + struct list_head free; >> + >> uint32_t flow_id, use, status; >> uint8_t l3_proto, l4_proto; >> uint32_t ip4_src_addr, ip4_dst_addr; >> @@ -65,7 +68,6 @@ struct flow_entry { >> char city_src[128], city_dst[128]; >> char rev_dns_src[256], rev_dns_dst[256]; >> char procname[256]; >> - struct flow_entry *next; >> int inode; >> unsigned int procnum; >> bool is_visible; >> @@ -78,8 +80,8 @@ struct flow_entry { >> }; >> >> struct flow_list { >> - struct flow_entry *head; >> - struct spinlock lock; >> + struct list_head head; >> + struct list_head free; >> }; >> >> enum flow_direction { >> @@ -355,15 +357,15 @@ static inline struct flow_entry >> *flow_entry_xalloc(void) >> static inline void flow_entry_xfree(struct flow_entry *n) >> { >> if (n->ct) >> - nfct_destroy(n->ct); >> + xfree(n->ct); > > This would leak memory allocated internally in struct nf_contrack, no? > What's the reason for this change? nfct_destroy(ct) may fail if we free entry after nfct event processing (our free is deferred now because of RCU), so using just free(x) is safer because we do just nfct_clone(ct) which just allocates another nfct_conntrack entry for us, but of course it would be better to have something nfct_free(x), so I agree this is not nice but safer. 
> >> xfree(n); >> } >> >> static inline void flow_list_init(struct flow_list *fl) >> { >> - fl->head = NULL; >> - spinlock_init(&fl->lock); >> + INIT_LIST_HEAD(&fl->head); >> + INIT_LIST_HEAD(&fl->free); >> } >> >> static inline bool nfct_is_dns(const struct nf_conntrack *ct) >> @@ -392,84 +394,60 @@ static void flow_list_new_entry(struct flow_list *fl, >> const struct nf_conntrack >> flow_entry_from_ct(n, ct); >> flow_entry_get_extended(n); >> >> - rcu_assign_pointer(n->next, fl->head); >> - rcu_assign_pointer(fl->head, n); >> + list_add_rcu(&n->entry, &fl->head); >> >> n->is_visible = true; >> } >> >> -static struct flow_entry *flow_list_find_id(struct flow_list *fl, >> - uint32_t id) >> +static struct flow_entry *flow_list_find_id(struct flow_list *fl, uint32_t >> id) >> { >> - struct flow_entry *n = rcu_dereference(fl->head); >> + struct flow_entry *n; >> >> - while (n != NULL) { >> + list_for_each_entry_rcu(n, &fl->head, entry) { > q >> if (n->flow_id == id) >> return n; >> - >> - n = rcu_dereference(n->next); >> } >> >> return NULL; >> } >> >> -static struct flow_entry *flow_list_find_prev_id(const struct flow_list *fl, >> - uint32_t id) >> +static void flow_list_del_entry(struct flow_list *fl, const struct >> nf_conntrack *ct) >> { >> - struct flow_entry *prev = rcu_dereference(fl->head), *next; >> - >> - if (prev->flow_id == id) >> - return NULL; >> - >> - while ((next = rcu_dereference(prev->next)) != NULL) { >> - if (next->flow_id == id) >> - return prev; >> + struct flow_entry *n; >> >> - prev = next; >> + n = flow_list_find_id(fl, nfct_get_attr_u32(ct, ATTR_ID)); >> + if (n) { >> + list_del_rcu(&n->entry); >> + list_add_rcu(&n->free, &fl->free); > > Here would be the call_rcu() outlined above (instead of list_add_rcu) > >> } >> - >> - return NULL; >> } >> >> -static void flow_list_destroy_entry(struct flow_list *fl, >> - const struct nf_conntrack *ct) >> +static void flow_list_clear(struct flow_list *fl) >> { >> - struct flow_entry *n1, *n2; >> - 
uint32_t id = nfct_get_attr_u32(ct, ATTR_ID); >> - >> - n1 = flow_list_find_id(fl, id); >> - if (n1) { >> - n2 = flow_list_find_prev_id(fl, id); >> - if (n2) { >> - rcu_assign_pointer(n2->next, n1->next); >> - n1->next = NULL; >> - >> - flow_entry_xfree(n1); >> - } else { >> - struct flow_entry *next = fl->head->next; >> + struct flow_entry *n, *tmp; >> >> - flow_entry_xfree(fl->head); >> - fl->head = next; >> - } >> + list_for_each_entry_safe(n, tmp, &fl->head, entry) { >> + list_del_rcu(&n->entry); >> + list_add_rcu(&n->free, &fl->free); > > Same here. > >> } >> } >> >> -static void flow_list_destroy(struct flow_list *fl) >> +static void flow_list_free(struct flow_list *fl) >> { >> - struct flow_entry *n; >> + struct flow_entry *n, *tmp; >> >> synchronize_rcu(); >> - spinlock_lock(&flow_list.lock); >> >> - while (fl->head != NULL) { >> - n = rcu_dereference(fl->head->next); >> - fl->head->next = NULL; >> - >> - flow_entry_xfree(fl->head); >> - rcu_assign_pointer(fl->head, n); >> + list_for_each_entry_safe(n, tmp, &fl->free, free) { >> + list_del(&n->free); >> + flow_entry_xfree(n); >> } >> +} >> >> - spinlock_unlock(&flow_list.lock); >> +static void flow_list_destroy(struct flow_list *fl) >> +{ >> + flow_list_clear(fl); >> + flow_list_free(fl); >> } >> >> static void flow_entry_find_process(struct flow_entry *n) >> @@ -1044,15 +1022,14 @@ static void draw_flows(WINDOW *screen, struct >> flow_list *fl, >> >> rcu_read_lock(); >> >> - n = rcu_dereference(fl->head); >> - if (!n) >> + if (list_empty(&fl->head)) >> mvwprintw(screen, line, 2, "(No sessions! 
" >> "Is netfilter running?)"); >> >> ui_table_clear(&flows_tbl); >> ui_table_header_print(&flows_tbl); >> >> - for (; n; n = rcu_dereference(n->next)) { >> + list_for_each_entry_rcu(n, &fl->head, entry) { >> if (!n->is_visible) >> continue; >> if (presenter_flow_wrong_state(n)) >> @@ -1415,9 +1392,6 @@ static int flow_event_cb(enum nf_conntrack_msg_type >> type, >> if (sigint) >> return NFCT_CB_STOP; >> >> - synchronize_rcu(); >> - spinlock_lock(&flow_list.lock); >> - >> switch (type) { >> case NFCT_T_NEW: >> flow_list_new_entry(&flow_list, ct); >> @@ -1426,14 +1400,12 @@ static int flow_event_cb(enum nf_conntrack_msg_type >> type, >> flow_list_update_entry(&flow_list, ct); >> break; >> case NFCT_T_DESTROY: >> - flow_list_destroy_entry(&flow_list, ct); >> + flow_list_del_entry(&flow_list, ct); >> break; >> default: >> break; >> } >> >> - spinlock_unlock(&flow_list.lock); >> - >> if (sigint) >> return NFCT_CB_STOP; >> >> @@ -1444,8 +1416,7 @@ static void collector_refresh_flows(struct nfct_handle >> *handle) >> { >> struct flow_entry *n; >> >> - n = rcu_dereference(flow_list.head); >> - for (; n; n = rcu_dereference(n->next)) >> + list_for_each_entry_rcu(n, &flow_list.head, entry) >> nfct_query(handle, NFCT_Q_GET, n->ct); >> } >> >> @@ -1500,9 +1471,6 @@ static int flow_dump_cb(enum nf_conntrack_msg_type >> type __maybe_unused, >> if (sigint) >> return NFCT_CB_STOP; >> >> - synchronize_rcu(); >> - spinlock_lock(&flow_list.lock); >> - >> if (!(what & ~(INCLUDE_IPV4 | INCLUDE_IPV6))) >> goto check_addr; >> >> @@ -1558,7 +1526,6 @@ check_addr: >> flow_list_new_entry(&flow_list, ct); >> >> skip_flow: >> - spinlock_unlock(&flow_list.lock); >> return NFCT_CB_CONTINUE; >> } >> >> @@ -1635,18 +1602,18 @@ static void *collector(void *null __maybe_unused) >> continue; >> >> panic("Error while polling: %s\n", strerror(errno)); >> - } else if (status == 0) { >> - continue; >> + } else if (status != 0) { >> + if (poll_fd[0].revents & POLLIN) >> + nfct_catch(ct_event); >> } >> 
>> - if (poll_fd[0].revents & POLLIN) >> - nfct_catch(ct_event); >> + if (!list_empty(&flow_list.free)) >> + flow_list_free(&flow_list); >> } >> - rcu_unregister_thread(); >> - >> flow_list_destroy(&flow_list); >> - spinlock_destroy(&flow_list.lock); >> + >> + rcu_unregister_thread(); >> >> nfct_close(ct_event); >> >> -- >> 2.11.0 >> Again regarding call_rcu(), I will investigate this, as I did not use it before. Thanks, Vadim Kochan -- You received this message because you are subscribed to the Google Groups "netsniff-ng" group. To unsubscribe from this group and stop receiving emails from it, send an email to netsniff-ng+unsubscr...@googlegroups.com. For more options, visit https://groups.google.com/d/optout.