Following patch switches to lockless genl callbacks and makes the OVS datapath locking finer-grained. This allows simultaneous flow setup and port add/deletion. It also fixes softlockup issues seen due to the genl lock.
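The resulting lock ordering, outermost first, is:

    rtnl_lock -> dp_lock (rwsem) -> dp->ftable_lock (mutex) -> RCU for readers

A minimal sketch of how the new locks are meant to nest (the two example_*
callers below are illustrative only, not functions in this patch):

    /* Port add/deletion: still device state under RTNL; dp_lock is only
     * taken for read, so port ops no longer serialize against flow setup. */
    static void example_port_op(void)
    {
            rtnl_lock();
            dp_read_lock();
            /* ... add or delete a vport ... */
            dp_read_unlock();
            rtnl_unlock();
    }

    /* Flow setup: no RTNL needed; the dp_lock read side keeps the datapath
     * from being destroyed while ftable_lock serializes flow-table writers. */
    static void example_flow_op(struct datapath *dp)
    {
            dp_read_lock();
            mutex_lock(&dp->ftable_lock);
            /* ... publish a new table with rcu_assign_pointer(dp->table, ...) ... */
            mutex_unlock(&dp->ftable_lock);
            dp_read_unlock();
    }

Datapath create/destroy takes dp_lock for write (still under RTNL), and plain
readers use rcu_read_lock(), which is what lets flow setup run in parallel
with port add/deletion.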
Signed-off-by: Pravin B Shelar <pshe...@nicira.com>
---
 net/openvswitch/datapath.c |  280 +++++++++++++++++++++++++++++++-------------
 net/openvswitch/datapath.h |    4 +-
 net/openvswitch/vport.c    |    1 +
 3 files changed, 205 insertions(+), 80 deletions(-)

diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2c74daa..a452427 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -57,13 +57,13 @@
 /**
  * DOC: Locking:
  *
- * Writes to device state (add/remove datapath, port, set operations on vports,
- * etc.) are protected by RTNL.
+ * Updates to the datapath are protected by the dp_lock write lock;
+ * dp_lock nests inside the RTNL lock.
  *
- * Writes to other state (flow table modifications, set miscellaneous datapath
- * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
- * genl_mutex.
+ * Writes to device state (add/remove port, set operations on vports,
+ * etc.) are protected by RTNL.
  *
+ * Flow table modifications are protected by the datapath's ftable_lock.
  * Reads are protected by RCU.
  *
  * There are a few special cases (mostly stats) that have their own
@@ -71,8 +71,43 @@
  * each other.
  */
 
+static DECLARE_RWSEM(dp_lock);
+
+static void dp_read_lock(void)
+{
+	down_read(&dp_lock);
+}
+
+static void dp_read_unlock(void)
+{
+	up_read(&dp_lock);
+}
+
+static void dp_write_lock(void)
+{
+	down_write(&dp_lock);
+}
+
+static void dp_write_unlock(void)
+{
+	up_write(&dp_lock);
+}
+
+#define flow_table_ft_lock(dp) \
+	rcu_dereference_protected(dp->table, \
+		(rwsem_is_locked(&dp_lock) && lockdep_is_held(&dp->ftable_lock)))
+
+#define flow_acts_ft_lock(flow, dp) \
+	rcu_dereference_protected(flow->sf_acts, \
+		(rwsem_is_locked(&dp_lock) && lockdep_is_held(&dp->ftable_lock)))
+
+#define flow_acts_rcu_ft_lock(flow, dp) \
+	rcu_dereference_check(flow->sf_acts, \
+		(rwsem_is_locked(&dp_lock) && lockdep_is_held(&dp->ftable_lock)))
+
+
 /* Global list of datapaths to enable dumping them all out.
- * Protected by genl_mutex.
+ * Protected by dp_lock.
  */
 static LIST_HEAD(dps);
 
@@ -80,13 +115,12 @@ static LIST_HEAD(dps);
 static void rehash_flow_table(struct work_struct *work);
 static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
 
-static struct vport *new_vport(const struct vport_parms *);
 static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
			     const struct dp_upcall_info *);
 static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
				  const struct dp_upcall_info *);
 
-/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+/* Must be called with rcu_read_lock, dp_lock, or RTNL lock. */
 static struct datapath *get_dp(int dp_ifindex)
 {
 	struct datapath *dp = NULL;
@@ -138,7 +172,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 	kfree(dp);
 }
 
-/* Called with RTNL lock and genl_lock. */
+/* Called with RTNL lock. */
 static struct vport *new_vport(const struct vport_parms *parms)
 {
 	struct vport *vport;
@@ -220,7 +254,8 @@ static struct genl_family dp_packet_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_PACKET_FAMILY,
 	.version = OVS_PACKET_VERSION,
-	.maxattr = OVS_PACKET_ATTR_MAX
+	.maxattr = OVS_PACKET_ATTR_MAX,
+	.lockless = true,
 };
 
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
@@ -368,18 +403,13 @@ out:
 	return err;
 }
 
-/* Called with genl_mutex. */
-static int flush_flows(int dp_ifindex)
+/* Called with dp_lock and ftable_lock. */
+static int flush_flows(struct datapath *dp)
 {
 	struct flow_table *old_table;
 	struct flow_table *new_table;
-	struct datapath *dp;
 
-	dp = get_dp(dp_ifindex);
-	if (!dp)
-		return -ENODEV;
-
-	old_table = genl_dereference(dp->table);
+	old_table = flow_table_ft_lock(dp);
 	new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
 	if (!new_table)
 		return -ENOMEM;
@@ -704,10 +734,11 @@ static struct genl_ops dp_packet_genl_ops[] = {
 	}
 };
 
+/* Must be called with rcu_read_lock. */
 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
 {
 	int i;
-	struct flow_table *table = genl_dereference(dp->table);
+	struct flow_table *table = rcu_dereference(dp->table);
 
 	stats->n_flows = ovs_flow_tbl_count(table);
 
@@ -741,14 +772,15 @@ static struct genl_family dp_flow_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_FLOW_FAMILY,
 	.version = OVS_FLOW_VERSION,
-	.maxattr = OVS_FLOW_ATTR_MAX
+	.maxattr = OVS_FLOW_ATTR_MAX,
+	.lockless = true,
 };
 
 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
 	.name = OVS_FLOW_MCGROUP
 };
 
-/* Called with genl_lock. */
+/* Called with rcu_read_lock or ftable_lock. */
 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
				  struct sk_buff *skb, u32 pid,
				  u32 seq, u32 flags, u8 cmd)
@@ -762,8 +794,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
 	u8 tcp_flags;
 	int err;
 
-	sf_acts = rcu_dereference_protected(flow->sf_acts,
-					    lockdep_genl_is_held());
+	sf_acts = flow_acts_rcu_ft_lock(flow, dp);
 
 	ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
 	if (!ovs_header)
@@ -823,13 +854,13 @@ error:
 	return err;
 }
 
-static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct datapath *dp,
+					       struct sw_flow *flow)
 {
 	const struct sw_flow_actions *sf_acts;
 	int len;
 
-	sf_acts = rcu_dereference_protected(flow->sf_acts,
-					    lockdep_genl_is_held());
+	sf_acts = flow_acts_ft_lock(flow, dp);
 
 	/* OVS_FLOW_ATTR_KEY */
 	len = nla_total_size(FLOW_BUFSIZE);
@@ -854,7 +885,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
 	struct sk_buff *skb;
 	int retval;
 
-	skb = ovs_flow_cmd_alloc_info(flow);
+	skb = ovs_flow_cmd_alloc_info(dp, flow);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
@@ -893,12 +924,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		goto error;
 	}
 
+	dp_read_lock();
 	dp = get_dp(ovs_header->dp_ifindex);
 	error = -ENODEV;
 	if (!dp)
-		goto error;
+		goto error_unlock;
 
-	table = genl_dereference(dp->table);
+	mutex_lock(&dp->ftable_lock);
+	table = flow_table_ft_lock(dp);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 	if (!flow) {
 		struct sw_flow_actions *acts;
@@ -906,7 +939,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
-			goto error;
+			goto error_ft_unlock;
 
 		/* Expand table, if necessary, to make room. */
 		if (ovs_flow_tbl_need_to_expand(table)) {
@@ -916,7 +949,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			if (!IS_ERR(new_table)) {
 				rcu_assign_pointer(dp->table, new_table);
 				ovs_flow_tbl_deferred_destroy(table);
-				table = genl_dereference(dp->table);
+				table = flow_table_ft_lock(dp);
 			}
 		}
 
@@ -924,7 +957,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		flow = ovs_flow_alloc();
 		if (IS_ERR(flow)) {
 			error = PTR_ERR(flow);
-			goto error;
+			goto error_ft_unlock;
 		}
 		flow->key = key;
 		clear_stats(flow);
@@ -957,11 +990,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		error = -EEXIST;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
 		    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
-			goto error;
+			goto error_ft_unlock;
 
 		/* Update actions. */
-		old_acts = rcu_dereference_protected(flow->sf_acts,
-						     lockdep_genl_is_held());
+		old_acts = flow_acts_ft_lock(flow, dp);
 		acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
 		if (acts_attrs &&
 		    (old_acts->actions_len != nla_len(acts_attrs) ||
@@ -972,7 +1004,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 			new_acts = ovs_flow_actions_alloc(acts_attrs);
 			error = PTR_ERR(new_acts);
 			if (IS_ERR(new_acts))
-				goto error;
+				goto error_ft_unlock;
 
 			rcu_assign_pointer(flow->sf_acts, new_acts);
 			ovs_flow_deferred_free_acts(old_acts);
@@ -989,6 +1021,9 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
+	mutex_unlock(&dp->ftable_lock);
+	dp_read_unlock();
+
 	if (!IS_ERR(reply))
 		genl_notify(reply, genl_info_net(info), info->snd_pid,
 			    ovs_dp_flow_multicast_group.id, info->nlhdr,
@@ -1000,6 +1035,10 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 error_free_flow:
 	ovs_flow_free(flow);
+error_ft_unlock:
+	mutex_unlock(&dp->ftable_lock);
+error_unlock:
+	dp_read_unlock();
 error:
 	return error;
 }
@@ -1022,21 +1061,37 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		return err;
 
+	dp_read_lock();
 	dp = get_dp(ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
+	if (!dp) {
+		err = -ENODEV;
+		goto exit_unlock;
+	}
 
-	table = genl_dereference(dp->table);
+	mutex_lock(&dp->ftable_lock);
+	table = flow_table_ft_lock(dp);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
-	if (!flow)
-		return -ENOENT;
+	if (!flow) {
+		err = -ENOENT;
+		goto exit_ft_unlock;
+	}
 
 	reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
					info->snd_seq, OVS_FLOW_CMD_NEW);
-	if (IS_ERR(reply))
-		return PTR_ERR(reply);
+	if (IS_ERR(reply)) {
+		err = PTR_ERR(reply);
+		goto exit_ft_unlock;
+	}
 
+	mutex_unlock(&dp->ftable_lock);
+	dp_read_unlock();
 	return genlmsg_reply(reply, info);
+
+exit_ft_unlock:
+	mutex_unlock(&dp->ftable_lock);
+exit_unlock:
+	dp_read_unlock();
+	return err;
 }
 
 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
@@ -1051,24 +1106,35 @@
 	int err;
 	int key_len;
 
-	if (!a[OVS_FLOW_ATTR_KEY])
-		return flush_flows(ovs_header->dp_ifindex);
+	dp_read_lock();
+	dp = get_dp(ovs_header->dp_ifindex);
+	if (!dp) {
+		err = -ENODEV;
+		goto exit_unlock;
+	}
+
+	mutex_lock(&dp->ftable_lock);
+	if (!a[OVS_FLOW_ATTR_KEY]) {
+		err = flush_flows(dp);
+		goto exit_ft_unlock;
+	}
+
 	err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
 	if (err)
-		return err;
+		goto exit_ft_unlock;
 
-	dp = get_dp(ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
-
-	table = genl_dereference(dp->table);
+	table = flow_table_ft_lock(dp);
 	flow = ovs_flow_tbl_lookup(table, &key, key_len);
-	if (!flow)
-		return -ENOENT;
+	if (!flow) {
+		err = -ENOENT;
+		goto exit_ft_unlock;
+	}
 
-	reply = ovs_flow_cmd_alloc_info(flow);
-	if (!reply)
-		return -ENOMEM;
+	reply = ovs_flow_cmd_alloc_info(dp, flow);
+	if (!reply) {
+		err = -ENOMEM;
+		goto exit_ft_unlock;
+	}
 
 	ovs_flow_tbl_remove(table, flow);
 
@@ -1080,7 +1146,12 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_flow_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);
-	return 0;
+
+exit_ft_unlock:
+	mutex_unlock(&dp->ftable_lock);
+exit_unlock:
+	dp_read_unlock();
+	return err;
 }
 
 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -1088,12 +1159,16 @@
 	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
 	struct datapath *dp;
 	struct flow_table *table;
+	int ret;
 
+	rcu_read_lock();
 	dp = get_dp(ovs_header->dp_ifindex);
-	if (!dp)
-		return -ENODEV;
+	if (!dp) {
+		ret = -ENODEV;
+		goto exit_unlock;
+	}
 
-	table = genl_dereference(dp->table);
+	table = rcu_dereference(dp->table);
 
 	for (;;) {
 		struct sw_flow *flow;
@@ -1114,7 +1189,12 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		cb->args[0] = bucket;
 		cb->args[1] = obj;
 	}
-	return skb->len;
+	ret = skb->len;
+
+exit_unlock:
+	rcu_read_unlock();
+	return ret;
+
 }
 
 static struct genl_ops dp_flow_genl_ops[] = {
@@ -1151,7 +1231,8 @@ static struct genl_family dp_datapath_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_DATAPATH_FAMILY,
 	.version = OVS_DATAPATH_VERSION,
-	.maxattr = OVS_DP_ATTR_MAX
+	.maxattr = OVS_DP_ATTR_MAX,
+	.lockless = true,
 };
 
 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
@@ -1178,7 +1259,9 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (err)
		goto nla_put_failure;
 
+	rcu_read_lock();
 	get_dp_stats(dp, &dp_stats);
+	rcu_read_unlock();
 	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
		goto nla_put_failure;
 
@@ -1208,7 +1291,7 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
 	return skb;
 }
 
-/* Called with genl_mutex and optionally with RTNL lock also. */
+/* Called with dp_lock and optionally with RTNL lock also. */
 static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
 {
@@ -1241,15 +1324,18 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
		goto err;
 
 	rtnl_lock();
+	dp_write_lock();
+
 	err = -ENODEV;
 	if (!try_module_get(THIS_MODULE))
-		goto err_unlock_rtnl;
+		goto err_unlock;
 
 	err = -ENOMEM;
 	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
 	if (dp == NULL)
		goto err_put_module;
 
 	INIT_LIST_HEAD(&dp->port_list);
+	mutex_init(&dp->ftable_lock);
 	/* Allocate table. */
 	err = -ENOMEM;
@@ -1287,6 +1373,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
		goto err_destroy_local_port;
 
 	list_add_tail(&dp->list_node, &dps);
+	dp_write_unlock();
 	rtnl_unlock();
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
@@ -1299,12 +1386,13 @@ err_destroy_local_port:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(genl_dereference(dp->table));
+	ovs_flow_tbl_destroy(rcu_dereference_raw(dp->table));
 err_free_dp:
 	kfree(dp);
 err_put_module:
 	module_put(THIS_MODULE);
-err_unlock_rtnl:
+err_unlock:
+	dp_write_unlock();
 	rtnl_unlock();
 err:
 	return err;
@@ -1318,6 +1406,8 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rtnl_lock();
+	dp_write_lock();
+
 	dp = lookup_datapath(info->userhdr, info->attrs);
 	err = PTR_ERR(dp);
 	if (IS_ERR(dp))
@@ -1342,6 +1432,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	 * fully destroyed before freeing the datapath.
 	 */
 	rtnl_unlock();
+	dp_write_unlock();
 
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 	module_put(THIS_MODULE);
@@ -1353,6 +1444,7 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 
 exit_unlock:
+	dp_write_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -1363,9 +1455,12 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	int err;
 
+	dp_write_lock();
 	dp = lookup_datapath(info->userhdr, info->attrs);
-	if (IS_ERR(dp))
-		return PTR_ERR(dp);
+	err = PTR_ERR(dp);
+	if (IS_ERR(dp))
+		goto exit_unlock;
+	err = 0;
 
 	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
@@ -1373,14 +1468,16 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(init_net.genl_sock, 0,
				ovs_dp_datapath_multicast_group.id, err);
-		return 0;
+		err = 0;
+		goto exit_unlock;
 	}
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
		    ovs_dp_datapath_multicast_group.id, info->nlhdr,
		    GFP_KERNEL);
-
-	return 0;
+exit_unlock:
+	dp_write_unlock();
+	return err;
 }
 
@@ -1388,12 +1485,16 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *reply;
 	struct datapath *dp;
 
+	dp_read_lock();
 	dp = lookup_datapath(info->userhdr, info->attrs);
-	if (IS_ERR(dp))
+	if (IS_ERR(dp)) {
+		dp_read_unlock();
		return PTR_ERR(dp);
+	}
 
 	reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
				      info->snd_seq, OVS_DP_CMD_NEW);
+	dp_read_unlock();
 	if (IS_ERR(reply))
		return PTR_ERR(reply);
 
@@ -1406,6 +1507,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int skip = cb->args[0];
 	int i = 0;
 
+	dp_read_lock();
 	list_for_each_entry(dp, &dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
@@ -1414,6 +1516,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
			break;
		i++;
 	}
+	dp_read_unlock();
 
 	cb->args[0] = i;
 
@@ -1458,7 +1561,8 @@ static struct genl_family dp_vport_genl_family = {
 	.hdrsize = sizeof(struct ovs_header),
 	.name = OVS_VPORT_FAMILY,
 	.version = OVS_VPORT_VERSION,
-	.maxattr = OVS_VPORT_ATTR_MAX
+	.maxattr = OVS_VPORT_ATTR_MAX,
+	.lockless = true,
 };
 
 struct genl_multicast_group ovs_dp_vport_multicast_group = {
@@ -1573,6 +1677,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
		goto exit;
 
 	rtnl_lock();
+	dp_read_lock();
 	dp = get_dp(ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
@@ -1624,6 +1729,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
+	dp_read_unlock();
 	rtnl_unlock();
 exit:
 	return err;
@@ -1637,6 +1743,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rtnl_lock();
+	dp_read_lock();
 	vport = lookup_vport(info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
@@ -1664,6 +1771,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
		   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
+	dp_read_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -1676,6 +1784,8 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	int err;
 
 	rtnl_lock();
+	dp_read_lock();
+
 	vport = lookup_vport(info->userhdr, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
@@ -1698,6 +1808,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
+	dp_read_unlock();
 	rtnl_unlock();
 	return err;
 }
@@ -1738,11 +1849,13 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	u32 port_no;
 	int retval;
 
+	rcu_read_lock();
 	dp = get_dp(ovs_header->dp_ifindex);
-	if (!dp)
+	if (!dp) {
+		rcu_read_unlock();
		return -ENODEV;
+	}
 
-	rcu_read_lock();
 	for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
		struct vport *vport;
 
@@ -1766,22 +1879,31 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 static void rehash_flow_table(struct work_struct *work)
 {
 	struct datapath *dp;
+	int interval = REHASH_FLOW_INTERVAL;
 
-	genl_lock();
+	if (!rtnl_trylock()) {
+		interval = REHASH_FLOW_INTERVAL / 2;
+		goto resched;
+	}
 
+	dp_read_lock();
 	list_for_each_entry(dp, &dps, list_node) {
-		struct flow_table *old_table = genl_dereference(dp->table);
+		struct flow_table *old_table;
		struct flow_table *new_table;
 
+		mutex_lock(&dp->ftable_lock);
+		old_table = flow_table_ft_lock(dp);
		new_table = ovs_flow_tbl_rehash(old_table);
		if (!IS_ERR(new_table)) {
			rcu_assign_pointer(dp->table, new_table);
			ovs_flow_tbl_deferred_destroy(old_table);
		}
+		mutex_unlock(&dp->ftable_lock);
 	}
+	dp_read_unlock();
+	rtnl_unlock();
 
-	genl_unlock();
-
-	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+resched:
+	schedule_delayed_work(&rehash_flow_wq, interval);
 }
 
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c73370c..fed4056 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -57,7 +57,8 @@ struct dp_stats_percpu {
  * @rcu: RCU callback head for deferred destruction.
  * @list_node: Element in global 'dps' list.
  * @n_flows: Number of flows currently in flow table.
- * @table: Current flow table.  Protected by genl_lock and RCU.
+ * @table: Current flow table.  Protected by %ftable_lock and RCU.
+ * @ftable_lock: Flow-table lock.
  * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
  * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
  * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
@@ -73,6 +74,7 @@ struct datapath {
 
 	/* Flow table. */
 	struct flow_table __rcu *table;
+	struct mutex ftable_lock;
 
 	/* Switch ports. */
 	struct vport __rcu *ports[DP_MAX_PORTS];

diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6c066ba..8a02293 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -86,6 +86,7 @@ struct vport *ovs_vport_locate(const char *name)
 	struct vport *vport;
 	struct hlist_node *node;
 
+	WARN_ON(!rcu_read_lock_held() && !rtnl_is_locked());
 	hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
		if (!strcmp(name, vport->ops->get_name(vport)))
			return vport;
-- 
1.7.10