Like the previous patch, the goal is to make it easier to convert nsids from one netns to another. A new attribute (NETNSA_CURRENT_NSID) is added to the kernel answer when NETNSA_TARGET_NSID is provided, so that the user can easily convert nsids.
Signed-off-by: Nicolas Dichtel <nicolas.dich...@6wind.com> --- include/uapi/linux/net_namespace.h | 1 + net/core/net_namespace.c | 30 ++++++++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h index 0ed9dd61d32a..9f9956809565 100644 --- a/include/uapi/linux/net_namespace.h +++ b/include/uapi/linux/net_namespace.h @@ -17,6 +17,7 @@ enum { NETNSA_PID, NETNSA_FD, NETNSA_TARGET_NSID, + NETNSA_CURRENT_NSID, __NETNSA_MAX, }; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 92730905886c..fc568cd0b560 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -740,7 +740,7 @@ static int rtnl_net_get_size(void) } static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, - int cmd, int nsid) + int cmd, int nsid, bool add_ref, int ref_nsid) { struct nlmsghdr *nlh; struct rtgenmsg *rth; @@ -755,6 +755,9 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, if (nla_put_s32(skb, NETNSA_NSID, nsid)) goto nla_put_failure; + if (add_ref && nla_put_s32(skb, NETNSA_CURRENT_NSID, ref_nsid)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; @@ -769,9 +772,10 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; struct net *peer, *target = net; + bool add_ref = false; struct nlattr *nla; struct sk_buff *msg; - int err, id; + int err, id, ref_id; err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, extack); @@ -809,6 +813,8 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh, err = PTR_ERR(target); goto put_peer; } + ref_id = peernet2id(net, peer); + add_ref = true; } else { get_net(target); } @@ -821,7 +827,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh, id = peernet2id(target, peer); err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, 
nlh->nlmsg_seq, 0, - RTM_NEWNSID, id); + RTM_NEWNSID, id, add_ref, ref_id); if (err < 0) goto free_nlmsg; @@ -849,14 +855,17 @@ struct rtnl_net_dump_cb { static int rtnl_net_dumpid_one(int id, void *peer, void *data) { struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; - int ret; + int ref_id = 0, ret; if (net_cb->idx < net_cb->s_idx) goto cont; + if (net_cb->ref_net) + ref_id = __peernet2id(net_cb->ref_net, peer); + ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid, net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI, - RTM_NEWNSID, id); + RTM_NEWNSID, id, net_cb->ref_net, ref_id); if (ret < 0) return ret; @@ -923,7 +932,16 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) } spin_lock_bh(&net_cb.tgt_net->nsid_lock); + if (net_cb.ref_net && + !net_eq(net_cb.ref_net, net_cb.tgt_net) && + !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) { + err = -EAGAIN; + goto end; + } idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb); + if (net_cb.ref_net && + !net_eq(net_cb.ref_net, net_cb.tgt_net)) + spin_unlock_bh(&net_cb.ref_net->nsid_lock); spin_unlock_bh(&net_cb.tgt_net->nsid_lock); cb->args[0] = net_cb.idx; @@ -942,7 +960,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id) if (!msg) goto out; - err = rtnl_net_fill(msg, 0, 0, 0, cmd, id); + err = rtnl_net_fill(msg, 0, 0, 0, cmd, id, false, 0); if (err < 0) goto err_out; -- 2.18.0