On 28/05/2017 14:59, Roi Dayan wrote:
From: Paul Blakey <pa...@mellanox.com>

Currently only tunnel offload is supported.

Signed-off-by: Paul Blakey <pa...@mellanox.com>
Reviewed-by: Roi Dayan <r...@mellanox.com>
Reviewed-by: Simon Horman <simon.hor...@netronome.com>
---
 lib/netdev-tc-offloads.c | 400 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 390 insertions(+), 10 deletions(-)

diff --git a/lib/netdev-tc-offloads.c b/lib/netdev-tc-offloads.c
index 36e8f6f..f141784 100644
--- a/lib/netdev-tc-offloads.c
+++ b/lib/netdev-tc-offloads.c
@@ -89,7 +89,7 @@ del_ufid_tc_mapping(const ovs_u128 *ufid)
 
 /* Add ufid entry to ufid_tc hashmap.
  * If entry exists already it will be replaced. */
-static void OVS_UNUSED
+static void
 add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
                     struct netdev *netdev, int ifindex)
 {
@@ -120,7 +120,7 @@ add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
  * Returns handle if successful and fill prio and netdev for that ufid.
  * Otherwise returns 0.
  */
-static int OVS_UNUSED
+static int
 get_ufid_tc_mapping(const ovs_u128 *ufid, int *prio, struct netdev **netdev)
 {
     size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
@@ -183,7 +183,7 @@ struct prio_map_data {
  *
  * Return prio on success or 0 if we are out of prios.
  */
-static uint16_t OVS_UNUSED
+static uint16_t
 get_prio_for_tc_flower(struct tc_flower *flower)
 {
     static struct hmap prios = HMAP_INITIALIZER(&prios);
@@ -438,16 +438,396 @@ netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
     return false;
 }
 
+static int
+parse_put_flow_set_action(struct tc_flower *flower, const struct nlattr *set,
+                          size_t set_len)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+    const struct nlattr *set_attr;
+    size_t set_left;
+
+    NL_ATTR_FOR_EACH_UNSAFE(set_attr, set_left, set, set_len) {
+        if (nl_attr_type(set_attr) == OVS_KEY_ATTR_TUNNEL) {
+            const struct nlattr *tunnel = nl_attr_get(set_attr);
+            const size_t tunnel_len = nl_attr_get_size(set_attr);
+            const struct nlattr *tun_attr;
+            size_t tun_left;
+
+            flower->set.set = true;
+            NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
+                switch (nl_attr_type(tun_attr)) {
+                case OVS_TUNNEL_KEY_ATTR_ID: {
+                    flower->set.id = nl_attr_get_be64(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: {
+                    flower->set.ipv4.ipv4_src = nl_attr_get_be32(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_IPV4_DST: {
+                    flower->set.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
+                    flower->set.ipv6.ipv6_src =
+                        nl_attr_get_in6_addr(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
+                    flower->set.ipv6.ipv6_dst =
+                        nl_attr_get_in6_addr(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_TP_SRC: {
+                    flower->set.tp_src = nl_attr_get_be16(tun_attr);
+                }
+                break;
+                case OVS_TUNNEL_KEY_ATTR_TP_DST: {
+                    flower->set.tp_dst = nl_attr_get_be16(tun_attr);
+                }
+                break;
+                }
+            }
+        } else {
+            VLOG_DBG_RL(&rl, "unsupported set action type: %d",
+                        nl_attr_type(set_attr));
+            return EOPNOTSUPP;
+        }
+    }
+    return 0;
+}
+
+static int
+test_key_and_mask(struct match *match) {
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+    const struct flow *key = &match->flow;
+    struct flow *mask = &match->wc.masks;
+
+    if (mask->pkt_mark) {
+        VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
+        return EOPNOTSUPP;
+    }
+
+    if (mask->recirc_id && key->recirc_id) {
+        VLOG_DBG_RL(&rl, "offloading attribute recirc_id isn't supported");
+        return EOPNOTSUPP;
+    }
+    mask->recirc_id = 0;
+
+    if (mask->dp_hash) {
VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported"); + return EOPNOTSUPP; + } + + if (mask->conj_id) { + VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported"); + return EOPNOTSUPP; + } + + if (mask->skb_priority) { + VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported"); + return EOPNOTSUPP; + } + + if (mask->actset_output) { + VLOG_DBG_RL(&rl, + "offloading attribute actset_output isn't supported"); + return EOPNOTSUPP; + } + + if (mask->ct_state) { + VLOG_DBG_RL(&rl, "offloading attribute ct_state isn't supported"); + return EOPNOTSUPP; + } + + if (mask->ct_zone) { + VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported"); + return EOPNOTSUPP; + } + + if (mask->ct_mark) { + VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported"); + return EOPNOTSUPP; + }
typo in the log msg. will be fixed.
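For reference, the message in the ct_mark check above should presumably read:

    VLOG_DBG_RL(&rl, "offloading attribute ct_mark isn't supported");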
+
+    if (!ovs_u128_is_zero(mask->ct_label)) {
+        VLOG_DBG_RL(&rl, "offloading attribute ct_lable isn't supported");
typo in the log msg. will be fixed.
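Same here, the corrected message should presumably be:

    VLOG_DBG_RL(&rl, "offloading attribute ct_label isn't supported");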
+        return EOPNOTSUPP;
+    }
+
+    for (int i = 0; i < FLOW_N_REGS; i++) {
+        if (mask->regs[i]) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute regs[%d] isn't supported", i);
+            return EOPNOTSUPP;
+        }
+    }
+
+    if (mask->metadata) {
+        VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
+        return EOPNOTSUPP;
+    }
+
+    if (mask->nw_tos) {
+        VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
+        return EOPNOTSUPP;
+    }
+
+    if (mask->nw_ttl) {
+        VLOG_DBG_RL(&rl, "offloading attribute nw_ttl isn't supported");
+        return EOPNOTSUPP;
+    }
+
+    if (mask->nw_frag) {
+        VLOG_DBG_RL(&rl, "offloading attribute nw_frag isn't supported");
+        return EOPNOTSUPP;
+    }
+
+    for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
+        if (mask->mpls_lse[i]) {
+            VLOG_DBG_RL(&rl, "offloading attribute mpls_lse isn't supported");
+            return EOPNOTSUPP;
+        }
+    }
+
+    if (key->dl_type == htons(ETH_TYPE_IP) &&
+        key->nw_proto == IPPROTO_ICMP) {
+        if (mask->tp_src) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute icmp_type isn't supported");
+            return EOPNOTSUPP;
+        }
+        if (mask->tp_dst) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute icmp_code isn't supported");
+            return EOPNOTSUPP;
+        }
+    } else if (key->dl_type == htons(ETH_TYPE_IP) &&
+               key->nw_proto == IPPROTO_IGMP) {
+        if (mask->tp_src) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute igmp_type isn't supported");
+            return EOPNOTSUPP;
+        }
+        if (mask->tp_dst) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute igmp_code isn't supported");
+            return EOPNOTSUPP;
+        }
+    } else if (key->dl_type == htons(ETH_TYPE_IPV6) &&
+               key->nw_proto == IPPROTO_ICMPV6) {
+        if (mask->tp_src) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute icmp_type isn't supported");
+            return EOPNOTSUPP;
+        }
+        if (mask->tp_dst) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute icmp_code isn't supported");
+            return EOPNOTSUPP;
+        }
+    }
+    if (is_ip_any(key) && key->nw_proto == IPPROTO_TCP && mask->tcp_flags) {
+        if (mask->tcp_flags) {
+            VLOG_DBG_RL(&rl,
+                        "offloading attribute tcp_flags isn't supported");
+            return EOPNOTSUPP;
+        }
+    }
+
+    if (!is_all_zeros(mask, sizeof *mask)) {
+        VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
+        return EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
 int
-netdev_tc_flow_put(struct netdev *netdev OVS_UNUSED,
-                   struct match *match OVS_UNUSED,
-                   struct nlattr *actions OVS_UNUSED,
-                   size_t actions_len OVS_UNUSED,
-                   const ovs_u128 *ufid OVS_UNUSED,
-                   struct offload_info *info OVS_UNUSED,
+netdev_tc_flow_put(struct netdev *netdev,
+                   struct match *match,
+                   struct nlattr *actions,
+                   size_t actions_len,
+                   const ovs_u128 *ufid,
+                   struct offload_info *info,
                    struct dpif_flow_stats *stats OVS_UNUSED)
 {
-    return EOPNOTSUPP;
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+    struct tc_flower flower;
+    const struct flow *key = &match->flow;
+    struct flow *mask = &match->wc.masks;
+    const struct flow_tnl *tnl = &match->flow.tunnel;
+    struct nlattr *nla;
+    size_t left;
+    int prio = 0;
+    int handle;
+    int ifindex;
+    int err;
+
+    ifindex = netdev_get_ifindex(netdev);
+    if (ifindex < 0) {
+        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
+                    netdev_get_name(netdev), ovs_strerror(-ifindex));
+        return -ifindex;
+    }
+
+    memset(&flower, 0, sizeof flower);
+
+    if (tnl->tun_id) {
+        VLOG_DBG_RL(&rl,
+                    "tunnel: id %#" PRIx64 " src " IP_FMT
+                    " dst " IP_FMT " tp_src %d tp_dst %d",
+                    ntohll(tnl->tun_id),
+                    IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
+                    ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
+        flower.tunnel.id = tnl->tun_id;
+        flower.tunnel.ipv4.ipv4_src = tnl->ip_src;
+        flower.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
+        flower.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
+        flower.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
+        flower.tunnel.tp_src = tnl->tp_src;
+        flower.tunnel.tp_dst = tnl->tp_dst;
+        flower.tunnel.tunnel = true;
+
+        memset(&mask->tunnel, 0, sizeof mask->tunnel);
+    }
+
+    flower.key.eth_type = key->dl_type;
+    flower.mask.eth_type = mask->dl_type;
+
+    if (mask->vlans[0].tci) {
+        ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
+        ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
+        ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);
+
+        if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
+            && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
+            && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
+            && (vid_mask || pcp_mask)) {
+            if (vid_mask) {
+                flower.key.vlan_id = vlan_tci_to_vid(key->vlans[0].tci);
+                VLOG_DBG_RL(&rl, "vlan_id: %d\n", flower.key.vlan_id);
+            }
+            if (pcp_mask) {
+                flower.key.vlan_prio = vlan_tci_to_pcp(key->vlans[0].tci);
+                VLOG_DBG_RL(&rl, "vlan_prio: %d\n", flower.key.vlan_prio);
+            }
+            flower.key.encap_eth_type = flower.key.eth_type;
+            flower.key.eth_type = htons(ETH_TYPE_VLAN);
+        } else if (mask->vlans[0].tci == htons(0xffff) &&
+                   ntohs(key->vlans[0].tci) == 0) {
+            /* exact && no vlan */
+        } else {
+            /* partial mask */
+            return EOPNOTSUPP;
+        }
+    } else if (mask->vlans[1].tci) {
+        return EOPNOTSUPP;
+    }
+    memset(mask->vlans, 0, sizeof mask->vlans);
+
+    flower.key.dst_mac = key->dl_dst;
+    flower.mask.dst_mac = mask->dl_dst;
+    flower.key.src_mac = key->dl_src;
+    flower.mask.src_mac = mask->dl_src;
+    memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
+    memset(&mask->dl_src, 0, sizeof mask->dl_src);
+    mask->dl_type = 0;
+    mask->in_port.odp_port = 0;
+
+    if (is_ip_any(key)) {
+        flower.key.ip_proto = key->nw_proto;
+        flower.mask.ip_proto = mask->nw_proto;
+
+        if (key->nw_proto == IPPROTO_TCP || key->nw_proto == IPPROTO_UDP) {
+            flower.key.dst_port = key->tp_dst;
+            flower.mask.dst_port = mask->tp_dst;
+            flower.key.src_port = key->tp_src;
+            flower.mask.src_port = mask->tp_src;
+            mask->tp_src = 0;
+            mask->tp_dst = 0;
+        }
+
+        mask->nw_frag = 0;
+        mask->nw_tos = 0;
+        mask->nw_proto = 0;
+
+        if (flower.key.eth_type == htons(ETH_P_IP)) {
+            flower.key.ipv4.ipv4_src = key->nw_src;
+            flower.mask.ipv4.ipv4_src = mask->nw_src;
+            flower.key.ipv4.ipv4_dst = key->nw_dst;
+            flower.mask.ipv4.ipv4_dst = mask->nw_dst;
+            mask->nw_src = 0;
+            mask->nw_dst = 0;
+        } else if (flower.key.eth_type == htons(ETH_P_IPV6)) {
+            flower.key.ipv6.ipv6_src = key->ipv6_src;
+            flower.mask.ipv6.ipv6_src = mask->ipv6_src;
+            flower.key.ipv6.ipv6_dst = key->ipv6_dst;
+            flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
+            memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
+            memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
+        }
+    }
+
+    err = test_key_and_mask(match);
+    if (err) {
+        return err;
+    }
+
+    NL_ATTR_FOR_EACH(nla, left, actions, actions_len) {
+        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
+            odp_port_t port = nl_attr_get_odp_port(nla);
+            struct netdev *outdev = netdev_ports_get(port,
+                                                     info->port_hmap_obj);
+
+            flower.ifindex_out = netdev_get_ifindex(outdev);
+            flower.set.tp_dst = info->tp_dst_port;
+            netdev_close(outdev);
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
+            const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla);
+
+            flower.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci);
+            flower.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci);
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
+            flower.vlan_pop = 1;
+        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) {
+            const struct nlattr *set = nl_attr_get(nla);
+            const size_t set_len = nl_attr_get_size(nla);
+
+            err = parse_put_flow_set_action(&flower, set, set_len);
+            if (err) {
+                return err;
+            }
+        } else {
+            VLOG_DBG_RL(&rl, "unsupported put action type: %d",
+                        nl_attr_type(nla));
+            return EOPNOTSUPP;
+        }
+    }
+
+    handle = get_ufid_tc_mapping(ufid, &prio, NULL);
+    if (handle && prio) {
+        VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
+        tc_del_filter(ifindex, prio, handle);
+    }
+
+    if (!prio) {
+        prio = get_prio_for_tc_flower(&flower);
+        if (prio == 0) {
+            VLOG_ERR_RL(&rl, "couldn't get tc prio: %s", ovs_strerror(ENOSPC));
+            return ENOSPC;
+        }
+    }
+
+    flower.act_cookie.data = ufid;
+    flower.act_cookie.len = sizeof *ufid;
+
+    err = tc_replace_flower(ifindex, prio, handle, &flower);
+    if (!err) {
+        add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev, ifindex);
+    }
+
+    return err;
 }
 
 int
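One note on the vlan handling for anyone reading the patch: the tci check only accepts an exact VID and/or PCP match with CFI set, or an exact "no vlan" match, and returns EOPNOTSUPP on any partial mask. Below is a minimal standalone sketch of just that check, not the patch code; the constants are redefined locally with the values I recall from lib/packets.h, and it works in host byte order rather than the ovs_be16 fields used above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the OVS constants (assumed values). */
#define VLAN_VID_MASK 0x0fff
#define VLAN_PCP_MASK 0xe000
#define VLAN_CFI      0x1000

/* Mirrors the acceptance check in netdev_tc_flow_put(): offload only an
 * exact VID and/or exact PCP match (with CFI set), or an exact match on
 * "no vlan"; reject any partial mask. */
static bool
tci_match_offloadable(uint16_t tci, uint16_t mask)
{
    uint16_t vid_mask = mask & VLAN_VID_MASK;
    uint16_t pcp_mask = mask & VLAN_PCP_MASK;
    uint16_t cfi = mask & VLAN_CFI;

    if (cfi && (tci & VLAN_CFI)
        && (!vid_mask || vid_mask == VLAN_VID_MASK)
        && (!pcp_mask || pcp_mask == VLAN_PCP_MASK)
        && (vid_mask || pcp_mask)) {
        return true;                       /* exact vid and/or prio */
    }
    return mask == 0xffff && tci == 0;     /* exact && no vlan */
}

int
main(void)
{
    printf("%d\n", tci_match_offloadable(0x1064, 0x1fff)); /* exact vid 100 */
    printf("%d\n", tci_match_offloadable(0x1064, 0x100f)); /* partial vid   */
    printf("%d\n", tci_match_offloadable(0x0000, 0xffff)); /* no vlan       */
    return 0;
}

Compiled and run, this should print 1, 0, 1, which is what the patch would accept, reject, and accept respectively.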