It is pointless to check for special fragment handling multiple times for
each packet.  Since the nw_frag field is not writeable, it suffices to
check it once, before the first table lookup.
Also, previously a fragment could have been dropped before ingress stats
were updated.  This patch changes the drop to happen after ingress stats
have been updated.

Signed-off-by: Jarno Rajahalme <[email protected]>
---
v2: Reorganized patch to be much simpler, added further clean-up.

 ofproto/ofproto-dpif-xlate.c |   73 ++++++++++++++++++------------------------
 ofproto/ofproto-dpif.c       |   65 +++++++++++++++++++++----------------
 2 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index a881dfb..e9cef62 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -4141,7 +4141,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
     struct flow *flow = &xin->flow;
     struct rule_dpif *rule = NULL;
 
-    const struct rule_actions *actions = NULL;
     enum slow_path_reason special;
     const struct ofpact *ofpacts;
     struct xport *in_port;
@@ -4229,6 +4228,11 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
                                         !xin->skip_wildcards ? wc : NULL,
                                         &rule, ctx.xin->xcache != NULL,
                                         ctx.xin->resubmit_stats);
+        /* These may have been cleared due to frags handling. */
+        if (!(flow->tp_src | flow->tp_dst)) {
+            ctx.base_flow.tp_src = flow->tp_src;
+            ctx.base_flow.tp_dst = flow->tp_dst;
+        }
         if (ctx.xin->resubmit_stats) {
             rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
         }
@@ -4240,13 +4244,38 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
         ctx.rule = rule;
     }
+
+    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
+    if (in_port && in_port->is_tunnel) {
+        if (ctx.xin->resubmit_stats) {
+            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+            if (in_port->bfd) {
+                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+            }
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+            entry->u.dev.rx = netdev_ref(in_port->netdev);
+            entry->u.dev.bfd = bfd_ref(in_port->bfd);
+        }
+    }
+
+    /* Drop frags if needed after ingress stats are taken care of (above). */
+    if (OVS_UNLIKELY(flow->nw_frag & FLOW_NW_FRAG_ANY
+                     && ctx.xbridge->frag == OFPC_FRAG_DROP)) {
+        return;
+    }
+
     xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
 
     if (xin->ofpacts) {
         ofpacts = xin->ofpacts;
         ofpacts_len = xin->ofpacts_len;
     } else if (ctx.rule) {
-        actions = rule_dpif_get_actions(ctx.rule);
+        const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);
+
         ofpacts = actions->ofpacts;
         ofpacts_len = actions->ofpacts_len;
     } else {
@@ -4263,46 +4292,6 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         orig_flow = *flow;
     }
 
-    if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
-        switch (ctx.xbridge->frag) {
-        case OFPC_FRAG_NORMAL:
-            /* We must pretend that transport ports are unavailable. */
-            flow->tp_src = ctx.base_flow.tp_src = htons(0);
-            flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
-            break;
-
-        case OFPC_FRAG_DROP:
-            return;
-
-        case OFPC_FRAG_REASM:
-            OVS_NOT_REACHED();
-
-        case OFPC_FRAG_NX_MATCH:
-            /* Nothing to do. */
-            break;
-
-        case OFPC_INVALID_TTL_TO_CONTROLLER:
-            OVS_NOT_REACHED();
-        }
-    }
-
-    in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
-    if (in_port && in_port->is_tunnel) {
-        if (ctx.xin->resubmit_stats) {
-            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
-            if (in_port->bfd) {
-                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
-            }
-        }
-        if (ctx.xin->xcache) {
-            struct xc_entry *entry;
-
-            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
-            entry->u.dev.rx = netdev_ref(in_port->netdev);
-            entry->u.dev.bfd = bfd_ref(in_port->bfd);
-        }
-    }
-
     special = process_special(&ctx, flow, in_port, ctx.xin->packet);
     if (special) {
         ctx.xout->slow |= special;
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index d965d38..102cbd7 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -3525,9 +3525,8 @@ rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
     ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
 }
 
-/* Returns 'rule''s actions.  The caller owns a reference on the returned
- * actions and must eventually release it (with rule_actions_unref()) to avoid
- * a memory leak. */
+/* Returns 'rule''s actions, which are RCU protected.  The caller can use the
+ * actions until it quiesces. */
 const struct rule_actions *
 rule_dpif_get_actions(const struct rule_dpif *rule)
 {
@@ -3575,6 +3574,9 @@ rule_set_recirc_id(struct rule *rule_, uint32_t id)
  * OFPTC11_TABLE_MISS_CONTINUE is in effect for the sequence of tables
  * where misses occur.
  *
+ * 'flow->tp_src' and 'flow->tp_dst' may be modified depending on the
+ * frags handling configuration for 'ofproto'.
+ *
  * The rule is returned in '*rule', which is valid at least until the next
  * RCU quiescent period.  If the '*rule' needs to stay around longer,
  * a non-zero 'take_ref' must be passed in to cause a reference to be taken
@@ -3588,6 +3590,32 @@ rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow,
     enum ofputil_port_config config = 0;
     uint8_t table_id;
 
+    if (flow->nw_frag & FLOW_NW_FRAG_ANY
+        && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
+
+        if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+            /* We must pretend that transport ports are unavailable. */
+            flow->tp_src = htons(0);
+            flow->tp_dst = htons(0);
+        } else {
+            /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+             * Use the drop_frags_rule (which cannot disappear). */
+            *rule = ofproto->drop_frags_rule;
+
+            /* XXX: Earlier the stats went to table 0. */
+            if (stats) {
+                struct oftable *tbl = &ofproto->up.tables[TBL_INTERNAL];
+                unsigned long orig;
+
+                atomic_add_relaxed(&tbl->n_matched, stats->n_packets, &orig);
+            }
+            if (take_ref) {
+                rule_dpif_ref(*rule);
+            }
+            return TBL_INTERNAL;
+        }
+    }
+
     if (ofproto_dpif_get_enable_recirc(ofproto)) {
         /* Always exactly match recirc_id since datapath supports
          * recirculation. */
@@ -3653,31 +3681,6 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
     struct classifier *cls = &ofproto->up.tables[table_id].cls;
     const struct cls_rule *cls_rule;
     struct rule_dpif *rule;
-    struct flow ofpc_normal_flow;
-
-    if (ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
-        /* We always unwildcard dl_type and nw_frag (for IP), so they
-         * need not be unwildcarded here. */
-
-        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
-            if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
-                /* We must pretend that transport ports are unavailable. */
-                ofpc_normal_flow = *flow;
-                ofpc_normal_flow.tp_src = htons(0);
-                ofpc_normal_flow.tp_dst = htons(0);
-                flow = &ofpc_normal_flow;
-            } else {
-                /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
-                 * Use the drop_frags_rule (which cannot disappear). */
-                cls_rule = &ofproto->drop_frags_rule->up.cr;
-                rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
-                if (take_ref) {
-                    rule_dpif_ref(rule);
-                }
-                return rule;
-            }
-        }
-    }
 
     do {
         cls_rule = classifier_lookup(cls, flow, wc);
@@ -4710,7 +4713,13 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
     if (ofpacts) {
         rule = NULL;
     } else {
+        ovs_be16 tp_src = flow->tp_src;
+        ovs_be16 tp_dst = flow->tp_dst;
+
         rule_dpif_lookup(ofproto, flow, &trace.wc, &rule, false, NULL);
+        /* Restore ports in case they were modified by rule_dpif_lookup(). */
+        flow->tp_src = tp_src;
+        flow->tp_dst = tp_dst;
 
         trace_format_rule(ds, 0, rule);
         if (rule == ofproto->miss_rule) {
-- 
1.7.10.4
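
For readers following the logic rather than the diff: after this patch,
ingress stats are accounted first, and the fragment check then runs exactly
once, before the first table lookup.  The self-contained C sketch below
models that control flow; the types and helper names are simplified
stand-ins for illustration, not the actual OVS structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the OFPC_FRAG_* handling modes. */
enum frag_handling { FRAG_NORMAL, FRAG_DROP, FRAG_NX_MATCH };

struct mini_flow {
    bool is_frag;              /* Stand-in for nw_frag & FLOW_NW_FRAG_ANY. */
    uint16_t tp_src, tp_dst;   /* Transport ports (host byte order here). */
};

/* Runs exactly once per packet, before the first table lookup.  Returns
 * false if the packet must be dropped.  Because nw_frag is not writeable,
 * no later table lookup needs to repeat this check. */
static bool
handle_frags_once(struct mini_flow *flow, enum frag_handling mode)
{
    if (flow->is_frag && mode != FRAG_NX_MATCH) {
        if (mode == FRAG_NORMAL) {
            /* Pretend that transport ports are unavailable. */
            flow->tp_src = flow->tp_dst = 0;
        } else {
            return false;      /* FRAG_DROP. */
        }
    }
    return true;
}

int
main(void)
{
    struct mini_flow flow = { .is_frag = true, .tp_src = 80, .tp_dst = 1234 };

    /* Ingress stats would be updated here, *before* a possible drop, so
     * dropped fragments are still counted. */
    if (!handle_frags_once(&flow, FRAG_NORMAL)) {
        puts("fragment dropped (after ingress stats)");
        return 0;
    }
    printf("first lookup sees tp_src=%d tp_dst=%d\n", flow.tp_src, flow.tp_dst);
    return 0;
}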
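
The changed comment on rule_dpif_get_actions() captures an RCU lifetime
rule: the returned actions remain valid only until the reading thread
quiesces, so the fast path needs no reference counting.  Below is a toy,
single-threaded model of that contract using C11 atomics; it is a sketch of
the rule, not OVS's ovs-rcu implementation, and the names are hypothetical.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct actions {
    char ofpacts[32];               /* Stand-in for the real action list. */
};

static _Atomic(struct actions *) rule_actions;

/* Reader side: the returned pointer may be used freely, but only until the
 * reader's next quiescent point.  No reference is taken. */
static struct actions *
get_actions(void)
{
    return atomic_load(&rule_actions);
}

/* Writer side: publish a new version and return the old one.  The old
 * version may only be freed after a grace period, i.e. once every reader
 * has quiesced (OVS defers such frees with ovsrcu_postpone()). */
static struct actions *
set_actions(const char *acts)
{
    struct actions *next = malloc(sizeof *next);

    snprintf(next->ofpacts, sizeof next->ofpacts, "%s", acts);
    return atomic_exchange(&rule_actions, next);
}

int
main(void)
{
    free(set_actions("output:1"));     /* Old version is NULL initially. */

    struct actions *a = get_actions(); /* Borrowed, RCU-style. */
    printf("using actions: %s\n", a->ofpacts);

    /* The writer replaces the actions; in a threaded program 'a' would
     * remain valid here because this reader has not yet quiesced. */
    struct actions *old = set_actions("drop");
    /* ... grace period elapses: all readers quiesce ... */
    free(old);

    free(get_actions());
    return 0;
}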
