This patch reorganizes miss handling so that it can support future patches that don't go through the standard dpif interfaces. As a side benefit, the resulting code is a bit easier to understand.
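For reviewers, a minimal, self-contained sketch of the control flow this patch moves to: a handler reads a batch of upcalls, process_upcall() classifies and translates each one, and handle_upcalls() acts on the surviving batch. Every type and helper below (fake_dpif_recv(), the toy struct upcall, the 'budget' counter, main()) is an invented stand-in for illustration, not the real OVS code; only the shape of recv_upcalls() -> process_upcall() -> handle_upcalls() is meant to mirror ofproto-dpif-upcall.c as changed by the diff.

/* Standalone model of the reorganized miss-handling path.  Compile with
 * "cc -std=c99 sketch.c"; it has no OVS dependencies. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define UPCALL_MAX_BATCH 64

struct upcall {
    int id;                   /* Stand-in for packet, flow, and key data. */
    bool actions_composed;    /* Stand-in for upcall->put_actions. */
};

/* Stand-in for dpif_recv(): hands out 'budget' fake upcalls, then fails. */
static int
fake_dpif_recv(int *budget, struct upcall *upcall)
{
    static int next_id;

    if (*budget <= 0) {
        return 1;             /* Nothing left to read. */
    }
    (*budget)--;
    upcall->id = next_id++;
    upcall->actions_composed = false;
    return 0;
}

/* Stand-in for process_upcall(): classify and translate one upcall.  A
 * nonzero return drops it, like the "goto free_dupcall" path in the patch. */
static int
process_upcall(struct upcall *upcall)
{
    if (upcall->id % 5 == 4) {
        return -1;            /* Pretend classification failed. */
    }
    upcall->actions_composed = true;
    return 0;
}

/* Stand-in for handle_upcalls(): act on the whole batch in one pass. */
static void
handle_upcalls(struct upcall *upcalls, size_t n_upcalls)
{
    size_t i;

    for (i = 0; i < n_upcalls; i++) {
        printf("handled upcall %d (actions ready: %d)\n",
               upcalls[i].id, upcalls[i].actions_composed);
    }
}

/* Shape of the new recv_upcalls(): batch, process, handle, release. */
static size_t
recv_upcalls(int *budget)
{
    struct upcall upcalls[UPCALL_MAX_BATCH];
    size_t n_upcalls = 0;

    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct upcall *upcall = &upcalls[n_upcalls];

        if (fake_dpif_recv(budget, upcall)) {
            break;
        }
        if (process_upcall(upcall)) {
            continue;         /* Drop this upcall and reuse its slot. */
        }
        n_upcalls++;
    }

    if (n_upcalls) {
        handle_upcalls(upcalls, n_upcalls);
        /* The real code releases put_actions, xout, the packet, and the
         * receive buffer for each batched upcall here. */
    }
    return n_upcalls;
}

int
main(void)
{
    int budget = 10;          /* Pretend ten upcalls are queued. */

    while (recv_upcalls(&budget)) {
        continue;             /* Mirrors the udpif_upcall_handler() loop. */
    }
    return 0;
}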
Signed-off-by: Ethan Jackson <et...@nicira.com> --- ofproto/ofproto-dpif-ipfix.c | 8 +- ofproto/ofproto-dpif-ipfix.h | 4 +- ofproto/ofproto-dpif-sflow.c | 2 +- ofproto/ofproto-dpif-sflow.h | 6 +- ofproto/ofproto-dpif-upcall.c | 386 +++++++++++++++++++++--------------------- ofproto/ofproto-dpif-upcall.h | 3 - ofproto/ofproto-dpif.c | 5 +- 7 files changed, 207 insertions(+), 207 deletions(-) diff --git a/ofproto/ofproto-dpif-ipfix.c b/ofproto/ofproto-dpif-ipfix.c index 1584c25..ff498d0 100644 --- a/ofproto/ofproto-dpif-ipfix.c +++ b/ofproto/ofproto-dpif-ipfix.c @@ -1019,7 +1019,7 @@ ipfix_cache_update(struct dpif_ipfix_exporter *exporter, static void ipfix_cache_entry_init(struct ipfix_flow_cache_entry *entry, - struct ofpbuf *packet, const struct flow *flow, + const struct ofpbuf *packet, const struct flow *flow, uint64_t packet_delta_count, uint32_t obs_domain_id, uint32_t obs_point_id) { @@ -1284,7 +1284,7 @@ ipfix_send_data_msg(struct dpif_ipfix_exporter *exporter, static void dpif_ipfix_sample(struct dpif_ipfix_exporter *exporter, - struct ofpbuf *packet, const struct flow *flow, + const struct ofpbuf *packet, const struct flow *flow, uint64_t packet_delta_count, uint32_t obs_domain_id, uint32_t obs_point_id) { @@ -1298,7 +1298,7 @@ dpif_ipfix_sample(struct dpif_ipfix_exporter *exporter, } void -dpif_ipfix_bridge_sample(struct dpif_ipfix *di, struct ofpbuf *packet, +dpif_ipfix_bridge_sample(struct dpif_ipfix *di, const struct ofpbuf *packet, const struct flow *flow) OVS_EXCLUDED(mutex) { uint64_t packet_delta_count; @@ -1315,7 +1315,7 @@ dpif_ipfix_bridge_sample(struct dpif_ipfix *di, struct ofpbuf *packet, } void -dpif_ipfix_flow_sample(struct dpif_ipfix *di, struct ofpbuf *packet, +dpif_ipfix_flow_sample(struct dpif_ipfix *di, const struct ofpbuf *packet, const struct flow *flow, uint32_t collector_set_id, uint16_t probability, uint32_t obs_domain_id, uint32_t obs_point_id) OVS_EXCLUDED(mutex) diff --git a/ofproto/ofproto-dpif-ipfix.h b/ofproto/ofproto-dpif-ipfix.h index 6ebf8b0..9de17ab 100644 --- a/ofproto/ofproto-dpif-ipfix.h +++ b/ofproto/ofproto-dpif-ipfix.h @@ -35,9 +35,9 @@ void dpif_ipfix_set_options( const struct ofproto_ipfix_bridge_exporter_options *, const struct ofproto_ipfix_flow_exporter_options *, size_t); -void dpif_ipfix_bridge_sample(struct dpif_ipfix *, struct ofpbuf *, +void dpif_ipfix_bridge_sample(struct dpif_ipfix *, const struct ofpbuf *, const struct flow *); -void dpif_ipfix_flow_sample(struct dpif_ipfix *, struct ofpbuf *, +void dpif_ipfix_flow_sample(struct dpif_ipfix *, const struct ofpbuf *, const struct flow *, uint32_t, uint16_t, uint32_t, uint32_t); diff --git a/ofproto/ofproto-dpif-sflow.c b/ofproto/ofproto-dpif-sflow.c index c7e092a..fad066b 100644 --- a/ofproto/ofproto-dpif-sflow.c +++ b/ofproto/ofproto-dpif-sflow.c @@ -561,7 +561,7 @@ dpif_sflow_odp_port_to_ifindex(const struct dpif_sflow *ds, } void -dpif_sflow_received(struct dpif_sflow *ds, struct ofpbuf *packet, +dpif_sflow_received(struct dpif_sflow *ds, const struct ofpbuf *packet, const struct flow *flow, odp_port_t odp_in_port, const union user_action_cookie *cookie) OVS_EXCLUDED(mutex) diff --git a/ofproto/ofproto-dpif-sflow.h b/ofproto/ofproto-dpif-sflow.h index d53c95c..130568a 100644 --- a/ofproto/ofproto-dpif-sflow.h +++ b/ofproto/ofproto-dpif-sflow.h @@ -46,10 +46,8 @@ void dpif_sflow_del_port(struct dpif_sflow *, odp_port_t odp_port); void dpif_sflow_run(struct dpif_sflow *); void dpif_sflow_wait(struct dpif_sflow *); -void dpif_sflow_received(struct dpif_sflow *, - struct ofpbuf 
*, - const struct flow *, - odp_port_t odp_port, +void dpif_sflow_received(struct dpif_sflow *, const struct ofpbuf *, + const struct flow *, odp_port_t odp_port, const union user_action_cookie *); int dpif_sflow_odp_port_to_ifindex(const struct dpif_sflow *, diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c index 96a7e0b..ba74359 100644 --- a/ofproto/ofproto-dpif-upcall.c +++ b/ofproto/ofproto-dpif-upcall.c @@ -140,6 +140,7 @@ struct udpif { enum upcall_type { BAD_UPCALL, /* Some kind of bug somewhere. */ MISS_UPCALL, /* A flow miss. */ + SLOW_UPCALL, /* A flow miss caused by a userspace action. */ SFLOW_UPCALL, /* sFlow sample. */ FLOW_SAMPLE_UPCALL, /* Per-flow sampling. */ IPFIX_UPCALL /* Per-bridge sampling. */ @@ -148,21 +149,16 @@ enum upcall_type { struct upcall { struct ofproto_dpif *ofproto; - struct flow flow; - const struct nlattr *key; - size_t key_len; - enum dpif_upcall_type upcall_type; + const struct flow *flow; + const struct ofpbuf *packet; + enum dpif_upcall_type type; struct dpif_flow_stats stats; - uint64_t slow_path_buf[128 / 8]; - struct odputil_keybuf mask_buf; - struct xlate_out xout; + struct ofpbuf put_actions; - /* Raw upcall plus data for keeping track of the memory backing it. */ - struct dpif_upcall dpif_upcall; /* As returned by dpif_recv() */ - struct ofpbuf upcall_buf; /* Owns some data in 'dpif_upcall'. */ - uint64_t upcall_stub[512 / 8]; /* Buffer to reduce need for malloc(). */ + const struct nlattr *key; + size_t key_len; }; /* 'udpif_key's are responsible for tracking the little bit of state udpif @@ -198,10 +194,11 @@ struct udpif_key { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs); -static size_t read_upcalls(struct handler *, - struct upcall upcalls[UPCALL_MAX_BATCH]); -static void free_upcall(struct upcall *); -static int convert_upcall(struct udpif *, struct upcall *); +static size_t recv_upcalls(struct handler *); +static int process_upcall(struct udpif *, struct upcall *, + const struct ofpbuf *packet, const struct flow *, + enum dpif_upcall_type, const struct nlattr *userdata, + struct ofpbuf *odp_actions); static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls); static void udpif_stop_threads(struct udpif *); static void udpif_start_threads(struct udpif *, size_t n_handlers, @@ -232,6 +229,10 @@ static bool ukey_acquire(struct udpif *udpif, const struct nlattr *key, size_t key_len, long long int used, struct udpif_key **result); static void ukey_delete(struct revalidator *, struct udpif_key *); +static enum upcall_type classify_upcall(enum dpif_upcall_type type, + const struct nlattr *userdata); +static void exec_upcalls(struct dpif *, struct dpif_upcall *, + struct ofpbuf *bufs, int cnt); static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true); @@ -537,20 +538,10 @@ udpif_upcall_handler(void *arg) struct udpif *udpif = handler->udpif; while (!latch_is_set(&handler->udpif->exit_latch)) { - struct upcall upcalls[UPCALL_MAX_BATCH]; - size_t n_upcalls, i; - - n_upcalls = read_upcalls(handler, upcalls); - if (!n_upcalls) { + if (!recv_upcalls(handler)) { dpif_recv_wait(udpif->dpif, handler->handler_id); latch_wait(&udpif->exit_latch); poll_block(); - } else { - handle_upcalls(handler->udpif, upcalls, n_upcalls); - - for (i = 0; i < n_upcalls; i++) { - free_upcall(&upcalls[i]); - } } coverage_clear(); } @@ -558,6 +549,67 @@ udpif_upcall_handler(void *arg) return NULL; } +static size_t +recv_upcalls(struct handler 
*handler) +{ + struct udpif *udpif = handler->udpif; + uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8]; + struct ofpbuf recv_bufs[UPCALL_MAX_BATCH]; + struct upcall upcalls[UPCALL_MAX_BATCH]; + size_t n_upcalls, i; + + n_upcalls = 0; + while (n_upcalls < UPCALL_MAX_BATCH) { + struct ofpbuf *recv_buf = &recv_bufs[n_upcalls]; + struct upcall *upcall = &upcalls[n_upcalls]; + struct dpif_upcall dupcall; + struct pkt_metadata md; + struct flow flow; + + ofpbuf_use_stub(&recv_buf[n_upcalls], recv_stubs[n_upcalls], + sizeof recv_stubs[n_upcalls]); + if (dpif_recv(udpif->dpif, handler->handler_id, &dupcall, recv_buf)) { + ofpbuf_uninit(recv_buf); + break; + } + + if (odp_flow_key_to_flow(dupcall.key, dupcall.key_len, &flow) + == ODP_FIT_ERROR) { + goto free_dupcall; + } + + md = pkt_metadata_from_flow(&flow); + flow_extract(&dupcall.packet, &md, &flow); + + if (process_upcall(udpif, upcall, &dupcall.packet, &flow, dupcall.type, + dupcall.userdata, NULL)) { + goto free_dupcall; + } + + upcall->key = dupcall.key; + upcall->key_len = dupcall.key_len; + + n_upcalls++; + continue; + +free_dupcall: + ofpbuf_uninit(&dupcall.packet); + ofpbuf_uninit(recv_buf); + } + + if (n_upcalls) { + handle_upcalls(handler->udpif, upcalls, n_upcalls); + for (i = 0; i < n_upcalls; i++) { + ofpbuf_uninit(&upcalls[i].put_actions); + xlate_out_uninit(&upcalls[i].xout); + ofpbuf_uninit(CONST_CAST(struct ofpbuf *, upcalls[i].packet)); + ofpbuf_uninit(&recv_bufs[i]); + } + } + + return n_upcalls; +} + static void * udpif_revalidator(void *arg) { @@ -646,14 +698,13 @@ udpif_revalidator(void *arg) } static enum upcall_type -classify_upcall(const struct upcall *upcall) +classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata) { - const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall; union user_action_cookie cookie; size_t userdata_len; /* First look at the upcall type. */ - switch (dpif_upcall->type) { + switch (type) { case DPIF_UC_ACTION: break; @@ -662,17 +713,16 @@ classify_upcall(const struct upcall *upcall) case DPIF_N_UC_TYPES: default: - VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, - dpif_upcall->type); + VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type); return BAD_UPCALL; } /* "action" upcalls need a closer look. */ - if (!dpif_upcall->userdata) { + if (!userdata) { VLOG_WARN_RL(&rl, "action upcall missing cookie"); return BAD_UPCALL; } - userdata_len = nl_attr_get_size(dpif_upcall->userdata); + userdata_len = nl_attr_get_size(userdata); if (userdata_len < sizeof cookie.type || userdata_len > sizeof cookie) { VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE, @@ -680,13 +730,13 @@ classify_upcall(const struct upcall *upcall) return BAD_UPCALL; } memset(&cookie, 0, sizeof cookie); - memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len); + memcpy(&cookie, nl_attr_get(userdata), userdata_len); if (userdata_len == MAX(8, sizeof cookie.sflow) && cookie.type == USER_ACTION_COOKIE_SFLOW) { return SFLOW_UPCALL; } else if (userdata_len == MAX(8, sizeof cookie.slow_path) && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) { - return MISS_UPCALL; + return SLOW_UPCALL; } else if (userdata_len == MAX(8, sizeof cookie.flow_sample) && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) { return FLOW_SAMPLE_UPCALL; @@ -704,7 +754,7 @@ classify_upcall(const struct upcall *upcall) * initialized with at least 128 bytes of space. 
*/ static void compose_slow_path(struct udpif *udpif, struct xlate_out *xout, - struct flow *flow, odp_port_t odp_in_port, + const struct flow *flow, odp_port_t odp_in_port, struct ofpbuf *buf) { union user_action_cookie cookie; @@ -723,28 +773,30 @@ compose_slow_path(struct udpif *udpif, struct xlate_out *xout, } static void -upcall_init(struct upcall *upcall, struct flow *flow, struct ofpbuf *packet, - struct ofproto_dpif *ofproto, struct dpif_upcall *dupcall, - ofp_port_t ofp_in_port) +upcall_init(struct udpif *udpif, struct upcall *upcall, + const struct ofpbuf *packet, const struct flow *flow, + enum dpif_upcall_type type, struct ofproto_dpif *ofproto, + ofp_port_t ofp_in_port, struct ofpbuf *odp_actions) { - struct pkt_metadata md = pkt_metadata_from_flow(flow); struct xlate_in xin; - flow_extract(packet, &md, &upcall->flow); - upcall->ofproto = ofproto; - upcall->key = dupcall->key; - upcall->key_len = dupcall->key_len; - upcall->upcall_type = dupcall->type; upcall->stats.n_packets = 1; upcall->stats.n_bytes = ofpbuf_size(packet); upcall->stats.used = time_msec(); - upcall->stats.tcp_flags = ntohs(upcall->flow.tcp_flags); + upcall->stats.tcp_flags = ntohs(flow->tcp_flags); - xlate_in_init(&xin, upcall->ofproto, &upcall->flow, ofp_in_port, NULL, + upcall->packet = packet; + upcall->flow = flow; + upcall->type = type; + upcall->key = NULL; + upcall->key_len = 0; + + xlate_in_init(&xin, upcall->ofproto, upcall->flow, ofp_in_port, NULL, upcall->stats.tcp_flags, packet); + xin.odp_actions = odp_actions; - if (upcall->upcall_type == DPIF_UC_MISS) { + if (type == DPIF_UC_MISS) { xin.resubmit_stats = &upcall->stats; } else { /* For non-miss upcalls, there's a flow in the datapath which this @@ -753,14 +805,42 @@ upcall_init(struct upcall *upcall, struct flow *flow, struct ofpbuf *packet, } xlate_actions(&xin, &upcall->xout); -} -static void -free_upcall(struct upcall *upcall) -{ - xlate_out_uninit(&upcall->xout); - ofpbuf_uninit(&upcall->dpif_upcall.packet); - ofpbuf_uninit(&upcall->upcall_buf); + /* Special case for fail-open mode. + * + * If we are in fail-open mode, but we are connected to a controller too, + * then we should send the packet up to the controller in the hope that it + * will try to set up a flow and thereby allow us to exit fail-open. + * + * See the top-level comment in fail-open.c for more information. + * + * Copy packets before they are modified by execution. */ + if (upcall->xout.fail_open) { + const struct ofpbuf *packet = upcall->packet; + struct ofproto_packet_in *pin; + + pin = xmalloc(sizeof *pin); + pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet)); + pin->up.packet_len = ofpbuf_size(packet); + pin->up.reason = OFPR_NO_MATCH; + pin->up.table_id = 0; + pin->up.cookie = OVS_BE64_MAX; + flow_get_metadata(upcall->flow, &pin->up.fmd); + pin->send_len = 0; /* Not used for flow table misses. 
*/ + pin->miss_type = OFPROTO_PACKET_IN_NO_MISS; + ofproto_dpif_send_packet_in(upcall->ofproto, pin); + } + + if (!upcall->xout.slow) { + ofpbuf_use_const(&upcall->put_actions, + ofpbuf_data(upcall->xout.odp_actions), + ofpbuf_size(upcall->xout.odp_actions)); + } else { + ofpbuf_init(&upcall->put_actions, 0); + compose_slow_path(udpif, &upcall->xout, upcall->flow, + upcall->flow->in_port.odp_port, + &upcall->put_actions); + } } static struct udpif * @@ -776,9 +856,9 @@ find_udpif(struct dpif *dpif) return NULL; } -void +static void exec_upcalls(struct dpif *dpif, struct dpif_upcall *dupcalls, - struct ofpbuf *bufs, int cnt) + struct ofpbuf *bufs OVS_UNUSED, int cnt) { struct upcall upcalls[UPCALL_MAX_BATCH]; struct udpif *udpif; @@ -792,117 +872,88 @@ exec_upcalls(struct dpif *dpif, struct dpif_upcall *dupcalls, for (j = i; j < MIN(i + UPCALL_MAX_BATCH, cnt); j++) { struct upcall *upcall = &upcalls[n_upcalls]; struct dpif_upcall *dupcall = &dupcalls[j]; - struct ofpbuf *buf = &bufs[j]; - - upcall->dpif_upcall = *dupcall; - upcall->upcall_buf = *buf; + struct pkt_metadata md; + struct flow flow; dpif_print_packet(dpif, dupcall); - if (!convert_upcall(udpif, upcall)) { - n_upcalls += 1; - } - } - if (n_upcalls) { - handle_upcalls(udpif, upcalls, n_upcalls); - for (j = 0; j < n_upcalls; j++) { - free_upcall(&upcalls[j]); + if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, &flow) + == ODP_FIT_ERROR) { + continue; } - } - } -} -/* Reads and classifies upcalls. Returns the number of upcalls successfully - * read. */ -static size_t -read_upcalls(struct handler *handler, - struct upcall upcalls[UPCALL_MAX_BATCH]) -{ - struct udpif *udpif = handler->udpif; - size_t i; - size_t n_upcalls = 0; + md = pkt_metadata_from_flow(&flow); + flow_extract(&dupcall->packet, &md, &flow); - /* Try reading UPCALL_MAX_BATCH upcalls from dpif. 
*/ - for (i = 0; i < UPCALL_MAX_BATCH; i++) { - struct upcall *upcall = &upcalls[n_upcalls]; - int error; + if (process_upcall(udpif, upcall, &dupcall->packet, &flow, + dupcall->type, dupcall->userdata, NULL)) { + continue; + } - ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub, - sizeof upcall->upcall_stub); - error = dpif_recv(udpif->dpif, handler->handler_id, - &upcall->dpif_upcall, &upcall->upcall_buf); - if (error) { - ofpbuf_uninit(&upcall->upcall_buf); - break; + upcall->key = dupcall->key; + upcall->key_len = dupcall->key_len; + n_upcalls++; } - if (!convert_upcall(udpif, upcall)) { - n_upcalls += 1; + if (n_upcalls) { + handle_upcalls(udpif, upcalls, n_upcalls); + for (j = 0; j < n_upcalls; j++) { + ofpbuf_uninit(&upcalls[i].put_actions); + xlate_out_uninit(&upcalls[i].xout); + ofpbuf_uninit(CONST_CAST(struct ofpbuf *, upcalls[i].packet)); + } } } - return n_upcalls; } static int -convert_upcall(struct udpif *udpif, struct upcall *upcall) +process_upcall(struct udpif *udpif, struct upcall *upcall, + const struct ofpbuf *packet, const struct flow *flow, + enum dpif_upcall_type dtype, const struct nlattr *userdata, + struct ofpbuf *odp_actions) { - struct dpif_upcall *dupcall = &upcall->dpif_upcall; - struct ofpbuf *packet = &dupcall->packet; struct ofproto_dpif *ofproto; + enum upcall_type upcall_type; struct dpif_sflow *sflow; struct dpif_ipfix *ipfix; ofp_port_t ofp_in_port; - enum upcall_type type; - struct flow flow; int error; - if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, &flow) - == ODP_FIT_ERROR) { - error = EINVAL; - goto destroy_upcall; - } - - error = xlate_receive(udpif->backer, &flow, &ofproto, &ipfix, &sflow, NULL, - &ofp_in_port); - + error = xlate_receive(udpif->backer, flow, &ofproto, &ipfix, + &sflow, NULL, &ofp_in_port); if (error) { if (error == ENODEV) { - /* Received packet on datapath port for which we couldn't - * associate an ofproto. This can happen if a port is removed - * while traffic is being received. Print a rate-limited - * message in case it happens frequently. Install a drop flow - * so that future packets of the flow are inexpensively dropped - * in the kernel. */ + /* Received packet on datapath port for which we couldn't associate + * an ofproto. This can happen if a port is removed while traffic + * is being received. Print a rate-limited message in case it + * happens frequently. 
*/ VLOG_INFO_RL(&rl, "received packet on unassociated datapath " - "port %"PRIu32, flow.in_port.odp_port); - dpif_flow_put(udpif->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY, - dupcall->key, dupcall->key_len, NULL, 0, NULL, 0, - NULL); + "port %"PRIu32, ofp_in_port); } - goto destroy_upcall; + return error; } - type = classify_upcall(upcall); - if (type == MISS_UPCALL) { - upcall_init(upcall, &flow, packet, ofproto, dupcall, ofp_in_port); - return error; + upcall_type = classify_upcall(dtype, userdata); + if (upcall_type == MISS_UPCALL || upcall_type == SLOW_UPCALL) { + upcall_init(udpif, upcall, packet, flow, dtype, ofproto, ofp_in_port, + odp_actions); + return 0; } - switch (type) { + switch (upcall_type) { case SFLOW_UPCALL: if (sflow) { union user_action_cookie cookie; memset(&cookie, 0, sizeof cookie); - memcpy(&cookie, nl_attr_get(dupcall->userdata), - sizeof cookie.sflow); - dpif_sflow_received(sflow, packet, &flow, flow.in_port.odp_port, + memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow); + dpif_sflow_received(sflow, packet, flow, flow->in_port.odp_port, &cookie); } break; case IPFIX_UPCALL: if (ipfix) { - dpif_ipfix_bridge_sample(ipfix, packet, &flow); + dpif_ipfix_bridge_sample(ipfix, packet, flow); } break; case FLOW_SAMPLE_UPCALL: @@ -910,12 +961,11 @@ convert_upcall(struct udpif *udpif, struct upcall *upcall) union user_action_cookie cookie; memset(&cookie, 0, sizeof cookie); - memcpy(&cookie, nl_attr_get(dupcall->userdata), - sizeof cookie.flow_sample); + memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample); /* The flow reflects exactly the contents of the packet. * Sample the packet using it. */ - dpif_ipfix_flow_sample(ipfix, packet, &flow, + dpif_ipfix_flow_sample(ipfix, packet, flow, cookie.flow_sample.collector_set_id, cookie.flow_sample.probability, cookie.flow_sample.obs_domain_id, @@ -925,28 +975,25 @@ convert_upcall(struct udpif *udpif, struct upcall *upcall) case BAD_UPCALL: break; case MISS_UPCALL: + case SLOW_UPCALL: OVS_NOT_REACHED(); } dpif_ipfix_unref(ipfix); dpif_sflow_unref(sflow); - error = EAGAIN; - -destroy_upcall: - ofpbuf_uninit(&upcall->dpif_upcall.packet); - ofpbuf_uninit(&upcall->upcall_buf); - return error; + return EAGAIN; } static void handle_upcalls(struct udpif *udpif, struct upcall *upcalls, size_t n_upcalls) { + struct odputil_keybuf mask_bufs[UPCALL_MAX_BATCH]; struct dpif_op *opsp[UPCALL_MAX_BATCH * 2]; struct dpif_op ops[UPCALL_MAX_BATCH * 2]; - size_t n_ops, i; unsigned int flow_limit; - bool fail_open, may_put; + size_t n_ops, i; + bool may_put; atomic_read(&udpif->flow_limit, &flow_limit); may_put = udpif_get_n_flows(udpif) < flow_limit; @@ -961,28 +1008,24 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, * * The loop fills 'ops' with an array of operations to execute in the * datapath. */ - fail_open = false; n_ops = 0; for (i = 0; i < n_upcalls; i++) { struct upcall *upcall = &upcalls[i]; - struct ofpbuf *packet = &upcall->dpif_upcall.packet; + const struct ofpbuf *packet = upcall->packet; struct dpif_op *op; - fail_open = fail_open || upcall->xout.fail_open; - /* Do not install a flow into the datapath if: * * - The datapath already has too many flows. * * - We received this packet via some flow installed in the kernel * already. 
*/ - if (may_put - && upcall->dpif_upcall.type == DPIF_UC_MISS) { + if (may_put && upcall->type == DPIF_UC_MISS) { struct ofpbuf mask; bool megaflow; atomic_read(&enable_megaflows, &megaflow); - ofpbuf_use_stack(&mask, &upcall->mask_buf, sizeof upcall->mask_buf); + ofpbuf_use_stack(&mask, &mask_bufs[i], sizeof mask_bufs[i]); if (megaflow) { size_t max_mpls; bool recirc; @@ -990,7 +1033,7 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto); max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto); odp_flow_key_from_mask(&mask, &upcall->xout.wc.masks, - &upcall->flow, UINT32_MAX, max_mpls, + upcall->flow, UINT32_MAX, max_mpls, recirc); } @@ -1002,27 +1045,14 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, op->u.flow_put.mask = ofpbuf_data(&mask); op->u.flow_put.mask_len = ofpbuf_size(&mask); op->u.flow_put.stats = NULL; - - if (!upcall->xout.slow) { - op->u.flow_put.actions = ofpbuf_data(upcall->xout.odp_actions); - op->u.flow_put.actions_len = ofpbuf_size(upcall->xout.odp_actions); - } else { - struct ofpbuf buf; - - ofpbuf_use_stack(&buf, upcall->slow_path_buf, - sizeof upcall->slow_path_buf); - compose_slow_path(udpif, &upcall->xout, &upcall->flow, - upcall->flow.in_port.odp_port, &buf); - op->u.flow_put.actions = ofpbuf_data(&buf); - op->u.flow_put.actions_len = ofpbuf_size(&buf); - } + op->u.flow_put.actions = ofpbuf_data(&upcall->put_actions); + op->u.flow_put.actions_len = ofpbuf_size(&upcall->put_actions); } if (ofpbuf_size(upcall->xout.odp_actions)) { - op = &ops[n_ops++]; op->type = DPIF_OP_EXECUTE; - op->u.execute.packet = packet; + op->u.execute.packet = CONST_CAST(struct ofpbuf *, packet); odp_key_to_pkt_metadata(upcall->key, upcall->key_len, &op->u.execute.md); op->u.execute.actions = ofpbuf_data(upcall->xout.odp_actions); @@ -1031,34 +1061,6 @@ handle_upcalls(struct udpif *udpif, struct upcall *upcalls, } } - /* Special case for fail-open mode. - * - * If we are in fail-open mode, but we are connected to a controller too, - * then we should send the packet up to the controller in the hope that it - * will try to set up a flow and thereby allow us to exit fail-open. - * - * See the top-level comment in fail-open.c for more information. - * - * Copy packets before they are modified by execution. */ - if (fail_open) { - for (i = 0; i < n_upcalls; i++) { - struct upcall *upcall = &upcalls[i]; - struct ofpbuf *packet = &upcall->dpif_upcall.packet; - struct ofproto_packet_in *pin; - - pin = xmalloc(sizeof *pin); - pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet)); - pin->up.packet_len = ofpbuf_size(packet); - pin->up.reason = OFPR_NO_MATCH; - pin->up.table_id = 0; - pin->up.cookie = OVS_BE64_MAX; - flow_get_metadata(&upcall->flow, &pin->up.fmd); - pin->send_len = 0; /* Not used for flow table misses. */ - pin->miss_type = OFPROTO_PACKET_IN_NO_MISS; - ofproto_dpif_send_packet_in(upcall->ofproto, pin); - } - } - /* Execute batch. */ for (i = 0; i < n_ops; i++) { opsp[i] = &ops[i]; diff --git a/ofproto/ofproto-dpif-upcall.h b/ofproto/ofproto-dpif-upcall.h index 2b197ad..ec19bd0 100644 --- a/ofproto/ofproto-dpif-upcall.h +++ b/ofproto/ofproto-dpif-upcall.h @@ -28,9 +28,6 @@ struct simap; * them. Additionally, it's responsible for maintaining the datapath flow * table. 
*/ -void exec_upcalls(struct dpif *, struct dpif_upcall *, struct ofpbuf *, - int cnt); - struct udpif *udpif_create(struct dpif_backer *, struct dpif *); void udpif_run(struct udpif *udpif); void udpif_set_threads(struct udpif *, size_t n_handlers, diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 6fd7d3d..2885763 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -4602,7 +4602,10 @@ ofproto_dpif_contains_flow(const struct ofproto_dpif *ofproto, return false; } - xlate_receive(ofproto->backer, &flow, &ofp, NULL, NULL, NULL, NULL); + if (xlate_receive(ofproto->backer, &flow, &ofp, NULL, NULL, NULL, NULL)) { + return false; + } + return ofp == ofproto; } -- 1.8.1.2