A subsequent commit will introduce the ability to grow a dp_packet_batch. Since memory will be allocated when growing a batch, split dp_packet_batch handling into dedicated helpers for initialising, resetting (before reuse) and destroying.
Some special care must be taken to init/destroy xmemdup'd dp_packet_batch objects. Signed-off-by: David Marchand <[email protected]> --- lib/dp-packet.h | 15 +++++++-- lib/dpif-netdev-avx512.c | 5 ++- lib/dpif-netdev.c | 66 ++++++++++++++++++++++++++++++++++++--- lib/dpif.c | 1 + lib/netdev-afxdp.c | 2 +- lib/netdev-bsd.c | 3 +- lib/netdev-dummy.c | 3 +- lib/netdev-linux.c | 2 +- lib/netdev.c | 1 + lib/odp-execute-private.c | 2 ++ lib/odp-execute.c | 3 ++ tests/test-conntrack.c | 6 +++- 12 files changed, 97 insertions(+), 12 deletions(-) diff --git a/lib/dp-packet.h b/lib/dp-packet.h index 16b9afbc36..2540534cc3 100644 --- a/lib/dp-packet.h +++ b/lib/dp-packet.h @@ -866,12 +866,18 @@ struct dp_packet_batch { }; static inline void -dp_packet_batch_init(struct dp_packet_batch *batch) +dp_packet_batch_reset_metadata(struct dp_packet_batch *batch) { batch->count = 0; batch->trunc = false; } +static inline void +dp_packet_batch_init(struct dp_packet_batch *batch) +{ + dp_packet_batch_reset_metadata(batch); +} + static inline void dp_packet_batch_add__(struct dp_packet_batch *batch, struct dp_packet *packet, size_t limit) @@ -987,6 +993,11 @@ dp_packet_batch_clone(struct dp_packet_batch *dst, dst->trunc = src->trunc; } +static inline void +dp_packet_batch_destroy(struct dp_packet_batch *batch OVS_UNUSED) +{ +} + static inline void dp_packet_delete_batch(struct dp_packet_batch *batch, bool should_steal) { @@ -996,7 +1007,7 @@ dp_packet_delete_batch(struct dp_packet_batch *batch, bool should_steal) DP_PACKET_BATCH_FOR_EACH (i, packet, batch) { dp_packet_delete(packet); } - dp_packet_batch_init(batch); + dp_packet_batch_reset_metadata(batch); } } diff --git a/lib/dpif-netdev-avx512.c b/lib/dpif-netdev-avx512.c index 22cfad647e..b36f3d33e9 100644 --- a/lib/dpif-netdev-avx512.c +++ b/lib/dpif-netdev-avx512.c @@ -359,7 +359,8 @@ action_stage: /* Initialize the "Action Batch" for each flow handled below. 
*/ struct dp_packet_batch action_batch; - action_batch.trunc = 0; + + dp_packet_batch_init(&action_batch); while (lookup_pkts_bitmask) { uint32_t rule_pkt_idx = raw_ctz(lookup_pkts_bitmask); @@ -410,6 +411,8 @@ action_stage: bytes, tcp_flags); } + dp_packet_batch_destroy(&action_batch); + return 0; } diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 0690d22cf2..47517ec010 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -4119,6 +4119,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute) * tunnel push. */ dp_packet_delete_batch(&pp, true); } + dp_packet_batch_destroy(&pp); return 0; } @@ -4875,7 +4876,7 @@ dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd, continue; } netdev_send(p->port->netdev, i, &p->txq_pkts[i], true); - dp_packet_batch_init(&p->txq_pkts[i]); + dp_packet_batch_reset_metadata(&p->txq_pkts[i]); } } else { if (p->port->txq_mode == TXQ_MODE_XPS) { @@ -4887,7 +4888,7 @@ dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd, } netdev_send(p->port->netdev, tx_qid, &p->output_pkts, concurrent_txqs); } - dp_packet_batch_init(&p->output_pkts); + dp_packet_batch_reset_metadata(&p->output_pkts); /* Update time of the next flush. 
*/ atomic_read_relaxed(&pmd->dp->tx_flush_interval, &tx_flush_interval); @@ -4997,6 +4998,8 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd, } } + dp_packet_batch_destroy(&batch); + pmd->ctx.last_rxq = NULL; return batch_cnt; @@ -6410,11 +6413,27 @@ pmd_free_cached_ports(struct dp_netdev_pmd_thread *pmd) dpif_netdev_xps_revalidate_pmd(pmd, true); HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->tnl_port_cache) { + if (tx_port_cached->txq_pkts) { + int n_txq = netdev_n_txq(tx_port_cached->port->netdev); + + for (int i = 0; i < n_txq; i++) { + dp_packet_batch_destroy(&tx_port_cached->txq_pkts[i]); + } + } free(tx_port_cached->txq_pkts); + dp_packet_batch_destroy(&tx_port_cached->output_pkts); free(tx_port_cached); } HMAP_FOR_EACH_POP (tx_port_cached, node, &pmd->send_port_cache) { + if (tx_port_cached->txq_pkts) { + int n_txq = netdev_n_txq(tx_port_cached->port->netdev); + + for (int i = 0; i < n_txq; i++) { + dp_packet_batch_destroy(&tx_port_cached->txq_pkts[i]); + } + } free(tx_port_cached->txq_pkts); + dp_packet_batch_destroy(&tx_port_cached->output_pkts); free(tx_port_cached); } } @@ -6439,10 +6458,14 @@ pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) if (netdev_has_tunnel_push_pop(tx_port->port->netdev)) { tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); + dp_packet_batch_init(&tx_port_cached->output_pkts); if (tx_port->txq_pkts) { txq_pkts_cached = xmemdup(tx_port->txq_pkts, n_txq * sizeof *tx_port->txq_pkts); tx_port_cached->txq_pkts = txq_pkts_cached; + for (int i = 0; i < n_txq; i++) { + dp_packet_batch_init(&tx_port_cached->txq_pkts[i]); + } } hmap_insert(&pmd->tnl_port_cache, &tx_port_cached->node, hash_port_no(tx_port_cached->port->port_no)); @@ -6450,10 +6473,14 @@ pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) if (n_txq) { tx_port_cached = xmemdup(tx_port, sizeof *tx_port_cached); + dp_packet_batch_init(&tx_port_cached->output_pkts); if (tx_port->txq_pkts) { txq_pkts_cached = xmemdup(tx_port->txq_pkts, n_txq * 
sizeof *tx_port->txq_pkts); tx_port_cached->txq_pkts = txq_pkts_cached; + for (int i = 0; i < n_txq; i++) { + dp_packet_batch_init(&tx_port_cached->txq_pkts[i]); + } } hmap_insert(&pmd->send_port_cache, &tx_port_cached->node, hash_port_no(tx_port_cached->port->port_no)); @@ -7402,7 +7429,15 @@ dp_netdev_del_port_tx_from_pmd(struct dp_netdev_pmd_thread *pmd, OVS_REQUIRES(pmd->port_mutex) { hmap_remove(&pmd->tx_ports, &tx->node); + if (tx->txq_pkts) { + int i, n_txq = netdev_n_txq(tx->port->netdev); + + for (i = 0; i < n_txq; i++) { + dp_packet_batch_destroy(&tx->txq_pkts[i]); + } + } free(tx->txq_pkts); + dp_packet_batch_destroy(&tx->output_pkts); free(tx); pmd->need_reload = true; } @@ -8009,6 +8044,7 @@ handle_packet_upcall(struct dp_netdev_pmd_thread *pmd, dp_packet_batch_init_packet(&b, packet); dp_netdev_execute_actions(pmd, &b, true, &match.flow, actions->data, actions->size); + dp_packet_batch_destroy(&b); add_actions = put_actions->size ? put_actions : actions; if (OVS_LIKELY(error != ENOSPC)) { @@ -8210,6 +8246,7 @@ dp_netdev_input__(struct dp_netdev_pmd_thread *pmd, for (i = 0; i < n_batches; i++) { packet_batch_per_flow_execute(&batches[i], pmd); + dp_packet_batch_destroy(&batches[i].array); } } @@ -8237,13 +8274,14 @@ dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd, do { size_t count = MIN(batch_cnt - sent, NETDEV_MAX_BURST); + dp_packet_batch_reset_metadata(&smaller_batch); smaller_batch.trunc = packets->trunc; - smaller_batch.count = 0; dp_packet_batch_add_array(&smaller_batch, &packets->packets[sent], count); dp_netdev_input__(pmd, &smaller_batch, true, 0); sent += count; } while (sent < batch_cnt); + dp_packet_batch_destroy(&smaller_batch); return; } @@ -8398,6 +8436,7 @@ dp_execute_userspace_action(struct dp_netdev_pmd_thread *pmd, dp_packet_batch_init_packet(&b, packet); dp_netdev_execute_actions(pmd, &b, should_steal, flow, actions->data, actions->size); + dp_packet_batch_destroy(&b); } else if (should_steal) { dp_packet_delete(packet); 
COVERAGE_INC(datapath_drop_userspace_action_error); @@ -8456,6 +8495,9 @@ dp_execute_output_action(struct dp_netdev_pmd_thread *pmd, batch_cnt - sent); } while (sent < batch_cnt); } + if (!should_steal) { + dp_packet_batch_destroy(&out); + } return true; } @@ -8499,6 +8541,11 @@ dp_execute_lb_output_action(struct dp_netdev_pmd_thread *pmd, non_atomic_ullong_add(&s_entry->n_packets, 1); non_atomic_ullong_add(&s_entry->n_bytes, size); } + dp_packet_batch_destroy(&output_pkt); + } + + if (!should_steal) { + dp_packet_batch_destroy(&out); } } @@ -8568,6 +8615,9 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, packets_dropped); } if (dp_packet_batch_is_empty(packets_)) { + if (!should_steal) { + dp_packet_batch_destroy(&tnl_pkt); + } return; } @@ -8579,6 +8629,10 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, (*depth)++; dp_netdev_recirculate(pmd, packets_); (*depth)--; + + if (!should_steal) { + dp_packet_batch_destroy(&tnl_pkt); + } return; } COVERAGE_ADD(datapath_drop_invalid_tnl_port, @@ -8622,7 +8676,8 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, } if (clone) { - dp_packet_delete_batch(packets_, true); + dp_packet_delete_batch(&usr_pkt, true); + dp_packet_batch_destroy(&usr_pkt); } ofpbuf_uninit(&actions); @@ -8652,6 +8707,9 @@ dp_execute_cb(void *aux_, struct dp_packet_batch *packets_, dp_netdev_recirculate(pmd, packets_); (*depth)--; + if (!should_steal) { + dp_packet_batch_destroy(&recirc_pkts); + } return; } diff --git a/lib/dpif.c b/lib/dpif.c index cab5884254..df8aab4782 100644 --- a/lib/dpif.c +++ b/lib/dpif.c @@ -1309,6 +1309,7 @@ dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute) dp_packet_batch_init_packet(&pb, execute->packet); odp_execute_actions(&aux, &pb, false, execute->actions, execute->actions_len, dpif_execute_helper_cb); + dp_packet_batch_destroy(&pb); ofpbuf_uninit(&aux.meter_actions); return aux.error; } diff --git a/lib/netdev-afxdp.c b/lib/netdev-afxdp.c index 
8ef2ac192f..b0b331138c 100644 --- a/lib/netdev-afxdp.c +++ b/lib/netdev-afxdp.c @@ -924,7 +924,7 @@ free_afxdp_buf_batch(struct dp_packet_batch *batch) elems[i] = (void *)addr; } umem_elem_push_n(xpacket->mpool, dp_packet_batch_size(batch), elems); - dp_packet_batch_init(batch); + dp_packet_batch_reset_metadata(batch); } static inline bool diff --git a/lib/netdev-bsd.c b/lib/netdev-bsd.c index d29589efde..c515bae2de 100644 --- a/lib/netdev-bsd.c +++ b/lib/netdev-bsd.c @@ -647,7 +647,8 @@ netdev_bsd_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch, if (retval) { dp_packet_delete(packet); } else { - dp_packet_batch_init_packet(batch, packet); + dp_packet_batch_reset_metadata(batch); + dp_packet_batch_add(batch, packet); } if (qfill) { diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c index 7d3a7b9682..570850c677 100644 --- a/lib/netdev-dummy.c +++ b/lib/netdev-dummy.c @@ -1282,7 +1282,8 @@ netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch, ovs_mutex_unlock(&netdev->mutex); - dp_packet_batch_init_packet(batch, packet); + dp_packet_batch_reset_metadata(batch); + dp_packet_batch_add(batch, packet); if (qfill) { *qfill = -ENOTSUP; diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c index 47faea8c63..996153eef0 100644 --- a/lib/netdev-linux.c +++ b/lib/netdev-linux.c @@ -1576,7 +1576,7 @@ netdev_linux_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch, } } - dp_packet_batch_init(batch); + dp_packet_batch_reset_metadata(batch); retval = (rx->is_tap ? 
netdev_linux_batch_rxq_recv_tap(rx, mtu, batch) : netdev_linux_batch_rxq_recv_sock(rx, mtu, batch)); diff --git a/lib/netdev.c b/lib/netdev.c index daa4287362..8bd4b43db4 100644 --- a/lib/netdev.c +++ b/lib/netdev.c @@ -870,6 +870,7 @@ netdev_send_tso(struct netdev *netdev, int qid, } else { retval = error; } + dp_packet_batch_destroy(curr_batch); } free(batches); return retval; diff --git a/lib/odp-execute-private.c b/lib/odp-execute-private.c index 8b7a6b4ab0..12d7cc4619 100644 --- a/lib/odp-execute-private.c +++ b/lib/odp-execute-private.c @@ -263,8 +263,10 @@ action_autoval_generic(struct dp_packet_batch *batch, const struct nlattr *a) } } dp_packet_delete_batch(&test_batch, true); + dp_packet_batch_destroy(&test_batch); } dp_packet_delete_batch(&original_batch, true); + dp_packet_batch_destroy(&original_batch); } void diff --git a/lib/odp-execute.c b/lib/odp-execute.c index ecbda8c010..b9446fb25f 100644 --- a/lib/odp-execute.c +++ b/lib/odp-execute.c @@ -758,6 +758,7 @@ odp_execute_sample(void *dp, struct dp_packet *packet, bool steal, dp_packet_batch_init_packet(&pb, packet); odp_execute_actions(dp, &pb, true, nl_attr_get(subactions), nl_attr_get_size(subactions), dp_execute_action); + dp_packet_batch_destroy(&pb); } static void @@ -776,6 +777,7 @@ odp_execute_clone(void *dp, struct dp_packet_batch *batch, bool steal, dp_packet_batch_reset_cutlen(batch); odp_execute_actions(dp, &clone_pkt_batch, true, nl_attr_get(actions), nl_attr_get_size(actions), dp_execute_action); + dp_packet_batch_destroy(&clone_pkt_batch); } else { odp_execute_actions(dp, batch, true, nl_attr_get(actions), @@ -844,6 +846,7 @@ odp_execute_check_pkt_len(void *dp, struct dp_packet *packet, bool steal, dp_packet_batch_init_packet(&pb, packet); odp_execute_actions(dp, &pb, true, nl_attr_get(a), nl_attr_get_size(a), dp_execute_action); + dp_packet_batch_destroy(&pb); } static bool diff --git a/tests/test-conntrack.c b/tests/test-conntrack.c index dc8d6cff94..2afcbb38f4 100644 --- 
a/tests/test-conntrack.c +++ b/tests/test-conntrack.c @@ -76,6 +76,7 @@ static void destroy_packets(struct dp_packet_batch *pkt_batch) { dp_packet_delete_batch(pkt_batch, true); + dp_packet_batch_destroy(pkt_batch); free(pkt_batch); } @@ -294,6 +295,7 @@ test_benchmark_zones(struct ovs_cmdl_context *ctx) conntrack_destroy(ct); for (i = 0; i < n_conns; i++) { dp_packet_delete_batch(pkt_batch[i], true); + dp_packet_batch_destroy(pkt_batch[i]); free(pkt_batch[i]); } free(pkt_batch); @@ -325,7 +327,7 @@ pcap_batch_execute_conntrack(struct conntrack *ct_, if (flow.dl_type != dl_type) { conntrack_execute(ct_, &new_batch, dl_type, false, true, 0, NULL, NULL, NULL, NULL, now, 0); - dp_packet_batch_init(&new_batch); + dp_packet_batch_reset_metadata(&new_batch); } dp_packet_batch_add(&new_batch, packet); } @@ -335,6 +337,7 @@ pcap_batch_execute_conntrack(struct conntrack *ct_, NULL, NULL, now, 0); } + dp_packet_batch_destroy(&new_batch); } static void @@ -393,6 +396,7 @@ test_pcap(struct ovs_cmdl_context *ctx) } dp_packet_delete_batch(batch, true); + dp_packet_batch_destroy(batch); } conntrack_destroy(ct); ovs_pcap_close(pcap); -- 2.53.0 _______________________________________________ dev mailing list [email protected] https://mail.openvswitch.org/mailman/listinfo/ovs-dev
