Not needed anymore because 'may_steal' is already handled at the
dpif-netdev layer and is always true by the time netdev_send() is called.
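
For illustration only (this helper and its name are hypothetical, not part
of this patch): since netdev_send() now always consumes the batch, a caller
that still needed its packets afterwards would have to hand the device a
deep copy instead, e.g. via dp_packet_batch_clone():

    #include "dp-packet.h"
    #include "netdev.h"

    /* Hypothetical sketch: send a copy of 'batch' so that the caller
     * keeps ownership of the original packets.  The clone is consumed
     * by the netdev regardless of success. */
    static int
    send_retaining_packets(struct netdev *netdev, int qid,
                           struct dp_packet_batch *batch,
                           bool concurrent_txq)
    {
        struct dp_packet_batch clone;

        dp_packet_batch_clone(&clone, batch);
        return netdev_send(netdev, qid, &clone, concurrent_txq);
    }

No in-tree caller needs this, which is why the argument can simply be
dropped.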
Signed-off-by: Ilya Maximets <[email protected]>
---
 lib/dpif-netdev.c     |  2 +-
 lib/netdev-bsd.c      |  4 ++--
 lib/netdev-dpdk.c     | 25 +++++++++++--------------
 lib/netdev-dummy.c    |  4 ++--
 lib/netdev-linux.c    |  4 ++--
 lib/netdev-provider.h |  7 +++----
 lib/netdev.c          | 12 ++++--------
 lib/netdev.h          |  2 +-
 8 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 1854405..cf11352 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3251,7 +3251,7 @@ dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd,
         tx_qid = pmd->static_tx_qid;
     }
 
-    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, true, dynamic_txqs);
+    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, dynamic_txqs);
     dp_packet_batch_init(&p->output_pkts);
 }
 
diff --git a/lib/netdev-bsd.c b/lib/netdev-bsd.c
index 8a4cdb3..4f243b5 100644
--- a/lib/netdev-bsd.c
+++ b/lib/netdev-bsd.c
@@ -680,7 +680,7 @@ netdev_bsd_rxq_drain(struct netdev_rxq *rxq_)
  */
 static int
 netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
-                struct dp_packet_batch *batch, bool may_steal,
+                struct dp_packet_batch *batch,
                 bool concurrent_txq OVS_UNUSED)
 {
     struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
@@ -728,7 +728,7 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
     }
 
     ovs_mutex_unlock(&dev->mutex);
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
 
     return error;
 }
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index c60f46f..abdff23 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -1904,12 +1904,12 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
 static int
 netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                        struct dp_packet_batch *batch,
-                       bool may_steal, bool concurrent_txq OVS_UNUSED)
+                       bool concurrent_txq OVS_UNUSED)
 {
 
-    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
+    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
         dpdk_do_tx_copy(netdev, qid, batch);
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
     } else {
         dp_packet_batch_apply_cutlen(batch);
         __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
@@ -1919,11 +1919,11 @@ netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
 static inline void
 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
-                   struct dp_packet_batch *batch, bool may_steal,
+                   struct dp_packet_batch *batch,
                    bool concurrent_txq)
 {
     if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
         return;
     }
 
@@ -1932,12 +1932,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
     }
 
-    if (OVS_UNLIKELY(!may_steal ||
-                     batch->packets[0]->source != DPBUF_DPDK)) {
+    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
         struct netdev *netdev = &dev->up;
 
         dpdk_do_tx_copy(netdev, qid, batch);
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
     } else {
         int tx_cnt, dropped;
         int batch_cnt = dp_packet_batch_size(batch);
@@ -1965,12 +1964,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
 
 static int
 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
-                     struct dp_packet_batch *batch, bool may_steal,
-                     bool concurrent_txq)
+                     struct dp_packet_batch *batch, bool concurrent_txq)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 
-    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
     return 0;
 }
 
@@ -2937,8 +2935,7 @@ dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
 
 static int
 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
-                      struct dp_packet_batch *batch, bool may_steal,
-                      bool concurrent_txq)
+                      struct dp_packet_batch *batch, bool concurrent_txq)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
     struct dp_packet *packet;
@@ -2951,7 +2948,7 @@ netdev_dpdk_ring_send(struct netdev *netdev, int qid,
         dp_packet_mbuf_rss_flag_reset(packet);
     }
 
-    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
     return 0;
 }
 
diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
index f731af1..57ef13f 100644
--- a/lib/netdev-dummy.c
+++ b/lib/netdev-dummy.c
@@ -1062,7 +1062,7 @@ netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
 
 static int
 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
-                  struct dp_packet_batch *batch, bool may_steal,
+                  struct dp_packet_batch *batch,
                   bool concurrent_txq OVS_UNUSED)
 {
     struct netdev_dummy *dev = netdev_dummy_cast(netdev);
@@ -1132,7 +1132,7 @@ netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
         ovs_mutex_unlock(&dev->mutex);
     }
 
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
 
     return error;
 }
diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
index 2ff3e2b..aaf4899 100644
--- a/lib/netdev-linux.c
+++ b/lib/netdev-linux.c
@@ -1269,7 +1269,7 @@ netdev_linux_tap_batch_send(struct netdev *netdev_,
  * expected to do additional queuing of packets. */
 static int
 netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
-                  struct dp_packet_batch *batch, bool may_steal,
+                  struct dp_packet_batch *batch,
                   bool concurrent_txq OVS_UNUSED)
 {
     int error = 0;
@@ -1305,7 +1305,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
     }
 
 free_batch:
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
     return error;
 }
 
diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h
index b3c57d5..e6ec79a 100644
--- a/lib/netdev-provider.h
+++ b/lib/netdev-provider.h
@@ -347,9 +347,8 @@ struct netdev_class {
      * If the function returns a non-zero value, some of the packets might have
      * been sent anyway.
      *
-     * If 'may_steal' is false, the caller retains ownership of all the
-     * packets.  If 'may_steal' is true, the caller transfers ownership of all
-     * the packets to the network device, regardless of success.
+     * The caller transfers ownership of all the packets to the network
+     * device, regardless of success.
      *
      * If 'concurrent_txq' is true, the caller may perform concurrent calls
      * to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -369,7 +368,7 @@ struct netdev_class {
      * datapath".  It will also prevent the OVS implementation of bonding from
      * working properly over 'netdev'.) */
     int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
-                bool may_steal, bool concurrent_txq);
+                bool concurrent_txq);
 
     /* Registers with the poll loop to wake up from the next call to
      * poll_block() when the packet transmission queue for 'netdev' has
diff --git a/lib/netdev.c b/lib/netdev.c
index e47bfe1..4487bb5 100644
--- a/lib/netdev.c
+++ b/lib/netdev.c
@@ -771,9 +771,8 @@ netdev_get_pt_mode(const struct netdev *netdev)
  * If the function returns a non-zero value, some of the packets might have
  * been sent anyway.
  *
- * If 'may_steal' is false, the caller retains ownership of all the packets.
- * If 'may_steal' is true, the caller transfers ownership of all the packets
- * to the network device, regardless of success.
+ * The caller transfers ownership of all the packets to the network device,
+ * regardless of success.
  *
  * If 'concurrent_txq' is true, the caller may perform concurrent calls
  * to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -790,15 +789,12 @@ netdev_get_pt_mode(const struct netdev *netdev)
  * cases this function will always return EOPNOTSUPP. */
 int
 netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
-            bool may_steal, bool concurrent_txq)
+            bool concurrent_txq)
 {
-    int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
+    int error = netdev->netdev_class->send(netdev, qid, batch,
                                            concurrent_txq);
     if (!error) {
         COVERAGE_INC(netdev_sent);
-        if (!may_steal) {
-            dp_packet_batch_reset_cutlen(batch);
-        }
     }
     return error;
 }
diff --git a/lib/netdev.h b/lib/netdev.h
index f8482f7..bdcacf5 100644
--- a/lib/netdev.h
+++ b/lib/netdev.h
@@ -181,7 +181,7 @@ int netdev_rxq_drain(struct netdev_rxq *);
 
 /* Packet transmission. */
 int netdev_send(struct netdev *, int qid, struct dp_packet_batch *,
-                bool may_steal, bool concurrent_txq);
+                bool concurrent_txq);
 void netdev_send_wait(struct netdev *, int qid);
 
 /* Flow offloading. */
-- 
2.7.4

_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
