Not needed anymore because 'may_steal' is already handled at the dpif-netdev layer and is always true by the time netdev_send() is called.
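
For illustration of the new contract on the caller side: netdev_send() now always consumes the batch, so a caller that still needs the packets afterwards must clone them before sending. A minimal sketch, not part of this patch ('send_retaining_copy' is a hypothetical name):

    #include "dp-packet.h"
    #include "netdev.h"

    /* Transmits a copy of 'batch' so that the caller keeps ownership of
     * the original packets.  netdev_send() takes ownership of whatever
     * batch it is handed, regardless of success. */
    static int
    send_retaining_copy(struct netdev *netdev, int qid,
                        struct dp_packet_batch *batch, bool concurrent_txq)
    {
        struct dp_packet_batch copy;

        dp_packet_batch_clone(&copy, batch);   /* Deep-copies each packet. */
        return netdev_send(netdev, qid, &copy, concurrent_txq);
    }

(dpif-netdev follows the same pattern: it clones packets when it cannot steal them, which is why 'may_steal' is always true by the time netdev_send() runs.)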
Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
---
 lib/dpif-netdev.c     |  2 +-
 lib/netdev-bsd.c      |  4 ++--
 lib/netdev-dpdk.c     | 25 +++++++++++--------------
 lib/netdev-dummy.c    |  4 ++--
 lib/netdev-linux.c    |  4 ++--
 lib/netdev-provider.h |  7 +++----
 lib/netdev.c          | 12 ++++--------
 lib/netdev.h          |  2 +-
 8 files changed, 26 insertions(+), 34 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index f124bf8..68c35fa 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3300,7 +3300,7 @@ dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd,
         tx_qid = pmd->static_tx_qid;
     }
 
-    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, true, dynamic_txqs);
+    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, dynamic_txqs);
     dp_packet_batch_init(&p->output_pkts);
 }
 
diff --git a/lib/netdev-bsd.c b/lib/netdev-bsd.c
index 65c5674..658a5bc 100644
--- a/lib/netdev-bsd.c
+++ b/lib/netdev-bsd.c
@@ -680,7 +680,7 @@ netdev_bsd_rxq_drain(struct netdev_rxq *rxq_)
  */
 static int
 netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
-                struct dp_packet_batch *batch, bool may_steal,
+                struct dp_packet_batch *batch,
                 bool concurrent_txq OVS_UNUSED)
 {
     struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
@@ -728,7 +728,7 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
     }
 
     ovs_mutex_unlock(&dev->mutex);
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
 
     return error;
 }
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index faff842..47cf2dd 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -1874,12 +1874,12 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
 
 static int
 netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                        struct dp_packet_batch *batch,
-                       bool may_steal, bool concurrent_txq OVS_UNUSED)
+                       bool concurrent_txq OVS_UNUSED)
 {
-    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
+    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
         dpdk_do_tx_copy(netdev, qid, batch);
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
     } else {
         dp_packet_batch_apply_cutlen(batch);
         __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
@@ -1889,11 +1889,11 @@ netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
 static inline void
 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
-                   struct dp_packet_batch *batch, bool may_steal,
+                   struct dp_packet_batch *batch,
                    bool concurrent_txq)
 {
     if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
         return;
     }
 
@@ -1902,12 +1902,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
     }
 
-    if (OVS_UNLIKELY(!may_steal ||
-                     batch->packets[0]->source != DPBUF_DPDK)) {
+    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
         struct netdev *netdev = &dev->up;
 
         dpdk_do_tx_copy(netdev, qid, batch);
-        dp_packet_delete_batch(batch, may_steal);
+        dp_packet_delete_batch(batch, true);
     } else {
         int tx_cnt, dropped;
         int batch_cnt = dp_packet_batch_size(batch);
@@ -1935,12 +1934,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
 
 static int
 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
-                     struct dp_packet_batch *batch, bool may_steal,
-                     bool concurrent_txq)
+                     struct dp_packet_batch *batch, bool concurrent_txq)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 
-    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
     return 0;
 }
 
@@ -2907,8 +2905,7 @@ dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
 
 static int
 netdev_dpdk_ring_send(struct netdev *netdev, int qid,
-                      struct dp_packet_batch *batch, bool may_steal,
-                      bool concurrent_txq)
+                      struct dp_packet_batch *batch, bool concurrent_txq)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
     struct dp_packet *packet;
@@ -2921,7 +2918,7 @@ netdev_dpdk_ring_send(struct netdev *netdev, int qid,
         dp_packet_mbuf_rss_flag_reset(packet);
     }
 
-    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
     return 0;
 }
 
diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
index 246cdf1..140e08d 100644
--- a/lib/netdev-dummy.c
+++ b/lib/netdev-dummy.c
@@ -1062,7 +1062,7 @@ netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
 
 static int
 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
-                  struct dp_packet_batch *batch, bool may_steal,
+                  struct dp_packet_batch *batch,
                   bool concurrent_txq OVS_UNUSED)
 {
     struct netdev_dummy *dev = netdev_dummy_cast(netdev);
@@ -1132,7 +1132,7 @@ netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
         ovs_mutex_unlock(&dev->mutex);
     }
 
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
 
     return error;
 }
diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
index e809b88..634ff6f 100644
--- a/lib/netdev-linux.c
+++ b/lib/netdev-linux.c
@@ -1270,7 +1270,7 @@ netdev_linux_tap_batch_send(struct netdev *netdev_,
  * expected to do additional queuing of packets. */
 static int
 netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
-                  struct dp_packet_batch *batch, bool may_steal,
+                  struct dp_packet_batch *batch,
                   bool concurrent_txq OVS_UNUSED)
 {
     int error = 0;
@@ -1306,7 +1306,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
     }
 
 free_batch:
-    dp_packet_delete_batch(batch, may_steal);
+    dp_packet_delete_batch(batch, true);
     return error;
 }
 
diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h
index 1720deb..3d9f336 100644
--- a/lib/netdev-provider.h
+++ b/lib/netdev-provider.h
@@ -348,9 +348,8 @@ struct netdev_class {
      * If the function returns a non-zero value, some of the packets might have
      * been sent anyway.
      *
-     * If 'may_steal' is false, the caller retains ownership of all the
-     * packets. If 'may_steal' is true, the caller transfers ownership of all
-     * the packets to the network device, regardless of success.
+     * The caller transfers ownership of all the packets to the network
+     * device, regardless of success.
      *
      * If 'concurrent_txq' is true, the caller may perform concurrent calls
      * to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -370,7 +369,7 @@ struct netdev_class {
      * datapath". It will also prevent the OVS implementation of bonding from
      * working properly over 'netdev'.) */
     int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
-                bool may_steal, bool concurrent_txq);
+                bool concurrent_txq);
 
     /* Registers with the poll loop to wake up from the next call to
      * poll_block() when the packet transmission queue for 'netdev' has
diff --git a/lib/netdev.c b/lib/netdev.c
index 2d69fe5..e6f7c95 100644
--- a/lib/netdev.c
+++ b/lib/netdev.c
@@ -771,9 +771,8 @@ netdev_get_pt_mode(const struct netdev *netdev)
  * If the function returns a non-zero value, some of the packets might have
  * been sent anyway.
  *
- * If 'may_steal' is false, the caller retains ownership of all the packets.
- * If 'may_steal' is true, the caller transfers ownership of all the packets
- * to the network device, regardless of success.
+ * The caller transfers ownership of all the packets to the network device,
+ * regardless of success.
  *
  * If 'concurrent_txq' is true, the caller may perform concurrent calls
  * to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -787,15 +786,12 @@ netdev_get_pt_mode(const struct netdev *netdev)
  * queues. */
 int
 netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
-            bool may_steal, bool concurrent_txq)
+            bool concurrent_txq)
 {
-    int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
+    int error = netdev->netdev_class->send(netdev, qid, batch,
                                            concurrent_txq);
     if (!error) {
         COVERAGE_INC(netdev_sent);
-        if (!may_steal) {
-            dp_packet_batch_reset_cutlen(batch);
-        }
     }
     return error;
 }
diff --git a/lib/netdev.h b/lib/netdev.h
index 3a545fe..dc9d9a0f 100644
--- a/lib/netdev.h
+++ b/lib/netdev.h
@@ -181,7 +181,7 @@ int netdev_rxq_drain(struct netdev_rxq *);
 
 /* Packet transmission. */
 int netdev_send(struct netdev *, int qid, struct dp_packet_batch *,
-                bool may_steal, bool concurrent_txq);
+                bool concurrent_txq);
 void netdev_send_wait(struct netdev *, int qid);
 
 /* Flow offloading. */
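
For illustration of the provider side: every implementation of the 'send' callback now frees the batch unconditionally before returning, mirroring the netdev-bsd, netdev-dummy, and netdev-linux hunks above. A minimal sketch of that pattern, not an excerpt from any real provider ('example_netdev_send' is a hypothetical name):

    /* Sketch of a provider send() under the new contract: the device owns
     * 'batch' from the moment it is called, so the batch is freed on every
     * path, whether transmission succeeded or not. */
    static int
    example_netdev_send(struct netdev *netdev OVS_UNUSED, int qid OVS_UNUSED,
                        struct dp_packet_batch *batch,
                        bool concurrent_txq OVS_UNUSED)
    {
        int error = 0;
        size_t i;

        for (i = 0; i < dp_packet_batch_size(batch); i++) {
            struct dp_packet *packet = batch->packets[i];

            /* A real device would transmit dp_packet_data(packet) with
             * length dp_packet_size(packet) here, setting 'error' on
             * failure. */
            (void) packet;
        }

        dp_packet_delete_batch(batch, true);   /* Always; no 'may_steal'. */
        return error;
    }

-- 
2.7.4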