On 25.09.2017 18:23, Bodireddy, Bhanuprakash wrote:
>> Not needed anymore because 'may_steal' is already handled on the
>> dpif-netdev layer and is always true;
>
> LGTM.
> 'may_steal' is still used by the QoS policer in the netdev layer.  I am not
> familiar with the policer functionality, but I am just wondering whether
> 'may_steal' is really not needed with this change.

This was added by a recent commit
"netdev-dpdk: Execute QoS Checking before copying to mbuf."
And yes, that commit will be mostly reverted, because 'may_steal'
is always true.
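To make the simplified contract concrete, a provider's send() now looks
roughly like the sketch below.  This is only an illustration, not part of
the patch: 'my_netdev_send' and 'my_device_transmit' are hypothetical, it
assumes the usual netdev-provider.h declarations, and only
dp_packet_delete_batch() is the real helper used in the diff further down.

    /* Sketch of a provider send() under the new contract: the caller always
     * transfers ownership of 'batch', so there is no 'may_steal' to check. */
    static int
    my_netdev_send(struct netdev *netdev, int qid OVS_UNUSED,
                   struct dp_packet_batch *batch,
                   bool concurrent_txq OVS_UNUSED)
    {
        int error = my_device_transmit(netdev, batch);  /* hypothetical HW op */

        /* The packets belong to the device now; free them unconditionally,
         * whether or not transmission succeeded. */
        dp_packet_delete_batch(batch, true);
        return error;
    }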
>
> - Bhanuprakash.
>
>>
>> Signed-off-by: Ilya Maximets <[email protected]>
>> ---
>>  lib/dpif-netdev.c     |  2 +-
>>  lib/netdev-bsd.c      |  4 ++--
>>  lib/netdev-dpdk.c     | 25 +++++++++++--------------
>>  lib/netdev-dummy.c    |  4 ++--
>>  lib/netdev-linux.c    |  4 ++--
>>  lib/netdev-provider.h |  7 +++----
>>  lib/netdev.c          | 12 ++++--------
>>  lib/netdev.h          |  2 +-
>>  8 files changed, 26 insertions(+), 34 deletions(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index a2a25be..dcf55f3 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -3121,7 +3121,7 @@ dp_netdev_pmd_flush_output_on_port(struct dp_netdev_pmd_thread *pmd,
>>          tx_qid = pmd->static_tx_qid;
>>      }
>>
>> -    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, true, dynamic_txqs);
>> +    netdev_send(p->port->netdev, tx_qid, &p->output_pkts, dynamic_txqs);
>>      dp_packet_batch_init(&p->output_pkts);
>>  }
>>
>> diff --git a/lib/netdev-bsd.c b/lib/netdev-bsd.c
>> index 8a4cdb3..4f243b5 100644
>> --- a/lib/netdev-bsd.c
>> +++ b/lib/netdev-bsd.c
>> @@ -680,7 +680,7 @@ netdev_bsd_rxq_drain(struct netdev_rxq *rxq_)
>>   */
>>  static int
>>  netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
>> -                struct dp_packet_batch *batch, bool may_steal,
>> +                struct dp_packet_batch *batch,
>>                  bool concurrent_txq OVS_UNUSED)
>>  {
>>      struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
>> @@ -728,7 +728,7 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
>>      }
>>
>>      ovs_mutex_unlock(&dev->mutex);
>> -    dp_packet_delete_batch(batch, may_steal);
>> +    dp_packet_delete_batch(batch, true);
>>
>>      return error;
>>  }
>>
>> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>> index 1d82bca..8e3158f 100644
>> --- a/lib/netdev-dpdk.c
>> +++ b/lib/netdev-dpdk.c
>> @@ -1872,12 +1872,12 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
>>  static int
>>  netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>>                         struct dp_packet_batch *batch,
>> -                       bool may_steal, bool concurrent_txq OVS_UNUSED)
>> +                       bool concurrent_txq OVS_UNUSED)
>>  {
>>
>> -    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
>> +    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
>>          dpdk_do_tx_copy(netdev, qid, batch);
>> -        dp_packet_delete_batch(batch, may_steal);
>> +        dp_packet_delete_batch(batch, true);
>>      } else {
>>          dp_packet_batch_apply_cutlen(batch);
>>          __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
>> @@ -1887,11 +1887,11 @@ netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>>
>>  static inline void
>>  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>> -                   struct dp_packet_batch *batch, bool may_steal,
>> +                   struct dp_packet_batch *batch,
>>                     bool concurrent_txq)
>>  {
>>      if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
>> -        dp_packet_delete_batch(batch, may_steal);
>> +        dp_packet_delete_batch(batch, true);
>>          return;
>>      }
>>
>> @@ -1900,12 +1900,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>>          rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
>>      }
>>
>> -    if (OVS_UNLIKELY(!may_steal ||
>> -                     batch->packets[0]->source != DPBUF_DPDK)) {
>> +    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
>>          struct netdev *netdev = &dev->up;
>>
>>          dpdk_do_tx_copy(netdev, qid, batch);
>> -        dp_packet_delete_batch(batch, may_steal);
>> +        dp_packet_delete_batch(batch, true);
>>      } else {
>>          int dropped;
>>          int cnt = batch->count;
>> @@ -1933,12 +1932,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>>
>>  static int
>>  netdev_dpdk_eth_send(struct netdev *netdev, int qid,
>> -                     struct dp_packet_batch *batch, bool may_steal,
>> -                     bool concurrent_txq)
>> +                     struct dp_packet_batch *batch, bool concurrent_txq)
>>  {
>>      struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>>
>> -    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
>> +    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
>>      return 0;
>>  }
>>
>> @@ -2905,8 +2903,7 @@ dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
>>
>>  static int
>>  netdev_dpdk_ring_send(struct netdev *netdev, int qid,
>> -                      struct dp_packet_batch *batch, bool may_steal,
>> -                      bool concurrent_txq)
>> +                      struct dp_packet_batch *batch, bool concurrent_txq)
>>  {
>>      struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>>      unsigned i;
>> @@ -2919,7 +2916,7 @@ netdev_dpdk_ring_send(struct netdev *netdev, int qid,
>>          dp_packet_rss_invalidate(batch->packets[i]);
>>      }
>>
>> -    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
>> +    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
>>      return 0;
>>  }
>>
>> diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
>> index f731af1..57ef13f 100644
>> --- a/lib/netdev-dummy.c
>> +++ b/lib/netdev-dummy.c
>> @@ -1062,7 +1062,7 @@ netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
>>
>>  static int
>>  netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
>> -                  struct dp_packet_batch *batch, bool may_steal,
>> +                  struct dp_packet_batch *batch,
>>                    bool concurrent_txq OVS_UNUSED)
>>  {
>>      struct netdev_dummy *dev = netdev_dummy_cast(netdev);
>> @@ -1132,7 +1132,7 @@ netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
>>          ovs_mutex_unlock(&dev->mutex);
>>      }
>>
>> -    dp_packet_delete_batch(batch, may_steal);
>> +    dp_packet_delete_batch(batch, true);
>>
>>      return error;
>>  }
>>
>> diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
>> index 2ff3e2b..aaf4899 100644
>> --- a/lib/netdev-linux.c
>> +++ b/lib/netdev-linux.c
>> @@ -1269,7 +1269,7 @@ netdev_linux_tap_batch_send(struct netdev *netdev_,
>>   * expected to do additional queuing of packets. */
>>  static int
>>  netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
>> -                  struct dp_packet_batch *batch, bool may_steal,
>> +                  struct dp_packet_batch *batch,
>>                    bool concurrent_txq OVS_UNUSED)
>>  {
>>      int error = 0;
>> @@ -1305,7 +1305,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
>>      }
>>
>>  free_batch:
>> -    dp_packet_delete_batch(batch, may_steal);
>> +    dp_packet_delete_batch(batch, true);
>>      return error;
>>  }
>>
>> diff --git a/lib/netdev-provider.h b/lib/netdev-provider.h
>> index b3c57d5..e6ec79a 100644
>> --- a/lib/netdev-provider.h
>> +++ b/lib/netdev-provider.h
>> @@ -347,9 +347,8 @@ struct netdev_class {
>>       * If the function returns a non-zero value, some of the packets might have
>>       * been sent anyway.
>>       *
>> -     * If 'may_steal' is false, the caller retains ownership of all the
>> -     * packets.  If 'may_steal' is true, the caller transfers ownership of all
>> -     * the packets to the network device, regardless of success.
>> +     * The caller transfers ownership of all the packets to the network
>> +     * device, regardless of success.
>>       *
>>       * If 'concurrent_txq' is true, the caller may perform concurrent calls
>>       * to netdev_send() with the same 'qid'. The netdev provider is responsible
>> @@ -369,7 +368,7 @@ struct netdev_class {
>>       * datapath".  It will also prevent the OVS implementation of bonding from
>>       * working properly over 'netdev'.) */
>>      int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
>> -                bool may_steal, bool concurrent_txq);
>> +                bool concurrent_txq);
>>
>>      /* Registers with the poll loop to wake up from the next call to
>>       * poll_block() when the packet transmission queue for 'netdev' has
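Just to illustrate the documented contract above (this is not part of the
patch): a caller that still needs its packets after transmission would now
have to hand clones to netdev_send() instead of the originals.
'send_clones' is a hypothetical helper, assuming the dp_packet_clone(),
dp_packet_batch_init() and dp_packet_batch_add() helpers from dp-packet.h.

    /* Hypothetical caller-side helper: under the new contract netdev_send()
     * always consumes its batch, so a caller that wants to keep its packets
     * has to send clones instead of the originals. */
    static int
    send_clones(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
                bool concurrent_txq)
    {
        struct dp_packet_batch clones;
        size_t i;

        dp_packet_batch_init(&clones);
        for (i = 0; i < batch->count; i++) {
            dp_packet_batch_add(&clones, dp_packet_clone(batch->packets[i]));
        }
        /* 'clones' is owned by the device after this call; 'batch' is still
         * owned by the caller. */
        return netdev_send(netdev, qid, &clones, concurrent_txq);
    }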
>> diff --git a/lib/netdev.c b/lib/netdev.c
>> index cb4d9f0..9f97127 100644
>> --- a/lib/netdev.c
>> +++ b/lib/netdev.c
>> @@ -771,9 +771,8 @@ netdev_get_pt_mode(const struct netdev *netdev)
>>   * If the function returns a non-zero value, some of the packets might have
>>   * been sent anyway.
>>   *
>> - * If 'may_steal' is false, the caller retains ownership of all the packets.
>> - * If 'may_steal' is true, the caller transfers ownership of all the packets
>> - * to the network device, regardless of success.
>> + * The caller transfers ownership of all the packets to the network device,
>> + * regardless of success.
>>   *
>>   * If 'concurrent_txq' is true, the caller may perform concurrent calls
>>   * to netdev_send() with the same 'qid'. The netdev provider is responsible
>> @@ -790,15 +789,12 @@ netdev_get_pt_mode(const struct netdev *netdev)
>>   * cases this function will always return EOPNOTSUPP. */
>>  int
>>  netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
>> -            bool may_steal, bool concurrent_txq)
>> +            bool concurrent_txq)
>>  {
>> -    int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
>> +    int error = netdev->netdev_class->send(netdev, qid, batch,
>>                                             concurrent_txq);
>>      if (!error) {
>>          COVERAGE_INC(netdev_sent);
>> -        if (!may_steal) {
>> -            dp_packet_batch_reset_cutlen(batch);
>> -        }
>>      }
>>      return error;
>>  }
>>
>> diff --git a/lib/netdev.h b/lib/netdev.h
>> index f8482f7..bdcacf5 100644
>> --- a/lib/netdev.h
>> +++ b/lib/netdev.h
>> @@ -181,7 +181,7 @@ int netdev_rxq_drain(struct netdev_rxq *);
>>
>>  /* Packet transmission. */
>>  int netdev_send(struct netdev *, int qid, struct dp_packet_batch *,
>> -                bool may_steal, bool concurrent_txq);
>> +                bool concurrent_txq);
>>  void netdev_send_wait(struct netdev *, int qid);
>>
>>  /* Flow offloading. */
>> --
>> 2.7.4
>
>
>
> _______________________________________________
> dev mailing list
> [email protected]
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
