On Mon, Sep 15, 2014 at 10:07 AM, Pravin Shelar <[email protected]> wrote:
> On Fri, Sep 12, 2014 at 3:04 PM, Alex Wang <[email protected]> wrote:
>> The previous commit makes OVS create one tx queue for each
>> cpu core, so that each pmd thread uses a separate tx queue.
>> Also, all tx from non-pmd threads on a dpdk interface goes through
>> 'NON_PMD_THREAD_TX_QUEUE', protected by the 'nonpmd_mempool_mutex'.
>> Therefore, the spinlock is no longer needed, and this commit
>> removes it from 'struct dpdk_tx_queue'.
>>
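My reading of the queueing model the commit message describes, as a
rough sketch (made-up names and sizes, not the actual netdev-dpdk
code): every queue ends up with exactly one writer, either the pmd
thread pinned to its own slot or the non-pmd path serialized by a
mutex, so a per-queue spinlock no longer guards anything.

    #include <pthread.h>
    #include <stdbool.h>

    #define N_TXQ       64            /* illustrative only */
    #define NON_PMD_TXQ (N_TXQ - 1)   /* stand-in for NON_PMD_THREAD_TX_QUEUE */

    struct tx_queue {
        bool flush_tx;   /* Flush after every enqueue (remote NUMA node). */
        int count;       /* Packets currently queued. */
    };

    static struct tx_queue tx_q[N_TXQ];

    /* All non-pmd senders funnel through one dedicated queue under this
     * mutex, standing in for 'nonpmd_mempool_mutex'. */
    static pthread_mutex_t nonpmd_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* A pmd thread only ever touches tx_q[core_id] for its own core, so
     * it is the sole writer of that slot and needs no lock. */
    static void
    pmd_enqueue(int core_id, int n_pkts)
    {
        tx_q[core_id].count += n_pkts;
    }

    /* Non-pmd threads share NON_PMD_TXQ; the mutex provides the mutual
     * exclusion the removed spinlock used to. */
    static void
    nonpmd_enqueue(int n_pkts)
    {
        pthread_mutex_lock(&nonpmd_mutex);
        tx_q[NON_PMD_TXQ].count += n_pkts;
        pthread_mutex_unlock(&nonpmd_mutex);
    }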
>
> This lock is still required since external tools can still change
> process affinity, which can cause a race inside OVS.
>
After discussing with Alex, I think it is safe to remove this lock,
since dpdk only cares about the logical core id, which cannot be
changed by an external tool.
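To spell that out (again a sketch based on my understanding of DPDK,
with a made-up helper name): rte_lcore_id() is a per-thread value
assigned when the thread is registered with the EAL, not something
re-read from the OS scheduler, so an external 'taskset' can move the
thread onto another physical cpu without changing which tx queue it
selects.

    #include <rte_lcore.h>

    /* Hypothetical queue selector: each DPDK thread owns the queue whose
     * index equals its logical core id.  Because rte_lcore_id() is fixed
     * per thread at registration time, changing the process's cpu
     * affinity from outside cannot make two threads pick the same queue. */
    static unsigned
    pick_tx_qid(void)
    {
        return rte_lcore_id();
    }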

>> Signed-off-by: Alex Wang <[email protected]>
>>
Acked-by: Pravin B Shelar <[email protected]>
>> ---
>> PATCH -> V2
>> - rebase and refactor the code.
>>
>> V2 -> V3:
>> - rebase.
>>
>> V3 -> V4:
>> - rebase.
>> ---
>>  lib/netdev-dpdk.c |    6 ------
>>  1 file changed, 6 deletions(-)
>>
>> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>> index 0fdf874..ef4fe5d 100644
>> --- a/lib/netdev-dpdk.c
>> +++ b/lib/netdev-dpdk.c
>> @@ -158,7 +158,6 @@ struct dpdk_mp {
>>  /* There should be one 'struct dpdk_tx_queue' created for
>>   * each cpu core. */
>>  struct dpdk_tx_queue {
>> -    rte_spinlock_t tx_lock;
>>      bool flush_tx;                 /* Set to true to flush queue everytime */
>>                                     /* pkts are queued. */
>>      int count;
>> @@ -478,7 +477,6 @@ netdev_dpdk_set_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
>>      for (i = 0; i < n_txqs; i++) {
>>          int core_id = ovs_numa_get_numa_id(i);
>>
>> -        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
>>          /* If the corresponding core is not on the same numa node
>>           * as 'netdev', flags the 'flush_tx'. */
>>          netdev->tx_q[i].flush_tx = netdev->socket_id == core_id;
>> @@ -720,9 +718,7 @@ dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
>>      if (txq->count == 0) {
>>          return;
>>      }
>> -    rte_spinlock_lock(&txq->tx_lock);
>>      dpdk_queue_flush__(dev, qid);
>> -    rte_spinlock_unlock(&txq->tx_lock);
>>  }
>>
>>  static int
>> @@ -762,7 +758,6 @@ dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
>>
>>      int i = 0;
>>
>> -    rte_spinlock_lock(&txq->tx_lock);
>>      while (i < cnt) {
>>          int freeslots = MAX_TX_QUEUE_LEN - txq->count;
>>          int tocopy = MIN(freeslots, cnt-i);
>> @@ -781,7 +776,6 @@ dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
>>              dpdk_queue_flush__(dev, qid);
>>          }
>>      }
>> -    rte_spinlock_unlock(&txq->tx_lock);
>>  }
>>
>>  /* Tx function. Transmit packets indefinitely */
>> --
>> 1.7.9.5
>>
