>>
>> +static int
>> +netdev_dpdk_vhost_tx_burst(struct netdev_dpdk *dev, int qid) {
>> +    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
>> +    struct rte_mbuf **cur_pkts = (struct rte_mbuf **)txq->vhost_burst_pkts;
>> +
>> +    int tx_vid = netdev_dpdk_get_vid(dev);
>> +    int tx_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
>> +    uint32_t sent = 0;
>> +    uint32_t retries = 0;
>> +    uint32_t sum, total_pkts;
>> +
>> +    total_pkts = sum = txq->vhost_pkt_cnt;
>> +    do {
>> +        uint32_t ret;
>> +        ret = rte_vhost_enqueue_burst(tx_vid, tx_qid, &cur_pkts[sent],
>> +                                      sum);
>> +        if (OVS_UNLIKELY(!ret)) {
>> +            /* No packets enqueued - do not retry. */
>> +            break;
>> +        } else {
>> +            /* Packets have been sent. */
>> +            sent += ret;
>> +
>> +            /* 'sum' packets still have to be retransmitted. */
>> +            sum -= ret;
>> +        }
>> +    } while (sum && (retries++ < VHOST_ENQ_RETRY_NUM));
>> +
>> +    for (int i = 0; i < total_pkts; i++) {
>> +        dp_packet_delete(txq->vhost_burst_pkts[i]);
>> +    }
>> +
>> +    /* Reset pkt count. */
>> +    txq->vhost_pkt_cnt = 0;
>> +
>> +    /* 'sum' refers to packets dropped. */
>> +    return sum;
>> +}
>> +
>> +/* Flush the txq if there are any packets available. */
>> +static int
>> +netdev_dpdk_vhost_txq_flush(struct netdev *netdev, int qid,
>> +                            bool concurrent_txq OVS_UNUSED) {
>> +    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>> +    struct dpdk_tx_queue *txq;
>> +
>> +    qid = dev->tx_q[qid % netdev->n_txq].map;
>> +
>> +    /* The queue may have been disabled in the guest; in that case its
>> +     * map entry has been set to OVS_VHOST_QUEUE_DISABLED.
>> +     */
>> +    if (OVS_UNLIKELY(qid < 0)) {
>> +        return 0;
>> +    }
>> +
>> +    txq = &dev->tx_q[qid];
>> +    /* Increment the drop count and free the memory. */
>> +    if (OVS_UNLIKELY(!is_vhost_running(dev) ||
>> +                     !(dev->flags & NETDEV_UP))) {
>> +
>> +        if (txq->vhost_pkt_cnt) {
>> +            rte_spinlock_lock(&dev->stats_lock);
>> +            dev->stats.tx_dropped+= txq->vhost_pkt_cnt;
>> +            rte_spinlock_unlock(&dev->stats_lock);
>> +
>> +            for (int i = 0; i < txq->vhost_pkt_cnt; i++) {
>> +                dp_packet_delete(txq->vhost_burst_pkts[i]);
>
>Spinlock (tx_lock) must be held here to avoid queue and mempool breakage.

I think you are right. tx_lock must be acquired when freeing the packets.

---------------------------------------------------------------------------
    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    for (int i = 0; i < txq->vhost_pkt_cnt; i++) {
             dp_packet_delete(txq->vhost_burst_pkts[i]);
    }
    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

- Bhanuprakash
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to