[dpdk-dev] [PATCH v2 1/4] net/virtio: fix vector Rx break caused by rxq flushing

Tiwei Bie tiwei.bie at intel.com
Mon Dec 11 06:13:29 CET 2017


The vector Rx will be broken if the backend has consumed
all the descriptors in the avail ring before the device is
started, because in the current implementation vector Rx
returns immediately without refilling the avail ring when
the used ring is empty. So we have to refill the avail
ring after flushing the elements in the used ring for
vector Rx.

Besides, vector Rx assumes a different ring layout and
manages mbufs differently (via sw_ring rather than the
per-descriptor cookies), so it has to be handled
separately.
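
Concretely, the two paths locate the mbuf for a used entry
differently; a paraphrase of the two lookups the flush has
to switch between (mirroring the virtqueue.c hunk below):

	/* Regular Rx: the mbuf pointer is stored per descriptor. */
	desc_idx = (uint16_t)uep->id;
	mbuf = vq->vq_descx[desc_idx].cookie;

	/* Vector Rx: descriptors are consumed in ring order, so the
	 * used ring index itself locates the mbuf in sw_ring. */
	mbuf = vq->sw_ring[used_idx];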

Fixes: d8227497ec5c ("net/virtio: flush Rx queues on start")
Cc: stable at dpdk.org

Reported-by: Antonio Fischetti <antonio.fischetti at intel.com>
Signed-off-by: Tiwei Bie <tiwei.bie at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
Tested-by: Antonio Fischetti <antonio.fischetti at intel.com>
---
 drivers/net/virtio/virtio_ethdev.c |  2 +-
 drivers/net/virtio/virtqueue.c     | 31 ++++++++++++++++++++++++-------
 drivers/net/virtio/virtqueue.h     |  2 +-
 3 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e0328f61d..64a0cc608 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1860,7 +1860,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxvq = dev->data->rx_queues[i];
 		/* Flush the old packets */
-		virtqueue_flush(rxvq->vq);
+		virtqueue_rxvq_flush(rxvq->vq);
 		virtqueue_notify(rxvq->vq);
 	}
 
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index c3a536f8a..696d0e4a4 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -37,6 +37,7 @@
 #include "virtqueue.h"
 #include "virtio_logs.h"
 #include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
 
 /*
  * Two types of mbuf to be cleaned:
@@ -62,8 +63,10 @@ virtqueue_detatch_unused(struct virtqueue *vq)
 
 /* Flush the elements in the used ring. */
 void
-virtqueue_flush(struct virtqueue *vq)
+virtqueue_rxvq_flush(struct virtqueue *vq)
 {
+	struct virtnet_rx *rxq = &vq->rxq;
+	struct virtio_hw *hw = vq->hw;
 	struct vring_used_elem *uep;
 	struct vq_desc_extra *dxp;
 	uint16_t used_idx, desc_idx;
@@ -74,13 +77,27 @@ virtqueue_flush(struct virtqueue *vq)
 	for (i = 0; i < nb_used; i++) {
 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
 		uep = &vq->vq_ring.used->ring[used_idx];
-		desc_idx = (uint16_t)uep->id;
-		dxp = &vq->vq_descx[desc_idx];
-		if (dxp->cookie != NULL) {
-			rte_pktmbuf_free(dxp->cookie);
-			dxp->cookie = NULL;
+		if (hw->use_simple_rx) {
+			desc_idx = used_idx;
+			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+			vq->vq_free_cnt++;
+		} else {
+			desc_idx = (uint16_t)uep->id;
+			dxp = &vq->vq_descx[desc_idx];
+			if (dxp->cookie != NULL) {
+				rte_pktmbuf_free(dxp->cookie);
+				dxp->cookie = NULL;
+			}
+			vq_ring_free_chain(vq, desc_idx);
 		}
 		vq->vq_used_cons_idx++;
-		vq_ring_free_chain(vq, desc_idx);
+	}
+
+	if (hw->use_simple_rx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxq);
+			if (virtqueue_kick_prepare(vq))
+				virtqueue_notify(vq);
+		}
 	}
 }
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2305d91a4..ab466c2db 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -304,7 +304,7 @@ void virtqueue_dump(struct virtqueue *vq);
 struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
 
 /* Flush the elements in the used ring. */
-void virtqueue_flush(struct virtqueue *vq);
+void virtqueue_rxvq_flush(struct virtqueue *vq);
 
 static inline int
 virtqueue_full(const struct virtqueue *vq)
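
A note on the rearm loop added above: each call to
virtio_rxq_rearm_vec() allocates a burst of
RTE_VIRTIO_VPMD_RX_REARM_THRESH mbufs into sw_ring and
publishes matching avail descriptors, decrementing
vq_free_cnt by the same amount, which is what makes the
while loop terminate. A rough sketch of its body
(abbreviated from virtio_rxtx_simple.h; allocation-failure
handling omitted):

	/* Allocate one burst of mbufs straight into sw_ring. */
	rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
			     RTE_VIRTIO_VPMD_RX_REARM_THRESH);

	/* Point the matching descriptors at the new buffers. */
	for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
		start_dp[i].addr = VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}

	/* Publish the new descriptors to the backend. */
	vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq_update_avail_idx(vq);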
-- 
2.13.3