[dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit

Marvin Liu <yong.liu@intel.com>
Wed Sep 18 19:06:55 CEST 2019


Check whether there is enough space before the burst enqueue operation.
If more space is needed, try to clean up used descriptors on demand.
This gives more chances to free used descriptors and thus helps RFC2544
performance. Also deduct failed xmit packets from the total xmit count.

Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org

Signed-off-by: Marvin Liu <yong.liu@intel.com>
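
For context, the on-demand reclaim added below boils down to simple ring
arithmetic: compute how many descriptors are still missing, reclaim at most
that many (bounded by what the device has marked used), and report whether
the ring is still short. The following is a minimal standalone model of that
logic using plain integers instead of struct virtqueue; the names `ring` and
`try_cleanup` are hypothetical and only illustrate the arithmetic of
virtio_xmit_try_cleanup_inorder() in the diff, not the real driver API:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Toy stand-in for struct virtqueue (illustration only). */
struct ring {
	int free_cnt;  /* descriptors currently free */
	int used_cnt;  /* descriptors the device has finished with */
};

/*
 * Reclaim just enough used descriptors to satisfy 'need'.
 * Mirrors the patch: target = free_cnt + need, clean
 * MIN(need, used), then return target - free_cnt, so a
 * result > 0 means the ring is still short after cleanup.
 */
static int
try_cleanup(struct ring *r, int need)
{
	int target = r->free_cnt + need;
	int nb_clean = MIN(need, r->used_cnt);

	/* Models what virtio_xmit_cleanup_inorder() achieves. */
	r->free_cnt += nb_clean;
	r->used_cnt -= nb_clean;

	return target - r->free_cnt;
}

int main(void)
{
	struct ring r = { .free_cnt = 1, .used_cnt = 8 };
	int slots = 3;                 /* descriptors one packet needs */
	int need = slots - r.free_cnt; /* 2 more required */

	if (need > 0 && try_cleanup(&r, need) > 0)
		printf("ring still short, stop transmitting\n");
	else
		printf("enough room, free_cnt now %d\n", r.free_cnt);
	return 0;
}

The Tx path below follows the same pattern: only when 'need' stays positive
after the cleanup attempt does it log the error and stop (or truncate the
batch and deduct the failed packets from nb_tx).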

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 91df5b1d0..7d5c60532 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+	uint16_t nb_used, nb_clean, nb_descs;
+	struct virtio_hw *hw = vq->hw;
+
+	nb_descs = vq->vq_free_cnt + need;
+	nb_used = VIRTQUEUE_NUSED(vq);
+	virtio_rmb(hw->weak_barriers);
+	nb_clean = RTE_MIN(need, (int)nb_used);
+
+	virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+	return (nb_descs - vq->vq_free_cnt);
+}
+
 uint16_t
 virtio_xmit_pkts_inorder(void *tx_queue,
 			struct rte_mbuf **tx_pkts,
@@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
+	int need;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2178,14 +2195,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
 		virtio_xmit_cleanup_inorder(vq, nb_used);
 
-	if (unlikely(!vq->vq_free_cnt))
-		virtio_xmit_cleanup_inorder(vq, nb_used);
-
-	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
-	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int slots, need;
+		int slots;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2203,6 +2215,17 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		}
 
 		if (nb_inorder_pkts) {
+			need = nb_inorder_pkts - vq->vq_free_cnt;
+			if (unlikely(need > 0)) {
+				need = virtio_xmit_try_cleanup_inorder(vq,
+								       need);
+				if (unlikely(need > 0)) {
+					PMD_TX_LOG(ERR,
+						"No free tx descriptors to "
+						"transmit");
+					break;
+				}
+			}
 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 							nb_inorder_pkts);
 			nb_inorder_pkts = 0;
@@ -2211,13 +2234,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		slots = txm->nb_segs + 1;
 		need = slots - vq->vq_free_cnt;
 		if (unlikely(need > 0)) {
-			nb_used = VIRTQUEUE_NUSED(vq);
-			virtio_rmb(hw->weak_barriers);
-			need = RTE_MIN(need, (int)nb_used);
-
-			virtio_xmit_cleanup_inorder(vq, need);
-
-			need = slots - vq->vq_free_cnt;
+			need = virtio_xmit_try_cleanup_inorder(vq, slots);
 
 			if (unlikely(need > 0)) {
 				PMD_TX_LOG(ERR,
@@ -2232,9 +2249,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	}
 
 	/* Transmit all inorder packets */
-	if (nb_inorder_pkts)
+	if (nb_inorder_pkts) {
+		need = nb_inorder_pkts - vq->vq_free_cnt;
+		if (unlikely(need > 0)) {
+			need = virtio_xmit_try_cleanup_inorder(vq,
+								  need);
+			if (unlikely(need > 0)) {
+				PMD_TX_LOG(ERR,
+					"No free tx descriptors to transmit");
+				nb_inorder_pkts = vq->vq_free_cnt;
+				nb_tx -= need;
+			}
+		}
+
 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 						nb_inorder_pkts);
+	}
 
 	txvq->stats.packets += nb_tx;
 
-- 
2.17.1
