[dpdk-dev] [PATCH v3 2/2] net/virtio: move VLAN tag insertion to Tx prepare

Andrew Rybchenko arybchenko at solarflare.com
Sun Jun 16 12:17:09 CEST 2019


From: Dilshod Urazov <Dilshod.Urazov at oktetlabs.ru>

VLAN tag insertion should be done in Tx prepare, not in the Tx burst functions.
Doing it in the burst path duplicates the same code across the split, packed and
in-order paths, and on rte_vlan_insert() failure the packet is silently freed
instead of the error being reported to the caller via rte_errno, which the
Tx prepare API allows.

Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
Cc: stable at dpdk.org

Signed-off-by: Dilshod Urazov <Dilshod.Urazov at oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko at solarflare.com>
---
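For context (not part of the patch): a minimal sketch of how a caller is
expected to drive the Tx prepare path so that VLAN insertion now happens
there rather than in Tx burst. The send_burst() helper, the port/queue
identifiers and the VLAN ID 100 are illustrative placeholders only, not
anything defined by this change.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative helper, not part of this patch. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
           struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t i, nb_prep, nb_sent;

        for (i = 0; i < nb_pkts; i++) {
                /* Request VLAN tag insertion at transmit time. */
                pkts[i]->ol_flags |= PKT_TX_VLAN_PKT;
                pkts[i]->vlan_tci = 100;        /* placeholder VLAN ID */
        }

        /*
         * With this patch virtio performs rte_vlan_insert() here, in
         * Tx prepare, instead of inside the Tx burst functions.
         */
        nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
        if (nb_prep < nb_pkts)
                /* rte_errno explains why pkts[nb_prep] was rejected. */
                printf("tx_prepare stopped at %u: %s\n",
                       nb_prep, rte_strerror(rte_errno));

        nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
        /* Packets from index nb_sent onwards remain owned by the caller. */
        return nb_sent;
}
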
 drivers/net/virtio/virtio_rxtx.c | 50 +++++++++-----------------------
 1 file changed, 14 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 07f8f47de..dcce39e8c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1966,6 +1966,20 @@ virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 		}
 #endif
 
+		/* Do VLAN tag insertion */
+		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&m);
+			/* rte_vlan_insert() may change pointer
+			 * even in the case of failure
+			 */
+			tx_pkts[nb_tx] = m;
+
+			if (unlikely(error)) {
+				rte_errno = -error;
+				break;
+			}
+		}
+
 		error = rte_net_intel_cksum_prepare(m);
 		if (unlikely(error)) {
 			rte_errno = -error;
@@ -1989,7 +2003,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_tx = 0;
 	bool in_order = hw->use_inorder_tx;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2007,17 +2020,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2077,7 +2079,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2096,17 +2097,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, use_indirect = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2176,7 +2166,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2201,17 +2190,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
-- 
2.17.1


