[dpdk-stable] patch 'vhost: handle mbuf allocation failure' has been queued to stable release 19.11.3

luca.boccassi at gmail.com
Fri May 22 11:39:58 CEST 2020


Hi,

FYI, your patch has been queued to stable release 19.11.3

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 05/24/20, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes during
rebasing (i.e. not only metadata diffs), please double-check that the
rebase was done correctly.

Thanks.

Luca Boccassi

---
From c651ee6999508d5cab36d2449cccce282ba903c4 Mon Sep 17 00:00:00 2001
From: Sivaprasad Tummala <sivaprasad.tummala at intel.com>
Date: Fri, 8 May 2020 16:47:51 +0530
Subject: [PATCH] vhost: handle mbuf allocation failure

[ upstream commit 0fd5608ef97f9c467f1ecc926463cf793189443e ]

vhost buffer allocation is successful for packets that fit
into a linear buffer. If it fails, the vhost library is expected
to drop the current packet and skip to the next.

The patch fixes the error scenario by skipping to the next packet.
Note: Drop counters are not currently supported.

Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")

Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++--------
 1 file changed, 55 insertions(+), 15 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 22dcdf72bd..a6c106c13c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1676,6 +1676,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
 	uint16_t i;
 	uint16_t free_entries;
+	uint16_t dropped = 0;
+	static bool allocerr_warned;
 
 	if (unlikely(dev->dequeue_zero_copy)) {
 		struct zcopy_mbuf *zmbuf, *next;
@@ -1739,13 +1741,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			update_shadow_used_ring_split(vq, head_idx, 0);
 
 		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-		if (unlikely(pkts[i] == NULL))
+		if (unlikely(pkts[i] == NULL)) {
+			/*
+			 * mbuf allocation fails for jumbo packets when external
+			 * buffer allocation is not allowed and linear buffer
+			 * is required. Drop this packet.
+			 */
+			if (!allocerr_warned) {
+				RTE_LOG(ERR, VHOST_DATA,
+					"Failed mbuf alloc of size %d from %s on %s.\n",
+					buf_len, mbuf_pool->name, dev->ifname);
+				allocerr_warned = true;
+			}
+			dropped += 1;
+			i++;
 			break;
+		}
 
 		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
 				mbuf_pool);
 		if (unlikely(err)) {
 			rte_pktmbuf_free(pkts[i]);
+			if (!allocerr_warned) {
+				RTE_LOG(ERR, VHOST_DATA,
+					"Failed to copy desc to mbuf on %s.\n",
+					dev->ifname);
+				allocerr_warned = true;
+			}
+			dropped += 1;
+			i++;
 			break;
 		}
 
@@ -1755,6 +1779,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			zmbuf = get_zmbuf(vq);
 			if (!zmbuf) {
 				rte_pktmbuf_free(pkts[i]);
+				dropped += 1;
+				i++;
 				break;
 			}
 			zmbuf->mbuf = pkts[i];
@@ -1784,7 +1810,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 	}
 
-	return i;
+	return (i - dropped);
 }
 
 static __rte_always_inline int
@@ -1918,6 +1944,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 	uint32_t buf_len;
 	uint16_t nr_vec = 0;
 	int err;
+	static bool allocerr_warned;
 
 	if (unlikely(fill_vec_buf_packed(dev, vq,
 					 vq->last_avail_idx, desc_count,
@@ -1928,14 +1955,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 
 	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
 	if (unlikely(*pkts == NULL)) {
-		RTE_LOG(ERR, VHOST_DATA,
-			"Failed to allocate memory for mbuf.\n");
+		if (!allocerr_warned) {
+			RTE_LOG(ERR, VHOST_DATA,
+				"Failed mbuf alloc of size %d from %s on %s.\n",
+				buf_len, mbuf_pool->name, dev->ifname);
+			allocerr_warned = true;
+		}
 		return -1;
 	}
 
 	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
+		if (!allocerr_warned) {
+			RTE_LOG(ERR, VHOST_DATA,
+				"Failed to copy desc to mbuf on %s.\n",
+				dev->ifname);
+			allocerr_warned = true;
+		}
 		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
@@ -1950,21 +1987,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct rte_mbuf **pkts)
 {
 
-	uint16_t buf_id, desc_count;
+	uint16_t buf_id, desc_count = 0;
+	int ret;
 
-	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-					&desc_count))
-		return -1;
+	ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count);
 
-	if (virtio_net_is_inorder(dev))
-		vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
-							   desc_count);
-	else
-		vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+	if (likely(desc_count > 0)) {
+		if (virtio_net_is_inorder(dev))
+			vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+								   desc_count);
+		else
+			vhost_shadow_dequeue_single_packed(vq, buf_id,
+					desc_count);
 
-	vq_inc_last_avail_packed(vq, desc_count);
+		vq_inc_last_avail_packed(vq, desc_count);
+	}
 
-	return 0;
+	return ret;
 }
 
 static __rte_always_inline int
-- 
2.20.1
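
For reviewers who want the gist without re-reading the whole diff: in the
split dequeue path, the used-ring entry for a descriptor chain is recorded
before the mbuf is allocated, so on allocation failure the fix still
consumes the slot (i++), accounts it in 'dropped', warns once through a
static flag, and returns i - dropped so that callers only ever see
successfully filled mbufs. Below is a minimal, self-contained sketch of
that pattern; struct mbuf, pktmbuf_alloc() and the 1500-byte linear-buffer
limit are hypothetical stand-ins, not the real DPDK types or API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an mbuf; not the real struct rte_mbuf. */
struct mbuf {
	uint32_t len;
};

/*
 * Toy allocator that fails for jumbo sizes, modelling a pool that can
 * only hand out linear buffers (the failure case the patch handles).
 */
static struct mbuf *
pktmbuf_alloc(uint32_t buf_len)
{
	struct mbuf *m;

	if (buf_len > 1500)	/* assumed linear-buffer limit */
		return NULL;
	m = malloc(sizeof(*m));
	if (m != NULL)
		m->len = buf_len;
	return m;
}

/*
 * Sketch of the virtio_dev_tx_split() fix: on allocation failure,
 * warn once, still consume the slot, account it as dropped and stop,
 * then report only the successfully filled mbufs to the caller.
 */
static uint16_t
tx_split(const uint32_t *buf_lens, struct mbuf **pkts, uint16_t count)
{
	static bool allocerr_warned;	/* warn only once, as in the patch */
	uint16_t dropped = 0;
	uint16_t i;

	for (i = 0; i < count; i++) {
		pkts[i] = pktmbuf_alloc(buf_lens[i]);
		if (pkts[i] == NULL) {
			if (!allocerr_warned) {
				fprintf(stderr,
					"failed mbuf alloc of size %u\n",
					buf_lens[i]);
				allocerr_warned = true;
			}
			dropped += 1;
			i++;	/* the descriptor is consumed regardless */
			break;
		}
	}

	return i - dropped;	/* callers never see a NULL pkts[] entry */
}

int
main(void)
{
	const uint32_t lens[4] = { 64, 128, 9000, 256 };	/* 9000 fails */
	struct mbuf *pkts[4] = { NULL };
	uint16_t n, j;

	n = tx_split(lens, pkts, 4);
	printf("dequeued %u of 4 packets\n", n);	/* prints "2 of 4" */

	for (j = 0; j < n; j++)
		free(pkts[j]);
	return 0;
}

The subtlety is the i++ before the break: the descriptor is already
consumed at that point, so the loop index must advance even though no
mbuf is handed back, and subtracting 'dropped' keeps the returned packet
count consistent.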

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-05-22 10:37:39.952737996 +0100
+++ 0019-vhost-handle-mbuf-allocation-failure.patch	2020-05-22 10:37:39.108412355 +0100
@@ -1,8 +1,10 @@
-From 0fd5608ef97f9c467f1ecc926463cf793189443e Mon Sep 17 00:00:00 2001
+From c651ee6999508d5cab36d2449cccce282ba903c4 Mon Sep 17 00:00:00 2001
 From: Sivaprasad Tummala <sivaprasad.tummala at intel.com>
 Date: Fri, 8 May 2020 16:47:51 +0530
 Subject: [PATCH] vhost: handle mbuf allocation failure
 
+[ upstream commit 0fd5608ef97f9c467f1ecc926463cf793189443e ]
+
 vhost buffer allocation is successful for packets that fit
 into a linear buffer. If it fails, the vhost library is expected
 to drop the current packet and skip to the next.
@@ -11,7 +13,6 @@
 Note: Drop counters are not currently supported.
 
 Fixes: c3ff0ac70acb ("vhost: improve performance by supporting large buffer")
-Cc: stable at dpdk.org
 
 Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala at intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
@@ -20,10 +21,10 @@
  1 file changed, 55 insertions(+), 15 deletions(-)
 
 diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
-index 5e8c6b99c0..751c1f3733 100644
+index 22dcdf72bd..a6c106c13c 100644
 --- a/lib/librte_vhost/virtio_net.c
 +++ b/lib/librte_vhost/virtio_net.c
-@@ -1673,6 +1673,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+@@ -1676,6 +1676,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
  {
  	uint16_t i;
  	uint16_t free_entries;
@@ -32,7 +33,7 @@
  
  	if (unlikely(dev->dequeue_zero_copy)) {
  		struct zcopy_mbuf *zmbuf, *next;
-@@ -1734,13 +1736,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+@@ -1739,13 +1741,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
  			update_shadow_used_ring_split(vq, head_idx, 0);
  
  		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
@@ -44,7 +45,7 @@
 +			 * is required. Drop this packet.
 +			 */
 +			if (!allocerr_warned) {
-+				VHOST_LOG_DATA(ERR,
++				RTE_LOG(ERR, VHOST_DATA,
 +					"Failed mbuf alloc of size %d from %s on %s.\n",
 +					buf_len, mbuf_pool->name, dev->ifname);
 +				allocerr_warned = true;
@@ -59,7 +60,7 @@
  		if (unlikely(err)) {
  			rte_pktmbuf_free(pkts[i]);
 +			if (!allocerr_warned) {
-+				VHOST_LOG_DATA(ERR,
++				RTE_LOG(ERR, VHOST_DATA,
 +					"Failed to copy desc to mbuf on %s.\n",
 +					dev->ifname);
 +				allocerr_warned = true;
@@ -69,7 +70,7 @@
  			break;
  		}
  
-@@ -1750,6 +1774,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+@@ -1755,6 +1779,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
  			zmbuf = get_zmbuf(vq);
  			if (!zmbuf) {
  				rte_pktmbuf_free(pkts[i]);
@@ -78,7 +79,7 @@
  				break;
  			}
  			zmbuf->mbuf = pkts[i];
-@@ -1779,7 +1805,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+@@ -1784,7 +1810,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
  		}
  	}
  
@@ -87,7 +88,7 @@
  }
  
  static __rte_always_inline int
-@@ -1913,6 +1939,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
+@@ -1918,6 +1944,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
  	uint32_t buf_len;
  	uint16_t nr_vec = 0;
  	int err;
@@ -95,14 +96,14 @@
  
  	if (unlikely(fill_vec_buf_packed(dev, vq,
  					 vq->last_avail_idx, desc_count,
-@@ -1923,14 +1950,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
+@@ -1928,14 +1955,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
  
  	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
  	if (unlikely(*pkts == NULL)) {
--		VHOST_LOG_DATA(ERR,
+-		RTE_LOG(ERR, VHOST_DATA,
 -			"Failed to allocate memory for mbuf.\n");
 +		if (!allocerr_warned) {
-+			VHOST_LOG_DATA(ERR,
++			RTE_LOG(ERR, VHOST_DATA,
 +				"Failed mbuf alloc of size %d from %s on %s.\n",
 +				buf_len, mbuf_pool->name, dev->ifname);
 +			allocerr_warned = true;
@@ -114,7 +115,7 @@
  				mbuf_pool);
  	if (unlikely(err)) {
 +		if (!allocerr_warned) {
-+			VHOST_LOG_DATA(ERR,
++			RTE_LOG(ERR, VHOST_DATA,
 +				"Failed to copy desc to mbuf on %s.\n",
 +				dev->ifname);
 +			allocerr_warned = true;
@@ -122,7 +123,7 @@
  		rte_pktmbuf_free(*pkts);
  		return -1;
  	}
-@@ -1945,21 +1982,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
+@@ -1950,21 +1987,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
  			    struct rte_mbuf **pkts)
  {
  

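A note on the only non-metadata change in the diff above: the upstream
commit logs through the VHOST_LOG_DATA() macro, which does not exist in
the 19.11 tree, so the backport spells the same message out with the
older RTE_LOG() call against the VHOST_DATA logtype. A simplified,
illustrative mock-up of the two forms (these macro definitions are
stand-ins, not the real DPDK ones):

#include <stdio.h>

/* Simplified stand-ins for the two logging forms, illustration only. */
#define RTE_LOG(level, logtype, ...) \
	fprintf(stderr, #logtype ": " __VA_ARGS__)	/* 19.11 form */
#define VHOST_LOG_DATA(level, ...) \
	RTE_LOG(level, VHOST_DATA, __VA_ARGS__)	/* post-19.11 form */

int
main(void)
{
	const char *ifname = "vhost-user0";	/* hypothetical device name */

	/* What the upstream commit writes... */
	VHOST_LOG_DATA(ERR, "Failed to copy desc to mbuf on %s.\n", ifname);
	/* ...and the equivalent 19.11 backport spelling. */
	RTE_LOG(ERR, VHOST_DATA, "Failed to copy desc to mbuf on %s.\n",
		ifname);
	return 0;
}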
