[dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type

Cheng Jiang cheng1.jiang at intel.com
Mon Jul 19 10:10:18 CEST 2021


The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable at dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang at intel.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  4 +--
 lib/vhost/rte_vhost_async.h |  8 ++---
 lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
 4 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..e964d83837 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param count
 	 *  number of elements in the "descs" array
 	 * @return
-	 *  number of descs processed
+	 *  number of descs processed, negative value means error
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param max_packets
 	 *  max number of packets could be completed
 	 * @return
-	 *  number of async descs completed
+	 *  number of async descs completed, negative value means error
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..16ae4d9e19 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_xfer;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_xfer;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_cpl;
 
 	if (!dev)
 		return 0;
@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_cpl = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_cpl >= 0) {
+			n_pkts_cpl = n_cpl;
+		} else {
+			VHOST_LOG_DATA(ERR,
+				"(%d) %s: failed to check completed copies for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2



More information about the dev mailing list