[dpdk-dev] [PATCH v6 4/5] examples/vhost: handle memory hotplug for async vhost

Cheng Jiang cheng1.jiang at intel.com
Mon Jul 19 10:10:21 CEST 2021


When guest memory is hotplugged, a vhost application that enables DMA
acceleration must stop all DMA transfers before vhost re-maps the
guest memory.
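
The accounting half of that stop is a per-device in-flight counter. A
minimal sketch of the counting pattern, using the same GCC __atomic
builtins as the diff below; the inflight_add()/inflight_sub() helper
names are illustrative only, the patch open-codes these calls at the
submit and completion sites:

#include <stdint.h>

/* Illustrative reduction: the real struct vhost_dev lives in
 * examples/vhost/main.h and carries many more fields. */
struct vhost_dev {
	uint16_t pkts_inflight;
};

/* After rte_vhost_submit_enqueue_burst(): only packets actually handed
 * to the DMA engine (enqueued minus CPU-copied) stay in flight. */
static inline void
inflight_add(struct vhost_dev *vdev, uint16_t n)
{
	__atomic_add_fetch(&vdev->pkts_inflight, n, __ATOMIC_SEQ_CST);
}

/* After rte_vhost_poll_enqueue_completed(): completed packets have
 * left the DMA engine and may be freed. */
static inline void
inflight_sub(struct vhost_dev *vdev, uint16_t n)
{
	__atomic_sub_fetch(&vdev->pkts_inflight, n, __ATOMIC_SEQ_CST);
}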

To accomplish that, this patch makes the following changes in the
vhost sample:
1. add an in-flight packet counter to struct vhost_dev.
2. add a vring_state_changed() callback.
3. clear the in-flight packets in destroy_device() and
vring_state_changed() (condensed into the sketch after this list).
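
Changes 1 and 3 combine into the loop below, shown as a condensed
sketch; drain_inflight_pkts() is an illustrative wrapper name, not a
function this patch adds, and free_pkts() is the sample's existing
helper in main.c:

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

/* Illustrative wrapper over the loop the diff adds to both
 * destroy_device() and vring_state_changed(). */
static void
drain_inflight_pkts(struct vhost_dev *vdev, int vid, uint16_t queue_id)
{
	uint16_t n_pkt = 0;
	struct rte_mbuf *m_cpl[vdev->pkts_inflight];

	/* Poll until every in-flight DMA transfer has landed; only then
	 * is it safe for vhost to unmap and re-map guest memory. */
	while (vdev->pkts_inflight) {
		n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
					m_cpl, vdev->pkts_inflight);
		free_pkts(m_cpl, n_pkt);
		__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt,
					__ATOMIC_SEQ_CST);
	}
}

The thread-unsafe clear variant is usable at both call sites because
the device has already been removed from the data core
(destroy_device) or the vring has been disabled (vring_state_changed),
so no lcore is still enqueueing to the queue.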

Signed-off-by: Cheng Jiang <cheng1.jiang at intel.com>
---
 examples/vhost/main.c | 55 +++++++++++++++++++++++++++++++++++++++++--
 examples/vhost/main.h |  1 +
 2 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..cfd2bc157c 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
 					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
-	if (complete_count)
+	if (complete_count) {
 		free_pkts(p_cpl, complete_count);
+		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+	}
+
 }
 
 static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
 					m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+		__atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
 
 		if (cpu_cpl_nr)
 			free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count,
 					m_cpu_cpl, &cpu_cpl_nr);
+		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+					__ATOMIC_SEQ_CST);
+
 		if (cpu_cpl_nr)
 			free_pkts(m_cpu_cpl, cpu_cpl_nr);
 
@@ -1397,8 +1404,19 @@ destroy_device(int vid)
 		"(%d) device has been removed from data core\n",
 		vdev->vid);
 
-	if (async_vhost_driver)
+	if (async_vhost_driver) {
+		uint16_t n_pkt = 0;
+		struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+		while (vdev->pkts_inflight) {
+			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
+						m_cpl, vdev->pkts_inflight);
+			free_pkts(m_cpl, n_pkt);
+			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+		}
+
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+	}
 
 	rte_free(vdev);
 }
@@ -1487,6 +1505,38 @@ new_device(int vid)
 	return 0;
 }
 
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+	struct vhost_dev *vdev = NULL;
+
+	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+		if (vdev->vid == vid)
+			break;
+	}
+	if (!vdev)
+		return -1;
+
+	if (queue_id != VIRTIO_RXQ)
+		return 0;
+
+	if (async_vhost_driver) {
+		if (!enable) {
+			uint16_t n_pkt = 0;
+			struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+			while (vdev->pkts_inflight) {
+				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
+							m_cpl, vdev->pkts_inflight);
+				free_pkts(m_cpl, n_pkt);
+				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+			}
+		}
+	}
+
+	return 0;
+}
+
 /*
  * These callback allow devices to be added to the data core when configuration
  * has been fully complete.
@@ -1495,6 +1545,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
 {
 	.new_device =  new_device,
 	.destroy_device = destroy_device,
+	.vring_state_changed = vring_state_changed,
 };
 
 /*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
 	uint64_t features;
 	size_t hdr_len;
 	uint16_t nr_vrings;
+	uint16_t pkts_inflight;
 	struct rte_vhost_memory *mem;
 	struct device_statistics stats;
 	TAILQ_ENTRY(vhost_dev) global_vdev_entry;
-- 
2.29.2


