[dpdk-dev] [PATCH v4 2/3] vhost: move dirty logging cache out of the virtqueue

Maxime Coquelin maxime.coquelin at redhat.com
Tue Mar 23 10:02:18 CET 2021


This patch moves the per-virtqueue dirty logging cache out of the
virtqueue struct by allocating it dynamically, and only when live
migration is enabled.

It saves 8 cachelines in the vhost_virtqueue struct.
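
For reference, a minimal stand-alone sketch of where the 8 cachelines
come from, assuming VHOST_LOG_CACHE_NR is 32 and struct log_cache_entry
holds a 32-bit offset plus an unsigned long value, as declared in
vhost.h (16 bytes with padding on 64-bit):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_LOG_CACHE_NR 32   /* assumed value from vhost.h */
    #define CACHE_LINE_SIZE    64   /* typical x86_64 cacheline */

    struct log_cache_entry {        /* assumed layout from vhost.h */
            uint32_t offset;
            unsigned long val;
    };

    int main(void)
    {
            size_t sz = sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR;

            /* 32 entries * 16 bytes = 512 bytes = 8 * 64-byte cachelines */
            printf("%zu bytes, %zu cachelines\n", sz, sz / CACHE_LINE_SIZE);
            assert(sz / CACHE_LINE_SIZE == 8);
            return 0;
    }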

Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia at intel.com>
---
 lib/librte_vhost/vhost.c      | 13 +++++++++++++
 lib/librte_vhost/vhost.h      |  2 +-
 lib/librte_vhost/vhost_user.c | 21 +++++++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)
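
As the comment in the vhost_user_set_log_base() hunk below notes, a log
cache allocation failure does not abort migration; dirty pages are then
written straight to the shared log bitmap instead of being batched. A
minimal stand-alone sketch of that uncached path, assuming
vhost_log_page() sets one bit per dirty guest page (as in vhost.c):

    #include <stdint.h>
    #include <string.h>

    /*
     * Stand-in for the direct (uncached) logging path: set one bit per
     * dirty guest page in the shared log bitmap.
     */
    static inline void
    log_page_uncached(uint8_t *log_base, uint64_t page)
    {
            log_base[page / 8] |= 1 << (page % 8);
    }

    int main(void)
    {
            uint8_t log[16];

            memset(log, 0, sizeof(log));
            log_page_uncached(log, 10);     /* page 10 -> byte 1, bit 2 */
            return log[1] == 0x04 ? 0 : 1;
    }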

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 5a7c0c6cff..a8032e3ba1 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -145,6 +145,10 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!dev->log_base))
 		return;
 
+	/* No cache, nothing to sync */
+	if (unlikely(!vq->log_cache))
+		return;
+
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	log_base = (unsigned long *)(uintptr_t)dev->log_base;
@@ -177,6 +181,14 @@ vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t offset = page / (sizeof(unsigned long) << 3);
 	int i;
 
+	if (unlikely(!vq->log_cache)) {
+		/* No logging cache allocated, write dirty log map directly */
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+		return;
+	}
+
 	for (i = 0; i < vq->log_cache_nb_elem; i++) {
 		struct log_cache_entry *elem = vq->log_cache + i;
 
@@ -354,6 +366,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	}
 	rte_free(vq->batch_copy_elems);
 	rte_mempool_free(vq->iotlb_pool);
+	rte_free(vq->log_cache);
 	rte_free(vq);
 }
 
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 717f410548..3a71dfeed9 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -183,7 +183,7 @@ struct vhost_virtqueue {
 	bool			used_wrap_counter;
 	bool			avail_wrap_counter;
 
-	struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
+	struct log_cache_entry *log_cache;
 	uint16_t log_cache_nb_elem;
 
 	rte_rwlock_t	iotlb_lock;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index a60bb945ad..4d9e76e49e 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -2022,6 +2022,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
 
+	rte_free(vq->log_cache);
+	vq->log_cache = NULL;
+
 	msg->size = sizeof(msg->payload.state);
 	msg->fd_num = 0;
 
@@ -2121,6 +2124,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	int fd = msg->fds[0];
 	uint64_t size, off;
 	void *addr;
+	uint32_t i;
 
 	if (validate_msg_fds(msg, 1) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -2174,6 +2178,23 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	dev->log_base = dev->log_addr + off;
 	dev->log_size = size;
 
+	for (i = 0; i < dev->nr_vring; i++) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		rte_free(vq->log_cache);
+		vq->log_cache = NULL;
+		vq->log_cache_nb_elem = 0;
+		vq->log_cache = rte_zmalloc("vq log cache",
+				sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
+				0);
+		/*
+		 * If the log cache allocation fails, don't fail migration;
+		 * no caching will be done, which will impact performance.
+		 */
+		if (!vq->log_cache)
+			VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
+	}
+
 	/*
 	 * The spec is not clear about it (yet), but QEMU doesn't expect
 	 * any payload in the reply.
-- 
2.30.2


