[dpdk-dev] [PATCH 2/3] net/virtio_user: fix wrong sequence of messages

Jianfeng Tan jianfeng.tan at intel.com
Fri Aug 5 13:36:42 CEST 2016


When virtio_user is used with VPP's native vhost-user backend, it
cannot send or receive any packets.

The root cause is that vpp-vhost-user interprets the message
VHOST_USER_SET_FEATURES as putting the device into its initial state,
i.e., it zeroes all related structures. However, the previous code
sent this message last in the initialization sequence, so all
information transferred before it was zeroed.

To fix this issue, we rearrange the sequence of those messages
(a minimal sketch of the new ordering follows the list):
  - step 0, send VHOST_USER_SET_VRING_CALL so that vhost allocates
    virtqueue structures;
  - step 1, send VHOST_USER_SET_FEATURES to confirm the features;
  - step 2, send VHOST_USER_SET_MEM_TABLE to share mem regions;
  - step 3, send VHOST_USER_SET_VRING_NUM, VHOST_USER_SET_VRING_BASE,
    VHOST_USER_SET_VRING_ADDR, VHOST_USER_SET_VRING_KICK for each
    queue;
  - ...
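
For illustration, below is a minimal, self-contained sketch of the new
ordering. send_msg(), start_device() and the enum values are
hypothetical stand-ins (the real code uses vhost_user_sock() and
struct virtio_user_dev, as in the diff below); only the message names
and their order reflect this patch.

#include <stdio.h>

/* Subset of vhost-user requests used below; symbolic values only, the
 * real values are defined by the vhost-user protocol.
 */
enum vhost_user_req {
	SET_FEATURES,
	SET_MEM_TABLE,
	SET_VRING_NUM,
	SET_VRING_BASE,
	SET_VRING_ADDR,
	SET_VRING_KICK,
	SET_VRING_CALL,
};

/* Hypothetical stand-in for vhost_user_sock(): just logs the request. */
static int
send_msg(int vhostfd, enum vhost_user_req req, const char *name)
{
	(void)req;
	printf("fd %d: %s\n", vhostfd, name);
	return 0;
}

static int
start_device(int vhostfd, unsigned int nr_vqs)
{
	unsigned int q;

	/* Step 0: per-vq SET_VRING_CALL, so the backend allocates the vqs */
	for (q = 0; q < nr_vqs; ++q)
		send_msg(vhostfd, SET_VRING_CALL, "SET_VRING_CALL");

	/* Step 1: SET_FEATURES early, because some backends (e.g. VPP)
	 * treat it as device (re)init and zero the state received so far
	 */
	send_msg(vhostfd, SET_FEATURES, "SET_FEATURES");

	/* Step 2: share memory regions */
	send_msg(vhostfd, SET_MEM_TABLE, "SET_MEM_TABLE");

	/* Step 3: per-vq layout, with SET_VRING_KICK last, since the
	 * backend uses it to judge whether virtio is ready
	 */
	for (q = 0; q < nr_vqs; ++q) {
		send_msg(vhostfd, SET_VRING_NUM,  "SET_VRING_NUM");
		send_msg(vhostfd, SET_VRING_BASE, "SET_VRING_BASE");
		send_msg(vhostfd, SET_VRING_ADDR, "SET_VRING_ADDR");
		send_msg(vhostfd, SET_VRING_KICK, "SET_VRING_KICK");
	}
	return 0;
}

int
main(void)
{
	return start_device(3 /* fake fd */, 2 /* 1 rx + 1 tx vq */);
}

Running it only prints the request sequence, which makes the new
ordering easy to compare against the diff below.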

Fixes: 37a7eb2ae816 ("net/virtio-user: add device emulation layer")

Reported-by: Zhihong Wang <zhihong.wang at intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan at intel.com>
---
 drivers/net/virtio/virtio_user/virtio_user_dev.c | 120 ++++++++++++++---------
 1 file changed, 72 insertions(+), 48 deletions(-)

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 2c4e999..afdf721 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -45,20 +45,14 @@
 #include "../virtio_ethdev.h"
 
 static int
-virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 {
-	int callfd, kickfd;
+	/* Of all the per-virtqueue messages, VHOST_USER_SET_VRING_CALL must
+	 * come first, because vhost relies on it to allocate the virtqueue
+	 * pair.
+	 */
+	int callfd;
 	struct vhost_vring_file file;
-	struct vhost_vring_state state;
-	struct vring *vring = &dev->vrings[queue_sel];
-	struct vhost_vring_addr addr = {
-		.index = queue_sel,
-		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
-		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
-		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
-		.log_guest_addr = 0,
-		.flags = 0, /* disable log */
-	};
 
 	/* May use invalid flag, but some backend leverages kickfd and callfd as
 	 * criteria to judge if dev is alive. so finally we use real event_fd.
@@ -68,22 +62,30 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 		PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
 		return -1;
 	}
-	kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
-	if (kickfd < 0) {
-		close(callfd);
-		PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
-		return -1;
-	}
-
-	/* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come
-	 * firstly because vhost depends on this msg to allocate virtqueue
-	 * pair.
-	 */
 	file.index = queue_sel;
 	file.fd = callfd;
 	vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file);
 	dev->callfds[queue_sel] = callfd;
 
+	return 0;
+}
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+	int kickfd;
+	struct vhost_vring_file file;
+	struct vhost_vring_state state;
+	struct vring *vring = &dev->vrings[queue_sel];
+	struct vhost_vring_addr addr = {
+		.index = queue_sel,
+		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
+		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
+		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
+		.log_guest_addr = 0,
+		.flags = 0, /* disable log */
+	};
+
 	state.index = queue_sel;
 	state.num = vring->num;
 	vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state);
@@ -97,6 +99,12 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	 * lastly because vhost depends on this msg to judge if
 	 * virtio is ready.
 	 */
+	kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+	if (kickfd < 0) {
+		PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
+		return -1;
+	}
+	file.index = queue_sel;
 	file.fd = kickfd;
 	vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file);
 	dev->kickfds[queue_sel] = kickfd;
@@ -104,44 +112,43 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	return 0;
 }
 
-int
-virtio_user_start_device(struct virtio_user_dev *dev)
+static int
+virtio_user_queue_setup(struct virtio_user_dev *dev,
+			int (*fn)(struct virtio_user_dev *, uint32_t))
 {
-	uint64_t features;
 	uint32_t i, queue_sel;
-	int ret;
-
-	/* construct memory region inside each implementation */
-	ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
-	if (ret < 0)
-		goto error;
 
 	for (i = 0; i < dev->max_queue_pairs; ++i) {
 		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
-		if (virtio_user_kick_queue(dev, queue_sel) < 0) {
-			PMD_DRV_LOG(INFO, "kick rx vq fails: %u", i);
-			goto error;
+		if (fn(dev, queue_sel) < 0) {
+			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
+			return -1;
 		}
 	}
 	for (i = 0; i < dev->max_queue_pairs; ++i) {
 		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
-		if (virtio_user_kick_queue(dev, queue_sel) < 0) {
-			PMD_DRV_LOG(INFO, "kick tx vq fails: %u", i);
-			goto error;
+		if (fn(dev, queue_sel) < 0) {
+			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
+			return -1;
 		}
 	}
 
-	/* As this feature is negotiated from the vhost, all queues are
-	 * initialized in the disabled state. For non-mq case, we enable
-	 * the 1st queue pair by default.
-	 */
-	if (dev->features & (1ull << VHOST_USER_GET_PROTOCOL_FEATURES))
-		vhost_user_enable_queue_pair(dev->vhostfd, 0, 1);
+	return 0;
+}
 
-	/* After setup all virtqueues, we need to set_features so that these
-	 * features can be set into each virtqueue in vhost side. And before
-	 * that, make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is
-	 * enabled, and VIRTIO_NET_F_MAC is stripped.
+int
+virtio_user_start_device(struct virtio_user_dev *dev)
+{
+	uint64_t features;
+	int ret;
+
+	/* Step 0: tell vhost to create queues */
+	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
+		goto error;
+
+	/* Step 1: set features
+	 * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
+	 * and VIRTIO_NET_F_MAC is stripped.
 	 */
 	features = dev->features;
 	if (dev->max_queue_pairs > 1)
@@ -152,6 +159,23 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 		goto error;
 	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
 
+	/* Step 2: share memory regions */
+	ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
+	if (ret < 0)
+		goto error;
+
+	/* Step 3: kick queues */
+	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
+		goto error;
+
+	/* Step 4: enable queues
+	 * As this feature is negotiated from the vhost, all queues are
+	 * initialized in the disabled state. For non-mq case, we enable
+	 * the 1st queue pair by default.
+	 */
+	if (dev->features & (1ull << VHOST_USER_GET_PROTOCOL_FEATURES))
+		vhost_user_enable_queue_pair(dev->vhostfd, 0, 1);
+
 	return 0;
 error:
 	/* TODO: free resource here or caller to check */
-- 
2.7.4


