[dpdk-dev] [PATCH v4 4/5] vhost: adapt vhost lib for selective datapath

Zhihong Wang zhihong.wang at intel.com
Fri Mar 30 12:01:04 CEST 2018


This patch adapts vhost lib for selective datapath by calling device ops
at the corresponding stage.

Signed-off-by: Zhihong Wang <zhihong.wang at intel.com>
---
Changes in v4:

 1. Remove the "engine" concept in the lib.

---
Changes in v2:

 1. Ensure negotiated capabilities are supported in vhost-user lib.

 2. Configure the data path at the right time.

 lib/librte_vhost/rte_vhost.h           | 27 ++++++++++
 lib/librte_vhost/rte_vhost_version.map |  2 +
 lib/librte_vhost/socket.c              | 94 ++++++++++++++++++++++++++++++++--
 lib/librte_vhost/vhost.c               |  3 ++
 lib/librte_vhost/vhost.h               |  2 +
 lib/librte_vhost/vhost_user.c          | 54 +++++++++++++++++--
 6 files changed, 172 insertions(+), 10 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index d50f4c67d..3c3334d3e 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -279,6 +279,33 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
 int rte_vhost_driver_get_features(const char *path, uint64_t *features);
 
 /**
+ * Get the protocol feature bits before feature negotiation.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param protocol_features
+ *  A pointer to store the queried protocol feature bits
+ * @return
+ *  0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_protocol_features(const char *path,
+		uint64_t *protocol_features);
+
+/**
+ * Get the maximum queue number supported before feature negotiation.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param queue_num
+ *  A pointer to store the queried maximum queue number
+ * @return
+ *  0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
+
+/**
  * Get the feature bits after negotiation
  *
  * @param vid
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 6e2d5364a..812ccd72b 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -67,4 +67,6 @@ EXPERIMENTAL {
 	rte_vhost_driver_set_vdpa_did;
 	rte_vhost_driver_get_vdpa_did;
 	rte_vhost_get_vdpa_did;
+	rte_vhost_driver_get_protocol_features;
+	rte_vhost_driver_get_queue_num;
 } DPDK_18.02;
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 3d58da94e..ba7b422a0 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -216,6 +216,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
 
+	vhost_set_vdpa_did(vid, vsocket->did);
+
 	if (vsocket->dequeue_zero_copy)
 		vhost_enable_dequeue_zero_copy(vid);
 
@@ -648,20 +650,105 @@ int
 rte_vhost_driver_get_features(const char *path, uint64_t *features)
 {
 	struct vhost_user_socket *vsocket;
+	uint64_t vdpa_features;
+	int did = -1;
+	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	if (vsocket)
-		*features = vsocket->features;
+	if (vsocket) {
+		did = vsocket->did;
+		if (did < 0 || vdpa_devices[did] == NULL ||
+				vdpa_devices[did]->ops->feature_get == NULL)
+			*features = vsocket->features;
+		else if (vdpa_devices[did]->ops->feature_get(did,
+					&vdpa_features) < 0) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"failed to get vdpa features "
+					"for socket file %s.\n", path);
+			ret = -1;
+		} else
+			*features = vsocket->features & vdpa_features;
+	} else {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"socket file %s is not registered yet.\n", path);
+		ret = -1;
+	}
 	pthread_mutex_unlock(&vhost_user.mutex);
 
-	if (!vsocket) {
+	return ret;
+}
+
+int
+rte_vhost_driver_get_protocol_features(const char *path,
+		uint64_t *protocol_features)
+{
+	struct vhost_user_socket *vsocket;
+	uint64_t vdpa_protocol_features;
+	int did = -1;
+	int ret = 0;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket) {
+		did = vsocket->did;
+		if (did < 0 || vdpa_devices[did] == NULL ||
+				vdpa_devices[did]->ops->protocol_feature_get
+				== NULL)
+			*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+		else if (vdpa_devices[did]->ops->protocol_feature_get(did,
+					&vdpa_protocol_features) < 0) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"failed to get vdpa protocol features "
+					"for socket file %s.\n", path);
+			ret = -1;
+		} else
+			*protocol_features = VHOST_USER_PROTOCOL_FEATURES
+				& vdpa_protocol_features;
+	} else {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"socket file %s is not registered yet.\n", path);
-		return -1;
+		ret = -1;
+	}
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	return ret;
+}
+
+int
+rte_vhost_driver_get_queue_num(const char *path,
+		uint32_t *queue_num)
+{
+	struct vhost_user_socket *vsocket;
+	uint32_t vdpa_queue_num;
+	int did = -1;
+	int ret = 0;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket) {
+		did = vsocket->did;
+		if (did < 0 || vdpa_devices[did] == NULL ||
+				vdpa_devices[did]->ops->queue_num_get == NULL)
+			*queue_num = VHOST_MAX_QUEUE_PAIRS;
+		else if (vdpa_devices[did]->ops->queue_num_get(
+					did, &vdpa_queue_num) < 0) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+					"failed to get vdpa queue number "
+					"for socket file %s.\n", path);
+			ret = -1;
+		} else if (vdpa_queue_num > VHOST_MAX_QUEUE_PAIRS)
+			*queue_num = VHOST_MAX_QUEUE_PAIRS;
+		else
+			*queue_num = vdpa_queue_num;
 	} else {
-		return 0;
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"socket file %s is not registered yet.\n", path);
+		ret = -1;
 	}
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	return ret;
 }
 
 /*
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 1740cc1ab..66b6b492f 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -296,11 +296,16 @@ void
 vhost_destroy_device(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
+	int did;
 
 	if (dev == NULL)
 		return;
 
+	/* Read dev->did only after the NULL check above. */
+	did = dev->did;
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (did >= 0 && vdpa_devices[did] != NULL &&
+				vdpa_devices[did]->ops->dev_close)
+			vdpa_devices[did]->ops->dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(vid);
 	}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 2970c1eab..30e8a0cfe 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -27,6 +27,8 @@
 #define VIRTIO_DEV_READY 2
 /* Used to indicate that the built-in vhost net device backend is enabled */
 #define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
+/* Used to indicate that the device has its own data path and configured */
+#define VIRTIO_DEV_VDPA_CONFIGURED 8
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 5c5361066..5049c8e55 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -133,7 +137,12 @@ vhost_user_set_owner(void)
 static int
 vhost_user_reset_owner(struct virtio_net *dev)
 {
+	int did = dev->did;
+
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (did >= 0 && vdpa_devices[did] != NULL &&
+				vdpa_devices[did]->ops->dev_close)
+			vdpa_devices[did]->ops->dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
@@ -156,12 +160,25 @@ vhost_user_get_features(struct virtio_net *dev)
 }
 
 /*
+ * The maximum queue number that we support is requested.
+ */
+static uint32_t
+vhost_user_get_queue_num(struct virtio_net *dev)
+{
+	uint32_t queue_num = 0;
+
+	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
+	return queue_num;
+}
+
+/*
  * We receive the negotiated features supported by us and the virtio device.
  */
 static int
 vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 {
 	uint64_t vhost_features = 0;
+	int did = dev->did;
 
 	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
 	if (features & ~vhost_features) {
@@ -191,6 +208,9 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 			dev->notify_ops->features_changed(dev->vid, features);
 	}
 
+	if (did >= 0 && vdpa_devices[did]->ops->feature_set)
+		vdpa_devices[did]->ops->feature_set(dev->vid);
+
 	dev->features = features;
 	if (dev->features &
 		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
@@ -933,14 +953,18 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 			  VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+	int did = dev->did;
 
 	/* We have to stop the queue (virtio) if it is running. */
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (did >= 0 && vdpa_devices[did]->ops->dev_close)
+			vdpa_devices[did]->ops->dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
 
 	dev->flags &= ~VIRTIO_DEV_READY;
+	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
 
 	/* Here we are safe to get the last avail index */
 	msg->payload.state.num = vq->last_avail_idx;
@@ -983,16 +1007,22 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
 			    VhostUserMsg *msg)
 {
 	int enable = (int)msg->payload.state.num;
+	int index = (int)msg->payload.state.index;
+	int did = dev->did;
 
 	RTE_LOG(INFO, VHOST_CONFIG,
 		"set queue enable: %d to qp idx: %d\n",
-		enable, msg->payload.state.index);
+		enable, index);
+
+	if (did >= 0 && vdpa_devices[did]->ops->vring_state_set)
+		vdpa_devices[did]->ops->vring_state_set(dev->vid,
+				index, enable);
 
 	if (dev->notify_ops->vring_state_changed)
 		dev->notify_ops->vring_state_changed(dev->vid,
-				msg->payload.state.index, enable);
+				index, enable);
 
-	dev->virtqueue[msg->payload.state.index]->enabled = enable;
+	dev->virtqueue[index]->enabled = enable;
 
 	return 0;
 }
@@ -1001,9 +1031,10 @@ static void
 vhost_user_get_protocol_features(struct virtio_net *dev,
 				 struct VhostUserMsg *msg)
 {
-	uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+	uint64_t features, protocol_features;
 
 	rte_vhost_driver_get_features(dev->ifname, &features);
+	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
 
 	/*
 	 * REPLY_ACK protocol feature is only mandatory for now
@@ -1099,6 +1130,7 @@ static int
 vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 {
 	uint8_t *mac = (uint8_t *)&msg->payload.u64;
+	int did = dev->did;
 
 	RTE_LOG(DEBUG, VHOST_CONFIG,
 		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1114,6 +1146,8 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 	 */
 	rte_smp_wmb();
 	rte_atomic16_set(&dev->broadcast_rarp, 1);
+	if (did >= 0 && vdpa_devices[did]->ops->migration_done)
+		vdpa_devices[did]->ops->migration_done(dev->vid);
 
 	return 0;
 }
@@ -1375,6 +1409,7 @@ vhost_user_msg_handler(int vid, int fd)
 {
 	struct virtio_net *dev;
 	struct VhostUserMsg msg;
+	int did;
 	int ret;
 	int unlock_required = 0;
 
@@ -1527,7 +1562,7 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_GET_QUEUE_NUM:
-		msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
+		msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
 		msg.size = sizeof(msg.payload.u64);
 		send_vhost_reply(fd, &msg);
 		break;
@@ -1580,6 +1615,15 @@ vhost_user_msg_handler(int vid, int fd)
 		}
 	}
 
+	did = dev->did;
+	if (did >= 0 && virtio_is_ready(dev) &&
+			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
+			msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+		if (vdpa_devices[did]->ops->dev_conf)
+			vdpa_devices[did]->ops->dev_conf(vid);
+		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+	}
+
 	return 0;
 }
 
-- 
2.13.6



More information about the dev mailing list