[dpdk-dev] [PATCH v2 4/6] vhost: adapt vhost lib for selective datapath

Zhihong Wang zhihong.wang at intel.com
Mon Mar 5 10:20:28 CET 2018


This patch adapts the vhost lib for selective datapath by calling the vDPA
device ops at the corresponding stages of the vhost-user protocol.

Signed-off-by: Zhihong Wang <zhihong.wang at intel.com>
---
Changes in v2:

 1. Ensure negotiated capabilities are supported in vhost-user lib.

 2. Configure the data path at the right time.
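
For reviewers, a minimal sketch of how an application could use the getters
this patch adds (rte_vhost_driver_get_protocol_features() and
rte_vhost_driver_get_queue_num(), together with the existing
rte_vhost_driver_get_features()) to see the capabilities capped by the vDPA
engine. The dump_socket_caps() helper below is illustrative only and not part
of this patch:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <rte_vhost.h>

/* Example only: print what a registered vhost-user socket will advertise
 * once the vDPA engine attributes have been taken into account.
 */
static void
dump_socket_caps(const char *path)
{
	uint64_t features = 0, protocol_features = 0;
	uint32_t queue_num = 0;

	/* Each getter returns 0 on success, -1 if the socket path is not
	 * registered yet.
	 */
	if (rte_vhost_driver_get_features(path, &features) < 0 ||
			rte_vhost_driver_get_protocol_features(path,
				&protocol_features) < 0 ||
			rte_vhost_driver_get_queue_num(path, &queue_num) < 0) {
		printf("socket %s is not registered yet\n", path);
		return;
	}

	printf("%s: features 0x%" PRIx64 ", protocol features 0x%" PRIx64
		", max queue pairs %u\n",
		path, features, protocol_features, queue_num);
}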

 lib/librte_vhost/rte_vhost.h  | 25 ++++++++++++++
 lib/librte_vhost/socket.c     | 76 +++++++++++++++++++++++++++++++++++++++++--
 lib/librte_vhost/vhost.c      |  3 ++
 lib/librte_vhost/vhost.h      |  2 ++
 lib/librte_vhost/vhost_user.c | 55 ++++++++++++++++++++++++++++----
 5 files changed, 154 insertions(+), 7 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index a76acea6b..9bec36756 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -277,6 +277,31 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
 int rte_vhost_driver_get_features(const char *path, uint64_t *features);
 
 /**
+ * Get the protocol feature bits before feature negotiation.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param protocol_features
+ *  A pointer to store the queried protocol feature bits
+ * @return
+ *  0 on success, -1 on failure
+ */
+int rte_vhost_driver_get_protocol_features(const char *path,
+		uint64_t *protocol_features);
+
+/**
+ * Get the maximum queue number supported before feature negotiation.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param queue_num
+ *  A pointer to store the queried maximum queue number
+ * @return
+ *  0 on success, -1 on failure
+ */
+int rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
+
+/**
  * Get the feature bits after negotiation
  *
  * @param vid
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 5367ba771..0354740fa 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -216,6 +216,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
 
+	vhost_set_vdpa_eid(vid, vsocket->eid);
+	vhost_set_vdpa_did(vid, vsocket->did);
+
 	if (vsocket->dequeue_zero_copy)
 		vhost_enable_dequeue_zero_copy(vid);
 
@@ -667,11 +670,80 @@ int
 rte_vhost_driver_get_features(const char *path, uint64_t *features)
 {
 	struct vhost_user_socket *vsocket;
+	struct rte_vdpa_eng_attr attr;
+	int eid = -1;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
-	if (vsocket)
-		*features = vsocket->features;
+	if (vsocket) {
+		eid = vsocket->eid;
+		if (rte_vdpa_info_query(eid, &attr) < 0)
+			*features = vsocket->features;
+		else
+			*features = vsocket->features & attr.features;
+
+	}
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	if (!vsocket) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"socket file %s is not registered yet.\n", path);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+int
+rte_vhost_driver_get_protocol_features(const char *path,
+		uint64_t *protocol_features)
+{
+	struct vhost_user_socket *vsocket;
+	struct rte_vdpa_eng_attr attr;
+	int eid = -1;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket) {
+		eid = vsocket->eid;
+		if (rte_vdpa_info_query(eid, &attr) < 0)
+			*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+		else
+			*protocol_features = VHOST_USER_PROTOCOL_FEATURES
+				& attr.protocol_features;
+
+	}
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	if (!vsocket) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"socket file %s is not registered yet.\n", path);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+int
+rte_vhost_driver_get_queue_num(const char *path,
+		uint32_t *queue_num)
+{
+	struct vhost_user_socket *vsocket;
+	struct rte_vdpa_eng_attr attr;
+	int eid = -1;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket) {
+		eid = vsocket->eid;
+		if (rte_vdpa_info_query(eid, &attr) < 0)
+			*queue_num = VHOST_MAX_QUEUE_PAIRS;
+		else if (attr.queue_num > VHOST_MAX_QUEUE_PAIRS)
+			*queue_num = VHOST_MAX_QUEUE_PAIRS;
+		else
+			*queue_num = attr.queue_num;
+
+	}
 	pthread_mutex_unlock(&vhost_user.mutex);
 
 	if (!vsocket) {
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 45cf90f99..f8a5a1c42 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -297,11 +297,14 @@ void
 vhost_destroy_device(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
+	int eid = dev ? dev->eid : -1;
 
 	if (dev == NULL)
 		return;
 
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(vid);
 	}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b3fa6da03..e11b27842 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -27,6 +27,8 @@
 #define VIRTIO_DEV_READY 2
 /* Used to indicate that the built-in vhost net device backend is enabled */
 #define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
+/* Used to indicate that the device has its own data path and is configured */
+#define VIRTIO_DEV_VDPA_CONFIGURED 8
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index c93e48e4d..8b07b6c43 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -133,7 +133,11 @@ vhost_user_set_owner(void)
 static int
 vhost_user_reset_owner(struct virtio_net *dev)
 {
+	int eid = dev->eid;
+
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
@@ -156,12 +160,25 @@ vhost_user_get_features(struct virtio_net *dev)
 }
 
 /*
+ * The number of queue pairs we support is requested.
+ */
+static uint32_t
+vhost_user_get_queue_num(struct virtio_net *dev)
+{
+	uint32_t queue_num = 0;
+
+	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
+	return queue_num;
+}
+
+/*
  * We receive the negotiated features supported by us and the virtio device.
  */
 static int
 vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 {
 	uint64_t vhost_features = 0;
+	int eid = dev->eid;
 
 	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
 	if (features & ~vhost_features) {
@@ -191,6 +208,9 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 			dev->notify_ops->features_changed(dev->vid, features);
 	}
 
+	if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.feature_set)
+		vdpa_engines[eid]->eng_drv->dev_ops.feature_set(dev->vid);
+
 	dev->features = features;
 	if (dev->features &
 		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
@@ -933,9 +953,12 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 			  VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+	int eid = dev->eid;
 
 	/* We have to stop the queue (virtio) if it is running. */
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
@@ -983,16 +1006,22 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
 			    VhostUserMsg *msg)
 {
 	int enable = (int)msg->payload.state.num;
+	int index = (int)msg->payload.state.index;
+	int eid = dev->eid;
 
 	RTE_LOG(INFO, VHOST_CONFIG,
 		"set queue enable: %d to qp idx: %d\n",
-		enable, msg->payload.state.index);
+		enable, index);
+
+	if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.vring_state_set)
+		vdpa_engines[eid]->eng_drv->dev_ops.vring_state_set(dev->vid,
+				index, enable);
 
 	if (dev->notify_ops->vring_state_changed)
 		dev->notify_ops->vring_state_changed(dev->vid,
-				msg->payload.state.index, enable);
+				index, enable);
 
-	dev->virtqueue[msg->payload.state.index]->enabled = enable;
+	dev->virtqueue[index]->enabled = enable;
 
 	return 0;
 }
@@ -1001,9 +1030,10 @@ static void
 vhost_user_get_protocol_features(struct virtio_net *dev,
 				 struct VhostUserMsg *msg)
 {
-	uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+	uint64_t features, protocol_features;
 
 	rte_vhost_driver_get_features(dev->ifname, &features);
+	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
 
 	/*
 	 * REPLY_ACK protocol feature is only mandatory for now
@@ -1015,7 +1045,7 @@ vhost_user_get_protocol_features(struct virtio_net *dev,
 		protocol_features &=
 			~(1ULL << RTE_VHOST_USER_PROTOCOL_F_REPLY_ACK);
 
 	msg->payload.u64 = protocol_features;
 	msg->size = sizeof(msg->payload.u64);
 }
 
@@ -1100,6 +1129,7 @@ static int
 vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 {
 	uint8_t *mac = (uint8_t *)&msg->payload.u64;
+	int eid = dev->eid;
 
 	RTE_LOG(DEBUG, VHOST_CONFIG,
 		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1115,6 +1145,8 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 	 */
 	rte_smp_wmb();
 	rte_atomic16_set(&dev->broadcast_rarp, 1);
+	if (eid >= 0 && vdpa_engines[eid]->eng_drv->dev_ops.migration_done)
+		vdpa_engines[eid]->eng_drv->dev_ops.migration_done(dev->vid);
 
 	return 0;
 }
@@ -1376,6 +1408,8 @@ vhost_user_msg_handler(int vid, int fd)
 {
 	struct virtio_net *dev;
 	struct VhostUserMsg msg;
+	struct rte_vdpa_engine *eng;
+	int eid;
 	int ret;
 	int unlock_required = 0;
 
@@ -1528,7 +1562,7 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_GET_QUEUE_NUM:
-		msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
+		msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
 		msg.size = sizeof(msg.payload.u64);
 		send_vhost_reply(fd, &msg);
 		break;
@@ -1581,6 +1615,16 @@ vhost_user_msg_handler(int vid, int fd)
 		}
 	}
 
+	eid = dev->eid;
+	if (eid >= 0 && virtio_is_ready(dev) &&
+			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
+			msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+		eng = vdpa_engines[eid];
+		if (eng->eng_drv->dev_ops.dev_conf)
+			eng->eng_drv->dev_ops.dev_conf(vid);
+		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+	}
+
 	return 0;
 }
 
-- 
2.13.6


