[dpdk-dev] [PATCH 5/7] vhost: adapt vhost lib for selective datapath

Zhihong Wang zhihong.wang at intel.com
Sat Feb 3 00:28:55 CET 2018


This patch adapts the vhost lib for selective datapath by calling the
vDPA device ops (dev_conf, dev_close, vring_state_set, feature_set and
migration_done) at the corresponding stages of vhost-user message
handling.
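
For reference, the engine layout these calls rely on is assumed to look
roughly like the sketch below. It is inferred from the call sites in this
patch; the authoritative definitions come from the earlier patches of this
series, so the struct and field shapes shown here are illustrative only:

	/*
	 * Hypothetical shape of the vDPA engine objects used by the call
	 * sites below, reconstructed from how they are dereferenced.
	 */
	struct rte_vdpa_dev_ops {
		int (*dev_conf)(int vid);	/* device ready, set up the datapath */
		int (*dev_close)(int vid);	/* device stopped, reset or destroyed */
		int (*vring_state_set)(int vid, int vring, int state);
		int (*feature_set)(int vid);	/* negotiated features available */
		int (*migration_done)(int vid);	/* live migration completed */
	};

	struct rte_vdpa_eng_driver {
		const char *name;
		struct rte_vdpa_dev_ops dev_ops;	/* embedded, hence the '.' access */
	};

	struct rte_vdpa_engine {
		struct rte_vdpa_eng_driver *eng_drv;
	};

	/* table indexed by the engine id (eid) stored per vhost device */
	extern struct rte_vdpa_engine *vdpa_engines[];

Each call site guards against eid < 0 (no engine bound to the socket) and
against a missing driver or unimplemented callback, so a driver only needs
to implement the ops it cares about.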

Signed-off-by: Zhihong Wang <zhihong.wang at intel.com>
---
 lib/librte_vhost/socket.c     |  3 +++
 lib/librte_vhost/vhost.c      |  6 ++++++
 lib/librte_vhost/vhost_user.c | 48 +++++++++++++++++++++++++++++++++++++++----
 3 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index c4f90af..8296e4b 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -205,6 +205,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	size = strnlen(vsocket->path, PATH_MAX);
 	vhost_set_ifname(vid, vsocket->path, size);
 
+	vhost_set_vdpa_eid(vid, vsocket->eid);
+	vhost_set_vdpa_did(vid, vsocket->did);
+
 	if (vsocket->dequeue_zero_copy)
 		vhost_enable_dequeue_zero_copy(vid);
 
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 2dff199..1a3ddd5 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -302,11 +302,17 @@ void
 vhost_destroy_device(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL)
 		return;
 
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		int eid = dev->eid;
+
+		if (eid >= 0 && vdpa_engines[eid] &&
+				vdpa_engines[eid]->eng_drv &&
+				vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(vid);
 	}
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index b1762e6..05b53fa 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -116,7 +116,13 @@ vhost_user_set_owner(void)
 static int
 vhost_user_reset_owner(struct virtio_net *dev)
 {
+	int eid = dev->eid;
+
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (eid >= 0 && vdpa_engines[eid] &&
+				vdpa_engines[eid]->eng_drv &&
+				vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
@@ -157,6 +163,7 @@ static int
 vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 {
 	uint64_t vhost_features = 0;
+	int eid = dev->eid;
 
 	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
 	if (features & ~vhost_features) {
@@ -186,6 +193,11 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 			dev->notify_ops->features_changed(dev->vid, features);
 	}
 
 	dev->features = features;
+	if (eid >= 0 && vdpa_engines[eid] &&
+			vdpa_engines[eid]->eng_drv &&
+			vdpa_engines[eid]->eng_drv->dev_ops.feature_set)
+		vdpa_engines[eid]->eng_drv->dev_ops.feature_set(dev->vid);
+
 	if (dev->features &
 		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
@@ -883,9 +895,14 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 			  VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+	int eid = dev->eid;
 
 	/* We have to stop the queue (virtio) if it is running. */
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		if (eid >= 0 && vdpa_engines[eid] &&
+				vdpa_engines[eid]->eng_drv &&
+				vdpa_engines[eid]->eng_drv->dev_ops.dev_close)
+			vdpa_engines[eid]->eng_drv->dev_ops.dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
 	}
@@ -928,16 +945,24 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
 			    VhostUserMsg *msg)
 {
 	int enable = (int)msg->payload.state.num;
+	int index = (int)msg->payload.state.index;
+	int eid = dev->eid;
 
 	RTE_LOG(INFO, VHOST_CONFIG,
 		"set queue enable: %d to qp idx: %d\n",
-		enable, msg->payload.state.index);
+		enable, index);
+
+	if (eid >= 0 && vdpa_engines[eid] &&
+			vdpa_engines[eid]->eng_drv &&
+			vdpa_engines[eid]->eng_drv->dev_ops.vring_state_set)
+		vdpa_engines[eid]->eng_drv->dev_ops.vring_state_set(dev->vid,
+				index, enable);
 
 	if (dev->notify_ops->vring_state_changed)
 		dev->notify_ops->vring_state_changed(dev->vid,
-				msg->payload.state.index, enable);
+				index, enable);
 
-	dev->virtqueue[msg->payload.state.index]->enabled = enable;
+	dev->virtqueue[index]->enabled = enable;
 
 	return 0;
 }
@@ -1049,6 +1074,7 @@ static int
 vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 {
 	uint8_t *mac = (uint8_t *)&msg->payload.u64;
+	int eid = dev->eid;
 
 	RTE_LOG(DEBUG, VHOST_CONFIG,
 		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1064,6 +1090,10 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 	 */
 	rte_smp_wmb();
 	rte_atomic16_set(&dev->broadcast_rarp, 1);
+	if (eid >= 0 && vdpa_engines[eid] &&
+			vdpa_engines[eid]->eng_drv &&
+			vdpa_engines[eid]->eng_drv->dev_ops.migration_done)
+		vdpa_engines[eid]->eng_drv->dev_ops.migration_done(dev->vid);
 
 	return 0;
 }
@@ -1526,8 +1556,18 @@ vhost_user_msg_handler(int vid, int fd)
 						"dequeue zero copy is enabled\n");
 			}
 
-			if (dev->notify_ops->new_device(dev->vid) == 0)
+			if (dev->notify_ops->new_device(vid) == 0)
 				dev->flags |= VIRTIO_DEV_RUNNING;
+
+			struct rte_vdpa_engine *eng;
+			int eid = dev->eid;
+
+			if (eid >= 0) {
+				eng = vdpa_engines[eid];
+				if (eng && eng->eng_drv &&
+						eng->eng_drv->dev_ops.dev_conf)
+					eng->eng_drv->dev_ops.dev_conf(vid);
+			}
 		}
 	}
 
-- 
2.7.5


