[dpdk-dev] [PATCH v2 3/6] vhost: add apis for datapath configuration

Zhihong Wang zhihong.wang at intel.com
Mon Mar 5 10:20:26 CET 2018


This patch adds APIs for datapath configuration. The eid and did of the
vhost-user socket can be configured to identify the actual device.

When the default software datapath is used, eid and did are set to -1.
When an alternative datapath is used, eid and did are set by the app to
specify which device to use. Each vhost-user socket can have only one
connection in this case.

Signed-off-by: Zhihong Wang <zhihong.wang at intel.com>
---
 lib/librte_vhost/rte_vhost.h | 64 +++++++++++++++++++++++++++++++++++++++++++
 lib/librte_vhost/socket.c    | 65 ++++++++++++++++++++++++++++++++++++++++++++
 lib/librte_vhost/vhost.c     | 50 ++++++++++++++++++++++++++++++++++
 lib/librte_vhost/vhost.h     | 10 +++++++
 4 files changed, 189 insertions(+)

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index b05162366..a76acea6b 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -178,6 +178,50 @@ int rte_vhost_driver_register(const char *path, uint64_t flags);
 int rte_vhost_driver_unregister(const char *path);
 
 /**
+ * Set the vdpa engine id; when set, the socket is limited to a single connection
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param eid
+ *  Engine id
+ * @return
+ *  0 on success, -1 on failure
+ */
+int rte_vhost_driver_set_vdpa_eid(const char *path, int eid);
+
+/**
+ * Set the vdpa device id; when set, the socket is limited to a single connection
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param did
+ *  Device id
+ * @return
+ *  0 on success, -1 on failure
+ */
+int rte_vhost_driver_set_vdpa_did(const char *path, int did);
+
+/**
+ * Get the engine id
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @return
+ *  Engine id, -1 on failure
+ */
+int rte_vhost_driver_get_vdpa_eid(const char *path);
+
+/**
+ * Get the device id
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @return
+ *  Device id, -1 on failure
+ */
+int rte_vhost_driver_get_vdpa_did(const char *path);
+
+/**
  * Set the feature bits the vhost-user driver supports.
  *
  * @param path
@@ -442,6 +486,26 @@ int rte_vhost_vring_call(int vid, uint16_t vring_idx);
  */
 uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
 
+/**
+ * Get vdpa engine id for vhost device.
+ *
+ * @param vid
+ *  vhost device ID
+ * @return
+ *  engine id
+ */
+int rte_vhost_get_vdpa_eid(int vid);
+
+/**
+ * Get vdpa device id for vhost device.
+ *
+ * @param vid
+ *  vhost device ID
+ * @return
+ *  device id
+ */
+int rte_vhost_get_vdpa_did(int vid);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 6ba60f5dc..5367ba771 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -52,6 +52,13 @@ struct vhost_user_socket {
 	uint64_t supported_features;
 	uint64_t features;
 
+	/* Engine and device id identifying a specific port on a specific
+	 * backend; both are -1 for the software datapath. When they are
+	 * set, the socket is limited to a single connection.
+	 */
+	int eid;
+	int did;
+
 	struct vhost_device_ops const *notify_ops;
 };
 
@@ -535,6 +542,64 @@ find_vhost_user_socket(const char *path)
 }
 
 int
+rte_vhost_driver_set_vdpa_eid(const char *path, int eid)
+{
+	struct vhost_user_socket *vsocket;
+
+	/* Bind the socket at @path to a vdpa engine id; per the commit
+	 * message, -1 selects the default software datapath.
+	 * NOTE(review): eid is not range-checked here — validation against
+	 * MAX_VDPA_ENGINE_NUM happens only later in vhost_set_vdpa_eid();
+	 * confirm an invalid id stored here is acceptable until then.
+	 * The global mutex protects the socket list against concurrent
+	 * register/unregister.
+	 */
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket)
+		vsocket->eid = eid;
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	/* -1 when no vhost-user socket is registered for this path */
+	return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_set_vdpa_did(const char *path, int did)
+{
+	struct vhost_user_socket *vsocket;
+
+	/* Bind the socket at @path to a vdpa device id; -1 selects the
+	 * default software datapath. As with the eid setter, no range
+	 * check is done here — any int is stored as-is.
+	 */
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket)
+		vsocket->did = did;
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	/* -1 when no vhost-user socket is registered for this path */
+	return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_get_vdpa_eid(const char *path)
+{
+	struct vhost_user_socket *vsocket;
+	int eid = -1;
+
+	/* NOTE(review): the -1 return is ambiguous — it means both
+	 * "path not registered" and "eid unset / software datapath";
+	 * callers cannot tell the two apart.
+	 */
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket)
+		eid = vsocket->eid;
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	return eid;
+}
+
+int
+rte_vhost_driver_get_vdpa_did(const char *path)
+{
+	struct vhost_user_socket *vsocket;
+	int did = -1;
+
+	/* Same ambiguity as the eid getter: -1 means either "path not
+	 * registered" or "did unset / software datapath".
+	 */
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket)
+		did = vsocket->did;
+	pthread_mutex_unlock(&vhost_user.mutex);
+
+	return did;
+}
+
+int
 rte_vhost_driver_disable_features(const char *path, uint64_t features)
 {
 	struct vhost_user_socket *vsocket;
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index f6f12a03b..45cf90f99 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -283,6 +283,8 @@ vhost_new_device(void)
 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 	dev->slave_req_fd = -1;
+	dev->eid = -1;
+	dev->did = -1;
 
 	return i;
 }
@@ -311,6 +313,34 @@ vhost_destroy_device(int vid)
 }
 
 void
+vhost_set_vdpa_eid(int vid, int eid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	/* Only accept an eid that maps to a registered vdpa engine;
+	 * out-of-range or unregistered ids are silently ignored (no
+	 * error is reported to the caller — void return).
+	 */
+	if (eid < 0 || eid >= MAX_VDPA_ENGINE_NUM || vdpa_engines[eid] == NULL)
+		return;
+
+	if (dev == NULL)
+		return;
+
+	dev->eid = eid;
+}
+
+void
+vhost_set_vdpa_did(int vid, int did)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	/* Negative ids are silently ignored. NOTE(review): unlike the eid
+	 * path there is no upper-bound/registration check here —
+	 * presumably the engine validates the did; confirm.
+	 */
+	if (did < 0)
+		return;
+
+	if (dev == NULL)
+		return;
+
+	dev->did = did;
+}
+
+void
 vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 {
 	struct virtio_net *dev;
@@ -614,3 +644,23 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 
 	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 }
+
+/* Return the vdpa engine id bound to the device; -1 when the device
+ * does not exist or when it uses the default software datapath
+ * (eid is initialized to -1 in vhost_new_device()).
+ */
+int rte_vhost_get_vdpa_eid(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	return dev->eid;
+}
+
+/* Return the vdpa device id bound to the device; -1 when the device
+ * does not exist or when it uses the default software datapath
+ * (did is initialized to -1 in vhost_new_device()).
+ */
+int rte_vhost_get_vdpa_did(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	return dev->did;
+}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index a0b0520e2..b3fa6da03 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -19,6 +19,7 @@
 #include <rte_rwlock.h>
 
 #include "rte_vhost.h"
+#include "rte_vdpa.h"
 
 /* Used to indicate that the device is running on a data core */
 #define VIRTIO_DEV_RUNNING 1
@@ -239,6 +240,12 @@ struct virtio_net {
 	struct guest_page       *guest_pages;
 
 	int			slave_req_fd;
+
+	/* Engine and device id identifying a specific port on a specific
+	 * backend; both are -1 for the software datapath.
+	 */
+	int			eid;
+	int			did;
 } __rte_cache_aligned;
 
 
@@ -365,6 +372,9 @@ void free_vq(struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
+void vhost_set_vdpa_eid(int vid, int eid);
+void vhost_set_vdpa_did(int vid, int did);
+
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_enable_dequeue_zero_copy(int vid);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
-- 
2.13.6



More information about the dev mailing list