[dpdk-dev] [PATCH v3 1/5] vhost: add vectorized data path

Marvin Liu yong.liu at intel.com
Fri Oct 9 10:14:06 CEST 2020


Packed ring operations are split into batch and single functions from a
performance perspective. The ring operations in the batch functions can be
accelerated by SIMD instructions such as AVX512.

So this patch introduces a vectorized parameter in vhost. The vectorized data
path is selected when the platform and the ring format meet the requirements;
otherwise vhost falls back to the original data path.
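
A minimal sketch of the selection rule described above (the helper name and
the exact CPU check are illustrative only; the real dispatch is added in the
following patches of this series):

#include <stdbool.h>
#include <rte_cpuflags.h>
#include "vhost.h" /* struct virtio_net, vq_is_packed() */

/* Illustrative only: use the vectorized path when the socket requested it,
 * the ring is packed and the CPU supports AVX512F; otherwise fall back to
 * the original data path. */
static inline bool
vhost_use_vectorized_path(struct virtio_net *dev)
{
	if (!dev->vectorized || !vq_is_packed(dev))
		return false;

	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) > 0;
}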

Signed-off-by: Marvin Liu <yong.liu at intel.com>

diff --git a/doc/guides/nics/vhost.rst b/doc/guides/nics/vhost.rst
index d36f3120b..efdaf4de0 100644
--- a/doc/guides/nics/vhost.rst
+++ b/doc/guides/nics/vhost.rst
@@ -64,6 +64,11 @@ The user can specify below arguments in `--vdev` option.
     It is used to enable external buffer support in vhost library.
     (Default: 0 (disabled))
 
+#.  ``vectorized``:
+
+    It is used to enable vectorized data path support in vhost library.
+    (Default: 0 (disabled))
+
 Vhost PMD event handling
 ------------------------
 
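The new devarg is passed like the existing vhost PMD arguments, for example
(the socket path and queue count below are placeholders):

  testpmd -l 0-3 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,vectorized=1' -- -i
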
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index ba4c62aeb..5ef3844a0 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -118,6 +118,18 @@ The following is an overview of some key Vhost API functions:
 
     It is disabled by default.
 
+ - ``RTE_VHOST_USER_VECTORIZED``
+    The vectorized data path will be used when this flag is set. When the
+    packed ring is enabled, available descriptors are written in sequence by
+    the frontend driver. SIMD instructions like AVX can then handle multiple
+    descriptors simultaneously, which accelerates ring operations.
+
+    * Only the packed ring has a vectorized data path.
+
+    * Falls back to the normal data path if vectorization is not supported.
+
+    It is disabled by default.
+
 * ``rte_vhost_driver_set_features(path, features)``
 
   This function sets the feature bits the vhost-user driver supports. The
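
For applications that use the vhost library directly, the flag is passed to
rte_vhost_driver_register(). A minimal sketch (the socket path and helper name
are placeholders; callback registration is omitted for brevity):

#include <stdint.h>
#include <rte_vhost.h>

/* Illustrative helper: register a vhost-user socket, request the vectorized
 * data path, then start the driver. */
static int
register_vectorized_socket(const char *path)
{
	uint64_t flags = RTE_VHOST_USER_VECTORIZED;

	if (rte_vhost_driver_register(path, flags) != 0)
		return -1;

	/* rte_vhost_driver_callback_register() would normally be called here. */
	return rte_vhost_driver_start(path);
}
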
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 66efecb32..8f71054ad 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -34,6 +34,7 @@ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
 #define ETH_VHOST_VIRTIO_NET_F_HOST_TSO "tso"
 #define ETH_VHOST_LINEAR_BUF  "linear-buffer"
 #define ETH_VHOST_EXT_BUF  "ext-buffer"
+#define ETH_VHOST_VECTORIZED "vectorized"
 #define VHOST_MAX_PKT_BURST 32
 
 static const char *valid_arguments[] = {
@@ -45,6 +46,7 @@ static const char *valid_arguments[] = {
 	ETH_VHOST_VIRTIO_NET_F_HOST_TSO,
 	ETH_VHOST_LINEAR_BUF,
 	ETH_VHOST_EXT_BUF,
+	ETH_VHOST_VECTORIZED,
 	NULL
 };
 
@@ -1509,6 +1511,7 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
 	int tso = 0;
 	int linear_buf = 0;
 	int ext_buf = 0;
+	int vectorized = 0;
 	struct rte_eth_dev *eth_dev;
 	const char *name = rte_vdev_device_name(dev);
 
@@ -1618,6 +1621,17 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
 			flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
 	}
 
+	if (rte_kvargs_count(kvlist, ETH_VHOST_VECTORIZED) == 1) {
+		ret = rte_kvargs_process(kvlist,
+				ETH_VHOST_VECTORIZED,
+				&open_int, &vectorized);
+		if (ret < 0)
+			goto out_free;
+
+		if (vectorized == 1)
+			flags |= RTE_VHOST_USER_VECTORIZED;
+	}
+
 	if (dev->device.numa_node == SOCKET_ID_ANY)
 		dev->device.numa_node = rte_socket_id();
 
@@ -1666,4 +1680,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
 	"postcopy-support=<0|1> "
 	"tso=<0|1> "
 	"linear-buffer=<0|1> "
-	"ext-buffer=<0|1>");
+	"ext-buffer=<0|1> "
+	"vectorized=<0|1>");
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 010f16086..c49c1aca2 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -36,6 +36,7 @@ extern "C" {
 /* support only linear buffers (no chained mbufs) */
 #define RTE_VHOST_USER_LINEARBUF_SUPPORT	(1ULL << 6)
 #define RTE_VHOST_USER_ASYNC_COPY	(1ULL << 7)
+#define RTE_VHOST_USER_VECTORIZED	(1ULL << 8)
 
 /* Features. */
 #ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 0169d3648..e492c8c87 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -42,6 +42,7 @@ struct vhost_user_socket {
 	bool extbuf;
 	bool linearbuf;
 	bool async_copy;
+	bool vectorized;
 
 	/*
 	 * The "supported_features" indicates the feature bits the
@@ -241,6 +242,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 			dev->async_copy = 1;
 	}
 
+	if (vsocket->vectorized)
+		vhost_enable_vectorized(vid);
+
 	VHOST_LOG_CONFIG(INFO, "new device, handle is %d\n", vid);
 
 	if (vsocket->notify_ops->new_connection) {
@@ -876,6 +880,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 	vsocket->vdpa_dev = NULL;
 	vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
 	vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
+	vsocket->vectorized = flags & RTE_VHOST_USER_VECTORIZED;
 	vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
 
 	if (vsocket->async_copy &&
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index c7cd34e42..4b5ef10a8 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -738,6 +738,17 @@ vhost_enable_linearbuf(int vid)
 	dev->linearbuf = 1;
 }
 
+void
+vhost_enable_vectorized(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return;
+
+	dev->vectorized = 1;
+}
+
 int
 rte_vhost_get_mtu(int vid, uint16_t *mtu)
 {
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 20ccdc9bd..87583c0b6 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -363,6 +363,7 @@ struct virtio_net {
 	int			async_copy;
 	int			extbuf;
 	int			linearbuf;
+	int			vectorized;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
 	struct inflight_mem_info *inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
@@ -700,6 +701,7 @@ void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
 void vhost_enable_extbuf(int vid);
 void vhost_enable_linearbuf(int vid);
+void vhost_enable_vectorized(int vid);
 int vhost_enable_guest_notification(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, int enable);
 
-- 
2.17.1


