[dpdk-dev] [PATCH v2] vhost: add support for dynamic vhost PMD creation

Ferruh Yigit ferruh.yigit at intel.com
Wed May 18 19:10:22 CEST 2016


Add rte_eth_from_vhost() API to let applications create vhost PMD
ports dynamically.
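
A minimal usage sketch (illustrative only; assumes EAL is already
initialized and "mb_pool" was created beforehand, e.g. with
rte_pktmbuf_pool_create()):

	int port_id;

	port_id = rte_eth_from_vhost("eth_vhost0", "/tmp/sock0",
			rte_socket_id(), mb_pool, NULL);
	if (port_id < 0)
		rte_exit(EXIT_FAILURE, "vhost ethdev creation failed\n");

	/* port_id can now be driven through the usual ethdev API,
	 * e.g. rte_eth_dev_start() and rte_eth_rx_burst(). */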

Signed-off-by: Ferruh Yigit <ferruh.yigit at intel.com>

---

v2:
* drop rte_ prefix from non-public function
* re-use eth_rx_queue_setup/eth_tx_queue_setup
* pass vdev options as parameter to API, as sketched below
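
  For illustration, the same vdev options can be passed through the new
  params argument; on conflict the kvargs override the direct arguments,
  so this sketch connects to /tmp/sock1 with 2 rx/tx queues (socket
  paths are examples):

	port_id = rte_eth_from_vhost("eth_vhost1", "/tmp/sock0",
			rte_socket_id(), mb_pool,
			"iface=/tmp/sock1,queues=2");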
---
 drivers/net/vhost/rte_eth_vhost.c           | 130 ++++++++++++++++++++++++++++
 drivers/net/vhost/rte_eth_vhost.h           |  26 ++++++
 drivers/net/vhost/rte_pmd_vhost_version.map |   7 ++
 3 files changed, 163 insertions(+)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 310cbef..8019eb1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -796,6 +796,79 @@ error:
 	return -1;
 }
 
+static int
+eth_from_vhost_create(const char *name, char *iface_name, uint16_t nb_queues,
+		const unsigned int numa_node, struct rte_mempool *mb_pool)
+{
+	struct rte_eth_dev_data *data = NULL;
+	struct pmd_internal *internal = NULL;
+	struct rte_eth_dev *dev = NULL;
+	struct internal_list *list;
+	int port_id;
+	int ret;
+	int i;
+
+	port_id = eth_dev_vhost_create(name, iface_name, nb_queues, numa_node);
+	if (port_id < 0)
+		return -1;
+
+	dev = &rte_eth_devices[port_id];
+	data = dev->data;
+
+	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->rx_queues == NULL)
+		goto error;
+
+	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->tx_queues == NULL)
+		goto error;
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_rx_queue_setup(dev, i, 0, numa_node, NULL, mb_pool);
+		if (ret < 0)
+			goto error;
+	}
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_tx_queue_setup(dev, i, 0, numa_node, NULL);
+		if (ret < 0)
+			goto error;
+	}
+
+	return port_id;
+
+error:
+	internal = data->dev_private;
+	list = find_internal_resource(internal->iface_name);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (internal)
+		free(internal->dev_name);
+	rte_free(vring_states[port_id]);
+	rte_free(data->mac_addrs);
+	rte_eth_dev_release_port(dev);
+	if (data->rx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			rte_free(data->rx_queues[i]);
+		rte_free(data->rx_queues);
+	}
+	if (data->tx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			rte_free(data->tx_queues[i]);
+		rte_free(data->tx_queues);
+	}
+	rte_free(internal);
+	rte_free(list);
+	rte_free(data);
+
+	return -1;
+}
+
 static inline int
 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
 {
@@ -827,6 +900,63 @@ open_queues(const char *key __rte_unused, const char *value, void *extra_args)
 	return 0;
 }
 
+int
+rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params)
+{
+	char *iface_name = iface_name_arg;
+	struct rte_kvargs *kvlist = NULL;
+	uint16_t queues = 1;
+	int port_id;
+	int ret;
+
+	if (!name || !mb_pool)
+		return -1;
+
+	if (params) {
+		kvlist = rte_kvargs_parse(params, valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
+						 &open_iface, &iface_name);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
+						 &open_queues, &queues);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+	}
+	rte_kvargs_free(kvlist);
+	if (!iface_name || !queues)
+		return -1;
+
+	port_id = eth_from_vhost_create(name, iface_name, queues, numa_node,
+			mb_pool);
+	if (port_id < 0)
+		return port_id;
+
+	ret = rte_vhost_driver_register(iface_name);
+	if (ret < 0)
+		return ret;
+
+	ret = vhost_driver_session_start();
+	if (ret < 0)
+		return ret;
+
+	return port_id;
+}
+
 static int
 rte_pmd_vhost_devinit(const char *name, const char *params)
 {
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index ff5d877..480dac8 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -102,6 +102,32 @@ struct rte_eth_vhost_queue_event {
 int rte_eth_vhost_get_queue_event(uint8_t port_id,
 		struct rte_eth_vhost_queue_event *event);
 
+/**
+ * Create a new ethdev from a vhost device
+ *
+ * @param name
+ *    Name to be given to the new ethdev
+ * @param iface_name_arg
+ *    Specifies a path to connect to a QEMU virtio-net device
+ * @param numa_node
+ *    The numa node on which the memory for this port is to be allocated
+ * @param mb_pool
+ *    Memory pool used by the created ethdev
+ * @param params
+ *    Optional argument list, supported arguments:
+ *       iface : iface_name,
+ *       queues: number of rx/tx queues [defaults to 1]
+ *    On conflict, these values override the direct API parameters.
+ *    Sample: params="iface=/tmp/sock0,queues=1"
+ *
+ * @return
+ *  - On success, the port_id of the created ethdev.
+ *  - On failure, a negative value.
+ */
+int rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 65bf3a8..bb2fe29 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -8,3 +8,10 @@ DPDK_16.04 {
 
 	local: *;
 };
+
+DPDK_16.07 {
+	global:
+
+	rte_eth_from_vhost;
+
+} DPDK_16.04;
-- 
2.5.5