[dpdk-dev] [PATCH 1/3] vdpa/mlx5: manage virtqs by array

Matan Azrad <matan@mellanox.com>
Tue Mar 31 13:12:21 CEST 2020


As a preparation for listening to the virtq status before the device is
configured, manage the virtq structures in an array instead of a list.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
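For review context, here is a minimal standalone C sketch (not part of the
patch) of the layout this change moves to: the per-device structure ends with
a flexible array member sized from the device capability at allocation time,
so a vring is reached by direct indexing instead of an SLIST walk. The names
below are simplified stand-ins, not the driver's actual definitions.

#include <stdlib.h>
#include <stdint.h>

struct virtq {                 /* stand-in for struct mlx5_vdpa_virtq */
	void *hw_obj;          /* NULL while the queue is not created */
	uint16_t vq_size;
	int enable;
};

struct vdpa_priv {             /* stand-in for struct mlx5_vdpa_priv */
	uint16_t nr_virtqs;    /* number of vrings configured by the guest */
	struct virtq virtqs[]; /* flexible array member, replaces the SLIST */
};

/* One allocation covers the device and all possible virtqs
 * (the driver uses rte_zmalloc(); plain calloc() here).
 */
static struct vdpa_priv *
priv_alloc(uint16_t max_virtio_queues)
{
	return calloc(1, sizeof(struct vdpa_priv) +
			 sizeof(struct virtq) * max_virtio_queues * 2);
}

/* Direct indexing replaces the former SLIST_FOREACH() lookup. */
static struct virtq *
priv_virtq(struct vdpa_priv *priv, int vring)
{
	if (vring < 0 || vring >= priv->nr_virtqs ||
	    priv->virtqs[vring].hw_obj == NULL)
		return NULL;
	return &priv->virtqs[vring];
}
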
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 43 ++++++++++++++++------------------
 drivers/vdpa/mlx5/mlx5_vdpa.h       |  2 +-
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c    | 43 ++++++++++++++++------------------
 drivers/vdpa/mlx5/mlx5_vdpa_steer.c | 18 +++++++--------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 46 +++++++++++++++----------------------
 5 files changed, 68 insertions(+), 84 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f10647b..b22f074 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -116,20 +116,18 @@
 {
 	int did = rte_vhost_get_vdpa_device_id(vid);
 	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
-	struct mlx5_vdpa_virtq *virtq = NULL;
 
 	if (priv == NULL) {
 		DRV_LOG(ERR, "Invalid device id: %d.", did);
 		return -EINVAL;
 	}
-	SLIST_FOREACH(virtq, &priv->virtq_list, next)
-		if (virtq->index == vring)
-			break;
-	if (!virtq) {
+	if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
+	    (int)priv->caps.max_num_virtio_queues * 2) ||
+	    !priv->virtqs[vring].virtq) {
 		DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
 		return -EINVAL;
 	}
-	return mlx5_vdpa_virtq_enable(virtq, state);
+	return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
 }
 
 static int
@@ -482,28 +480,28 @@
 		rte_errno = ENODEV;
 		return -rte_errno;
 	}
-	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv),
-			   RTE_CACHE_LINE_SIZE);
-	if (!priv) {
-		DRV_LOG(ERR, "Failed to allocate private memory.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
 	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
 	if (ret) {
 		DRV_LOG(ERR, "Unable to read HCA capabilities.");
 		rte_errno = ENOTSUP;
 		goto error;
-	} else {
-		if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
-			DRV_LOG(ERR, "Not enough capabilities to support vdpa,"
-				" maybe old FW/OFED version?");
-			rte_errno = ENOTSUP;
-			goto error;
-		}
-		priv->caps = attr.vdpa;
-		priv->log_max_rqt_size = attr.log_max_rqt_size;
+	} else if (!attr.vdpa.valid || !attr.vdpa.max_num_virtio_queues) {
+		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
+			"old FW/OFED version?");
+		rte_errno = ENOTSUP;
+		goto error;
+	}
+	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
+			   sizeof(struct mlx5_vdpa_virtq) *
+			   attr.vdpa.max_num_virtio_queues * 2,
+			   RTE_CACHE_LINE_SIZE);
+	if (!priv) {
+		DRV_LOG(ERR, "Failed to allocate private memory.");
+		rte_errno = ENOMEM;
+		goto error;
 	}
+	priv->caps = attr.vdpa;
+	priv->log_max_rqt_size = attr.log_max_rqt_size;
 	priv->ctx = ctx;
 	priv->dev_addr.pci_addr = pci_dev->addr;
 	priv->dev_addr.type = PCI_ADDR;
@@ -519,7 +517,6 @@
 		goto error;
 	}
 	SLIST_INIT(&priv->mr_list);
-	SLIST_INIT(&priv->virtq_list);
 	pthread_mutex_lock(&priv_list_lock);
 	TAILQ_INSERT_TAIL(&priv_list, priv, next);
 	pthread_mutex_unlock(&priv_list_lock);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 75af410..baec106 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -120,11 +120,11 @@ struct mlx5_vdpa_priv {
 	uint16_t nr_virtqs;
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
-	SLIST_HEAD(virtq_list, mlx5_vdpa_virtq) virtq_list;
 	struct mlx5_vdpa_steer steer;
 	struct mlx5dv_var *var;
 	void *virtq_db_addr;
 	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
+	struct mlx5_vdpa_virtq virtqs[];
 };
 
 /**
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 4457760..77f2eda 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -15,13 +15,12 @@
 		.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE,
 		.dirty_bitmap_dump_enable = enable,
 	};
-	struct mlx5_vdpa_virtq *virtq;
+	int i;
 
-	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		attr.queue_index = virtq->index;
-		if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
-			DRV_LOG(ERR, "Failed to modify virtq %d logging.",
-				virtq->index);
+	for (i = 0; i < priv->nr_virtqs; ++i) {
+		attr.queue_index = i;
+		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+			DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
 			return -1;
 		}
 	}
@@ -47,7 +46,7 @@
 		.dirty_bitmap_size = log_size,
 	};
 	struct mlx5_vdpa_query_mr *mr = rte_malloc(__func__, sizeof(*mr), 0);
-	struct mlx5_vdpa_virtq *virtq;
+	int i;
 
 	if (!mr) {
 		DRV_LOG(ERR, "Failed to allocate mem for lm mr.");
@@ -67,11 +66,10 @@
 		goto err;
 	}
 	attr.dirty_bitmap_mkey = mr->mkey->id;
-	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		attr.queue_index = virtq->index;
-		if (mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr)) {
-			DRV_LOG(ERR, "Failed to modify virtq %d for lm.",
-				virtq->index);
+	for (i = 0; i < priv->nr_virtqs; ++i) {
+		attr.queue_index = i;
+		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+			DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
 			goto err;
 		}
 	}
@@ -94,9 +92,9 @@
 mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 {
 	struct mlx5_devx_virtq_attr attr = {0};
-	struct mlx5_vdpa_virtq *virtq;
 	uint64_t features;
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
+	int i;
 
 	if (ret) {
 		DRV_LOG(ERR, "Failed to get negotiated features.");
@@ -104,27 +102,26 @@
 	}
 	if (!RTE_VHOST_NEED_LOG(features))
 		return 0;
-	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		ret = mlx5_vdpa_virtq_modify(virtq, 0);
+	for (i = 0; i < priv->nr_virtqs; ++i) {
+		ret = mlx5_vdpa_virtq_modify(&priv->virtqs[i], 0);
 		if (ret)
 			return -1;
-		if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
-			DRV_LOG(ERR, "Failed to query virtq %d.", virtq->index);
+		if (mlx5_devx_cmd_query_virtq(priv->virtqs[i].virtq, &attr)) {
+			DRV_LOG(ERR, "Failed to query virtq %d.", i);
 			return -1;
 		}
 		DRV_LOG(INFO, "Query vid %d vring %d: hw_available_idx=%d, "
-			"hw_used_index=%d", priv->vid, virtq->index,
+			"hw_used_index=%d", priv->vid, i,
 			attr.hw_available_index, attr.hw_used_index);
-		ret = rte_vhost_set_vring_base(priv->vid, virtq->index,
+		ret = rte_vhost_set_vring_base(priv->vid, i,
 					       attr.hw_available_index,
 					       attr.hw_used_index);
 		if (ret) {
-			DRV_LOG(ERR, "Failed to set virtq %d base.",
-				virtq->index);
+			DRV_LOG(ERR, "Failed to set virtq %d base.", i);
 			return -1;
 		}
-		rte_vhost_log_used_vring(priv->vid, virtq->index, 0,
-				       MLX5_VDPA_USED_RING_LEN(virtq->vq_size));
+		rte_vhost_log_used_vring(priv->vid, i, 0,
+			      MLX5_VDPA_USED_RING_LEN(priv->virtqs[i].vq_size));
 	}
 	return 0;
 }
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 9c11452..96ffc21 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -76,13 +76,13 @@
 static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
-	struct mlx5_vdpa_virtq *virtq;
+	int i;
 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
 				 1 << priv->log_max_rqt_size);
 	struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
 						      + rqt_n *
 						      sizeof(uint32_t), 0);
-	uint32_t i = 0, j;
+	uint32_t k = 0, j;
 	int ret = 0;
 
 	if (!attr) {
@@ -90,15 +90,15 @@
 		rte_errno = ENOMEM;
 		return -ENOMEM;
 	}
-	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
-		if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
-		    virtq->enable) {
-			attr->rq_list[i] = virtq->virtq->id;
-			i++;
+	for (i = 0; i < priv->nr_virtqs; i++) {
+		if (is_virtq_recvq(i, priv->nr_virtqs) &&
+		    priv->virtqs[i].enable) {
+			attr->rq_list[k] = priv->virtqs[i].virtq->id;
+			k++;
 		}
 	}
-	for (j = 0; i != rqt_n; ++i, ++j)
-		attr->rq_list[i] = attr->rq_list[j];
+	for (j = 0; k != rqt_n; ++k, ++j)
+		attr->rq_list[k] = attr->rq_list[j];
 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
 	attr->rqt_max_size = rqt_n;
 	attr->rqt_actual_size = rqt_n;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 8bebb92..3575272 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,12 +59,9 @@
 				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
 			}
 		}
-		virtq->intr_handle.fd = -1;
 	}
-	if (virtq->virtq) {
+	if (virtq->virtq)
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
-		virtq->virtq = NULL;
-	}
 	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		if (virtq->umems[i].obj)
 			claim_zero(mlx5_glue->devx_umem_dereg
@@ -72,27 +69,20 @@
 		if (virtq->umems[i].buf)
 			rte_free(virtq->umems[i].buf);
 	}
-	memset(&virtq->umems, 0, sizeof(virtq->umems));
 	if (virtq->eqp.fw_qp)
 		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+	memset(virtq, 0, sizeof(*virtq));
+	virtq->intr_handle.fd = -1;
 	return 0;
 }
 
 void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
-	struct mlx5_vdpa_virtq *entry;
-	struct mlx5_vdpa_virtq *next;
+	int i;
 
-	entry = SLIST_FIRST(&priv->virtq_list);
-	while (entry) {
-		next = SLIST_NEXT(entry, next);
-		mlx5_vdpa_virtq_unset(entry);
-		SLIST_REMOVE(&priv->virtq_list, entry, mlx5_vdpa_virtq, next);
-		rte_free(entry);
-		entry = next;
-	}
-	SLIST_INIT(&priv->virtq_list);
+	for (i = 0; i < priv->nr_virtqs; i++)
+		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
 	if (priv->tis) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
 		priv->tis = NULL;
@@ -106,6 +96,7 @@
 		priv->virtq_db_addr = NULL;
 	}
 	priv->features = 0;
+	priv->nr_virtqs = 0;
 }
 
 int
@@ -140,9 +131,9 @@
 }
 
 static int
-mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv,
-		      struct mlx5_vdpa_virtq *virtq, int index)
+mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 {
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
 	struct rte_vhost_vring vq;
 	struct mlx5_devx_virtq_attr attr = {0};
 	uint64_t gpa;
@@ -340,7 +331,6 @@
 mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 {
 	struct mlx5_devx_tis_attr tis_attr = {0};
-	struct mlx5_vdpa_virtq *virtq;
 	uint32_t i;
 	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
@@ -349,6 +339,12 @@
 		DRV_LOG(ERR, "Failed to configure negotiated features.");
 		return -1;
 	}
+	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
+			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)nr_vring);
+		return -E2BIG;
+	}
 	/* Always map the entire page. */
 	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
 				   PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
@@ -372,16 +368,10 @@
 		DRV_LOG(ERR, "Failed to create TIS.");
 		goto error;
 	}
-	for (i = 0; i < nr_vring; i++) {
-		virtq = rte_zmalloc(__func__, sizeof(*virtq), 0);
-		if (!virtq || mlx5_vdpa_virtq_setup(priv, virtq, i)) {
-			if (virtq)
-				rte_free(virtq);
-			goto error;
-		}
-		SLIST_INSERT_HEAD(&priv->virtq_list, virtq, next);
-	}
 	priv->nr_virtqs = nr_vring;
+	for (i = 0; i < nr_vring; i++)
+		if (mlx5_vdpa_virtq_setup(priv, i))
+			goto error;
 	return 0;
 error:
 	mlx5_vdpa_virtqs_release(priv);
-- 
1.8.3.1


