[dpdk-dev] [PATCH 04/17] vdpa/mlx5: move DevX CQ creation to common

Michael Baum michaelba at nvidia.com
Thu Dec 17 12:44:22 CET 2020


Use the common function for DevX CQ creation instead of open-coding it in
the vDPA driver. This removes the driver's private CQ buffer allocation,
umem registration, doorbell-record setup and EQ number query; the common
helper performs all of these internally.
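
After this change, CQ setup in the driver collapses to a single call into
the common layer. A minimal sketch of the new life cycle, using only the
calls this patch introduces (error handling shortened; the real
mlx5_vdpa_cq_create() below additionally subscribes the CQ to the event
channel and arms it):

	struct mlx5_devx_cq_attr attr = {
		.use_first_only = 1,
		.uar_page_id = priv->uar->page_id,
	};

	/* One call allocates the CQ buffer, registers the umem, queries
	 * the EQN and creates the DevX CQ object - all steps the driver
	 * used to perform by hand.
	 */
	if (mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
				SOCKET_ID_ANY))
		return -1;
	/* CQEs and the doorbell record are reached through the common
	 * object: cq->cq_obj.cqes[], cq->cq_obj.db_rec, cq->cq_obj.cq->id.
	 */

	/* Teardown releases the object, umem and buffer together; the
	 * unconditional call in mlx5_vdpa_cq_destroy() below suggests the
	 * helper tolerates an already-zeroed object.
	 */
	mlx5_devx_cq_destroy(&cq->cq_obj);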

Signed-off-by: Michael Baum <michaelba at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.h       | 10 +----
 drivers/vdpa/mlx5/mlx5_vdpa_event.c | 81 +++++++++++--------------------------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  2 +-
 3 files changed, 26 insertions(+), 67 deletions(-)
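
For orientation, the common object bundles what the fields removed from
struct mlx5_vdpa_cq in the first hunk used to hold. Implied layout,
reconstructed from the removed fields and the accesses in this patch (the
authoritative definition lives in mlx5_common_devx.h):

	struct mlx5_devx_cq {
		struct mlx5_devx_obj *cq;          /* DevX CQ object. */
		struct mlx5dv_devx_umem *umem_obj; /* Registered CQ memory. */
		union {
			volatile void *umem_buf;
			volatile struct mlx5_cqe *cqes; /* CQE ring. */
		};
		volatile uint32_t *db_rec;         /* Doorbell record. */
	};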

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index d039ada..ddee9dc 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -22,6 +22,7 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
+#include <mlx5_common_devx.h>
 #include <mlx5_prm.h>
 
 
@@ -46,13 +47,7 @@ struct mlx5_vdpa_cq {
 	uint32_t armed:1;
 	int callfd;
 	rte_spinlock_t sl;
-	struct mlx5_devx_obj *cq;
-	struct mlx5dv_devx_umem *umem_obj;
-	union {
-		volatile void *umem_buf;
-		volatile struct mlx5_cqe *cqes;
-	};
-	volatile uint32_t *db_rec;
+	struct mlx5_devx_cq cq_obj;
 	uint64_t errors;
 };
 
@@ -144,7 +139,6 @@ struct mlx5_vdpa_priv {
 	uint32_t gpa_mkey_index;
 	struct ibv_mr *null_mr;
 	struct rte_vhost_memory *vmem;
-	uint32_t eqn;
 	struct mlx5dv_devx_event_channel *eventc;
 	struct mlx5dv_devx_event_channel *err_chnl;
 	struct mlx5dv_devx_uar *uar;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 3aeaeb8..ef92338 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -7,6 +7,7 @@
 #include <sys/eventfd.h>
 
 #include <rte_malloc.h>
+#include <rte_memory.h>
 #include <rte_errno.h>
 #include <rte_lcore.h>
 #include <rte_atomic.h>
@@ -15,6 +16,7 @@
 #include <rte_alarm.h>
 
 #include <mlx5_common.h>
+#include <mlx5_common_devx.h>
 #include <mlx5_glue.h>
 
 #include "mlx5_vdpa_utils.h"
@@ -47,7 +49,6 @@
 		priv->eventc = NULL;
 	}
 #endif
-	priv->eqn = 0;
 }
 
 /* Prepare all the global resources for all the event objects.*/
@@ -58,11 +59,6 @@
 
 	if (priv->eventc)
 		return 0;
-	if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
-		rte_errno = errno;
-		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
-		return -1;
-	}
 	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
 			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
 	if (!priv->eventc) {
@@ -97,12 +93,7 @@
 static void
 mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
 {
-	if (cq->cq)
-		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
-	if (cq->umem_obj)
-		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
-	if (cq->umem_buf)
-		rte_free((void *)(uintptr_t)cq->umem_buf);
+	mlx5_devx_cq_destroy(&cq->cq_obj);
 	memset(cq, 0, sizeof(*cq));
 }
 
@@ -112,12 +103,12 @@
 	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
 	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
 	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
-	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
+	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
 	uint64_t db_be = rte_cpu_to_be_64(doorbell);
 	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);
 
 	rte_io_wmb();
-	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
+	cq->cq_obj.db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
 	rte_wmb();
 #ifdef RTE_ARCH_64
 	*(uint64_t *)addr = db_be;
@@ -134,49 +125,23 @@
 mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
 		    int callfd, struct mlx5_vdpa_cq *cq)
 {
-	struct mlx5_devx_cq_attr attr = {0};
-	size_t pgsize = sysconf(_SC_PAGESIZE);
-	uint32_t umem_size;
+	struct mlx5_devx_cq_attr attr = {
+		.use_first_only = 1,
+		.uar_page_id = priv->uar->page_id,
+	};
 	uint16_t event_nums[1] = {0};
-	uint16_t cq_size = 1 << log_desc_n;
 	int ret;
 
-	cq->log_desc_n = log_desc_n;
-	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
-	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
-	if (!cq->umem_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
-		rte_errno = ENOMEM;
-		return -ENOMEM;
-	}
-	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
-						(void *)(uintptr_t)cq->umem_buf,
-						umem_size,
-						IBV_ACCESS_LOCAL_WRITE);
-	if (!cq->umem_obj) {
-		DRV_LOG(ERR, "Failed to register umem for CQ.");
-		goto error;
-	}
-	attr.q_umem_valid = 1;
-	attr.db_umem_valid = 1;
-	attr.use_first_only = 1;
-	attr.overrun_ignore = 0;
-	attr.uar_page_id = priv->uar->page_id;
-	attr.q_umem_id = cq->umem_obj->umem_id;
-	attr.q_umem_offset = 0;
-	attr.db_umem_id = cq->umem_obj->umem_id;
-	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
-	attr.eqn = priv->eqn;
-	attr.log_cq_size = log_desc_n;
-	attr.log_page_size = rte_log2_u32(pgsize);
-	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
-	if (!cq->cq)
+	ret = mlx5_devx_cq_create(priv->ctx, &cq->cq_obj, log_desc_n, &attr,
+				  SOCKET_ID_ANY);
+	if (ret)
 		goto error;
-	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
 	cq->cq_ci = 0;
+	cq->log_desc_n = log_desc_n;
 	rte_spinlock_init(&cq->sl);
 	/* Subscribe CQ event to the event channel controlled by the driver. */
-	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
+	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
+						   cq->cq_obj.cq->obj,
 						   sizeof(event_nums),
 						   event_nums,
 						   (uint64_t)(uintptr_t)cq);
@@ -187,8 +152,8 @@
 	}
 	cq->callfd = callfd;
 	/* Init CQ to ones to be in HW owner in the start. */
-	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
-	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
+	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
+	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
 	/* First arming. */
 	mlx5_vdpa_cq_arm(priv, cq);
 	return 0;
@@ -215,7 +180,7 @@
 	uint16_t cur_wqe_counter;
 	uint16_t comp;
 
-	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
+	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
 	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
 	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
 	if (comp) {
@@ -229,7 +194,7 @@
 			cq->errors++;
 		rte_io_wmb();
 		/* Ring CQ doorbell record. */
-		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 		rte_io_wmb();
 		/* Ring SW QP doorbell record. */
 		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
@@ -245,7 +210,7 @@
 
 	for (i = 0; i < priv->nr_virtqs; i++) {
 		cq = &priv->virtqs[i].eqp.cq;
-		if (cq->cq && !cq->armed)
+		if (cq->cq_obj.cq && !cq->armed)
 			mlx5_vdpa_cq_arm(priv, cq);
 	}
 }
@@ -290,7 +255,7 @@
 		pthread_mutex_lock(&priv->vq_config_lock);
 		for (i = 0; i < priv->nr_virtqs; i++) {
 			cq = &priv->virtqs[i].eqp.cq;
-			if (cq->cq && !cq->armed) {
+			if (cq->cq_obj.cq && !cq->armed) {
 				uint32_t comp = mlx5_vdpa_cq_poll(cq);
 
 				if (comp) {
@@ -369,7 +334,7 @@
 		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
 			" Timer is %s, cq ci is %u.\n",
 			priv->vdev->device->name,
-			(int)virtq->index, cq->cq->id,
+			(int)virtq->index, cq->cq_obj.cq->id,
 			priv->timer_on ? "on" : "off", cq->cq_ci);
 		cq->armed = 0;
 	}
@@ -679,7 +644,7 @@
 		goto error;
 	}
 	attr.uar_index = priv->uar->page_id;
-	attr.cqn = eqp->cq.cq->id;
+	attr.cqn = eqp->cq.cq_obj.cq->id;
 	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
 	attr.rq_size = 1 << log_desc_n;
 	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 3e882e4..cc77314 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -497,7 +497,7 @@
 		return -1;
 	if (vq.size != virtq->vq_size || vq.kickfd != virtq->intr_handle.fd)
 		return 1;
-	if (virtq->eqp.cq.cq) {
+	if (virtq->eqp.cq.cq_obj.cq) {
 		if (vq.callfd != virtq->eqp.cq.callfd)
 			return 1;
 	} else if (vq.callfd != -1) {
-- 
1.8.3.1


