[dpdk-dev] [RFC 1/3] common/mlx5: add common qp_create
Raja Zidane
rzidane at nvidia.com
Wed Aug 18 17:14:39 CEST 2021
Signed-off-by: Raja Zidane <rzidane at nvidia.com>
---
drivers/common/mlx5/mlx5_common_devx.c | 111 +++++++++++++++++++++++++
drivers/common/mlx5/mlx5_common_devx.h | 20 +++++
drivers/common/mlx5/version.map | 2 +
drivers/crypto/mlx5/mlx5_crypto.c | 80 +++++++-----------
drivers/crypto/mlx5/mlx5_crypto.h | 5 +-
drivers/vdpa/mlx5/mlx5_vdpa.h | 5 +-
drivers/vdpa/mlx5/mlx5_vdpa_event.c | 58 ++++---------
7 files changed, 181 insertions(+), 100 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_common_devx.c b/drivers/common/mlx5/mlx5_common_devx.c
index 22c8d356c4..640fe3bbb9 100644
--- a/drivers/common/mlx5/mlx5_common_devx.c
+++ b/drivers/common/mlx5/mlx5_common_devx.c
@@ -271,6 +271,117 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
return -rte_errno;
}
+/**
+ * Destroy DevX Queue Pair.
+ *
+ * @param[in] qp
+ * DevX QP to destroy.
+ */
+void
+mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
+{
+ if (qp->qp)
+ claim_zero(mlx5_devx_cmd_destroy(qp->qp));
+ if (qp->umem_obj)
+ claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
+ if (qp->umem_buf)
+ mlx5_free((void *)(uintptr_t)qp->umem_buf);
+}
+
+/**
+ * Create Queue Pair using DevX API.
+ *
+ * Gets a pointer to a partially initialized attributes structure, and updates
+ * the following fields:
+ *   wq_umem_id
+ *   wq_umem_offset
+ *   dbr_umem_valid
+ *   dbr_umem_id
+ *   dbr_address
+ *   log_page_size
+ *
+ * All other attributes (e.g. sq_size, rq_size, pd, cqn) must be
+ * initialized by the caller before calling this function.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param[in,out] qp_obj
+ * Pointer to QP to create.
+ * @param[in] log_wqbb_n
+ * Log of number of WQBBs in queue.
+ * @param[in] attr
+ * Pointer to QP attributes structure.
+ * @param[in] socket
+ * Socket to use for allocation.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
+ struct mlx5_devx_qp_attr *attr, int socket)
+{
+ struct mlx5_devx_obj *qp = NULL;
+ struct mlx5dv_devx_umem *umem_obj = NULL;
+ void *umem_buf = NULL;
+ size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
+ uint32_t umem_size, umem_dbrec;
+ uint16_t qp_size = 1 << log_wqbb_n;
+ int ret;
+
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get WQE buf alignment.");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ /* Allocate memory buffer for WQEs and doorbell record. */
+ umem_size = MLX5_WQE_SIZE * qp_size;
+ umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+ umem_size += MLX5_DBR_SIZE;
+ umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ alignment, socket);
+ if (!umem_buf) {
+ DRV_LOG(ERR, "Failed to allocate memory for QP.");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ /* Register allocated buffer in user space with DevX. */
+ umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (!umem_obj) {
+ DRV_LOG(ERR, "Failed to register umem for QP.");
+ rte_errno = errno;
+ goto error;
+ }
+ /* Fill attributes for QP object creation. */
+ attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
+ attr->wq_umem_offset = 0;
+ attr->dbr_umem_valid = 1;
+ attr->dbr_umem_id = attr->wq_umem_id;
+ attr->dbr_address = umem_dbrec;
+ attr->log_page_size = MLX5_LOG_PAGE_SIZE;
+ /* Create queue pair object with DevX. */
+ qp = mlx5_devx_cmd_create_qp(ctx, attr);
+ if (!qp) {
+ DRV_LOG(ERR, "Can't create DevX QP object.");
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ qp_obj->umem_buf = umem_buf;
+ qp_obj->umem_obj = umem_obj;
+ qp_obj->qp = qp;
+ qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
+ return 0;
+error:
+ ret = rte_errno;
+ if (umem_obj)
+ claim_zero(mlx5_os_umem_dereg(umem_obj));
+ if (umem_buf)
+ mlx5_free((void *)(uintptr_t)umem_buf);
+ rte_errno = ret;
+ return -rte_errno;
+}
+
/**
* Destroy DevX Receive Queue.
*
diff --git a/drivers/common/mlx5/mlx5_common_devx.h b/drivers/common/mlx5/mlx5_common_devx.h
index aad0184e5a..b05260b401 100644
--- a/drivers/common/mlx5/mlx5_common_devx.h
+++ b/drivers/common/mlx5/mlx5_common_devx.h
@@ -33,6 +33,18 @@ struct mlx5_devx_sq {
volatile uint32_t *db_rec; /* The SQ doorbell record. */
};
+/* DevX Queue Pair structure. */
+struct mlx5_devx_qp {
+ struct mlx5_devx_obj *qp; /* The QP DevX object. */
+ void *umem_obj; /* The QP umem object. */
+ union {
+ void *umem_buf;
+ struct mlx5_wqe *wqes; /* The QP ring buffer. */
+ struct mlx5_aso_wqe *aso_wqes;
+ };
+ volatile uint32_t *db_rec; /* The QP doorbell record. */
+};
+
/* DevX Receive Queue structure. */
struct mlx5_devx_rq {
struct mlx5_devx_obj *rq; /* The RQ DevX object. */
@@ -59,6 +71,14 @@ int mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj,
uint16_t log_wqbb_n,
struct mlx5_devx_create_sq_attr *attr, int socket);
+__rte_internal
+void mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp);
+
+__rte_internal
+int mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj,
+ uint16_t log_wqbb_n,
+ struct mlx5_devx_qp_attr *attr, int socket);
+
__rte_internal
void mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq);
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index e5cb6b7060..9487f787b6 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -71,6 +71,8 @@ INTERNAL {
+ mlx5_devx_qp_create;
+ mlx5_devx_qp_destroy;
mlx5_devx_rq_destroy;
mlx5_devx_sq_create;
mlx5_devx_sq_destroy;
mlx5_free;
diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c
index b3d5200ca3..c66a3a7add 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.c
+++ b/drivers/crypto/mlx5/mlx5_crypto.c
@@ -257,12 +257,7 @@ mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];
- if (qp->qp_obj != NULL)
- claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
- if (qp->umem_obj != NULL)
- claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
- if (qp->umem_buf != NULL)
- rte_free(qp->umem_buf);
+ /* The WQ buffer is owned by the common helper (mlx5_malloc), so it
+ * must be released through mlx5_devx_qp_destroy, not rte_free.
+ */
+ mlx5_devx_qp_destroy(&qp->qp_obj);
mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
mlx5_devx_cq_destroy(&qp->cq_obj);
rte_free(qp);
@@ -277,20 +277,20 @@ mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
* In Order to configure self loopback, when calling these functions the
* remote QP id that is used is the id of the same QP.
*/
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
- qp->qp_obj->id)) {
+ if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj.qp, MLX5_CMD_OP_RST2INIT_QP,
+ qp->qp_obj.qp->id)) {
DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
- qp->qp_obj->id)) {
+ if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj.qp, MLX5_CMD_OP_INIT2RTR_QP,
+ qp->qp_obj.qp->id)) {
DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
- qp->qp_obj->id)) {
+ if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj.qp, MLX5_CMD_OP_RTR2RTS_QP,
+ qp->qp_obj.qp->id)) {
DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
rte_errno);
return -1;
@@ -452,7 +452,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
}
ds = 2 + klm_n;
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_RDMA_WRITE);
ds = RTE_ALIGN(ds, 4);
@@ -461,7 +461,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
if (priv->max_rdmar_ds > ds) {
cseg += ds;
ds = priv->max_rdmar_ds - ds;
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
MLX5_OPCODE_NOP);
qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
@@ -503,7 +503,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
return 0;
do {
op = *ops++;
- umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * qp->pi);
+ umr = RTE_PTR_ADD(qp->qp_obj.umem_buf, priv->wqe_set_size * qp->pi);
if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
qp->stats.enqueue_err_count++;
if (remain != nb_ops) {
@@ -517,7 +517,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
} while (--remain);
qp->stats.enqueued_count += nb_ops;
rte_io_wmb();
- qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
+ qp->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
rte_wmb();
mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
rte_wmb();
@@ -583,7 +583,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
uint32_t i;
for (i = 0 ; i < qp->entries_n; i++) {
- struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
+ struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf, i *
priv->wqe_set_size);
struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
(cseg + 1);
@@ -593,7 +593,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
struct mlx5_wqe_rseg *rseg;
/* Init UMR WQE. */
- cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
+ cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
(priv->umr_wqe_size / MLX5_WSEG_SIZE));
cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
MLX5_COMP_MODE_OFFSET);
@@ -628,7 +628,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
};
- for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
+ for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
@@ -649,9 +649,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct mlx5_devx_qp_attr attr = {0};
struct mlx5_crypto_qp *qp;
uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
- uint32_t umem_size = RTE_BIT32(log_nb_desc) *
- priv->wqe_set_size +
- sizeof(*qp->db_rec) * 2;
+ int ret;
uint32_t alloc_size = sizeof(*qp);
struct mlx5_devx_cq_attr cq_attr = {
.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
@@ -675,18 +673,15 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
DRV_LOG(ERR, "Failed to create CQ.");
goto error;
}
- qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
- if (qp->umem_buf == NULL) {
- DRV_LOG(ERR, "Failed to allocate QP umem.");
- rte_errno = ENOMEM;
- goto error;
- }
- qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
- (void *)(uintptr_t)qp->umem_buf,
- umem_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (qp->umem_obj == NULL) {
- DRV_LOG(ERR, "Failed to register QP umem.");
+ /* Fill QP attributes; umem and doorbell fields are set by the helper. */
+ attr.pd = priv->pdn;
+ attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
+ attr.cqn = qp->cq_obj.cq->id;
+ attr.rq_size = 0;
+ attr.sq_size = RTE_BIT32(log_nb_desc);
+ ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr,
+ socket_id);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
goto error;
}
if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
@@ -697,23 +692,6 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto error;
}
qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
- attr.pd = priv->pdn;
- attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
- attr.cqn = qp->cq_obj.cq->id;
- attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
- attr.rq_size = 0;
- attr.sq_size = RTE_BIT32(log_nb_desc);
- attr.dbr_umem_valid = 1;
- attr.wq_umem_id = qp->umem_obj->umem_id;
- attr.wq_umem_offset = 0;
- attr.dbr_umem_id = qp->umem_obj->umem_id;
- attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
- qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
- if (qp->qp_obj == NULL) {
- DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
- goto error;
- }
- qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
if (mlx5_crypto_qp2rts(qp))
goto error;
qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
diff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h
index d49b0001f0..013eed30b5 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.h
+++ b/drivers/crypto/mlx5/mlx5_crypto.h
@@ -43,11 +43,8 @@ struct mlx5_crypto_priv {
struct mlx5_crypto_qp {
struct mlx5_crypto_priv *priv;
struct mlx5_devx_cq cq_obj;
- struct mlx5_devx_obj *qp_obj;
+ struct mlx5_devx_qp qp_obj;
struct rte_cryptodev_stats stats;
- struct mlx5dv_devx_umem *umem_obj;
- void *umem_buf;
- volatile uint32_t *db_rec;
struct rte_crypto_op **ops;
struct mlx5_devx_obj **mkey; /* WQE's indirect mekys. */
struct mlx5_mr_ctrl mr_ctrl;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 2a04e36607..a27f3fdadb 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -54,10 +54,7 @@ struct mlx5_vdpa_cq {
struct mlx5_vdpa_event_qp {
struct mlx5_vdpa_cq cq;
struct mlx5_devx_obj *fw_qp;
- struct mlx5_devx_obj *sw_qp;
- struct mlx5dv_devx_umem *umem_obj;
- void *umem_buf;
- volatile uint32_t *db_rec;
+ struct mlx5_devx_qp sw_qp;
};
struct mlx5_vdpa_query_mr {
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 3541c652ce..d327a605fa 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -179,7 +179,7 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
rte_io_wmb();
/* Ring SW QP doorbell record. */
- eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
+ eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
}
return comp;
}
@@ -531,12 +531,7 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
- if (eqp->sw_qp)
- claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
- if (eqp->umem_obj)
- claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
- if (eqp->umem_buf)
- rte_free(eqp->umem_buf);
+ /* SW QP resources are owned by the common helper; release them there. */
+ mlx5_devx_qp_destroy(&eqp->sw_qp);
if (eqp->fw_qp)
claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
mlx5_vdpa_cq_destroy(&eqp->cq);
@@ -547,36 +547,36 @@ static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RST2INIT_QP,
eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_INIT2RTR_QP,
eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
rte_errno);
return -1;
}
if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
- eqp->sw_qp->id)) {
+ eqp->sw_qp.qp->id)) {
DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
rte_errno);
return -1;
}
- if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
+ if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
eqp->fw_qp->id)) {
DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
rte_errno);
@@ -591,8 +591,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
{
struct mlx5_devx_qp_attr attr = {0};
uint16_t log_desc_n = rte_log2_u32(desc_n);
- uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
- sizeof(*eqp->db_rec) * 2;
+ int ret;
if (mlx5_vdpa_event_qp_global_prepare(priv))
return -1;
@@ -605,42 +604,19 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
goto error;
}
- eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
- if (!eqp->umem_buf) {
- DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
- rte_errno = ENOMEM;
- goto error;
- }
- eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
- (void *)(uintptr_t)eqp->umem_buf,
- umem_size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!eqp->umem_obj) {
- DRV_LOG(ERR, "Failed to register umem for SW QP.");
- goto error;
- }
attr.uar_index = priv->uar->page_id;
attr.cqn = eqp->cq.cq_obj.cq->id;
- attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
attr.rq_size = 1 << log_desc_n;
attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
attr.sq_size = 0; /* No need SQ. */
- attr.dbr_umem_valid = 1;
- attr.wq_umem_id = eqp->umem_obj->umem_id;
- attr.wq_umem_offset = 0;
- attr.dbr_umem_id = eqp->umem_obj->umem_id;
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
- attr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;
- eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
- if (!eqp->sw_qp) {
+ ret = mlx5_devx_qp_create(priv->ctx, &eqp->sw_qp, log_desc_n, &attr,
+ SOCKET_ID_ANY);
+ if (ret) {
DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
goto error;
}
- eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
if (mlx5_vdpa_qps2rts(eqp))
goto error;
/* First ringing. */
- rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
+ rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->sw_qp.db_rec[0]);
return 0;
error:
mlx5_vdpa_event_qp_destroy(eqp);
--
2.27.0
More information about the dev
mailing list