[dpdk-dev] [PATCH v1 14/21] net/mlx5: add Hash Rx queue object

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Wed Aug 2 16:10:30 CEST 2017

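Hash Rx queue is a high level queue providing the RSS hash algorithm,
its key and the indirection table used to spread the packets.  As this
patch makes them reference counted and listed in the private structure,
those objects can easily be shared between several Verbs flows.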

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
 drivers/net/mlx5/mlx5.c      |   3 +
 drivers/net/mlx5/mlx5.h      |   1 +
 drivers/net/mlx5/mlx5_flow.c | 137 +++++++++++++++---------------------
 drivers/net/mlx5/mlx5_rxq.c  | 161 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h |  19 +++++
 5 files changed, 239 insertions(+), 82 deletions(-)
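
For reviewers, a minimal usage sketch of the API added below (not part
of the patch).  It mirrors the lookup-then-create sequence used by
priv_flow_create_action_queue(); the queue list is a placeholder and
the hash-field mask an arbitrary example, while rss_hash_default_key
and rss_hash_default_key_len are the existing defaults from the driver:

	/* Sketch only, assuming four configured Rx queues. */
	uint16_t queues[] = { 0, 1, 2, 3 };
	uint64_t hash_fields = IBV_EXP_RX_HASH_SRC_IPV4 |
			       IBV_EXP_RX_HASH_DST_IPV4;
	struct mlx5_hrxq *hrxq;

	/* Look for a Hash Rx queue already matching the RSS key, hash
	 * fields and indirection table; a hit only bumps refcounts. */
	hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
				  rss_hash_default_key_len, hash_fields,
				  queues, RTE_DIM(queues));
	if (!hrxq)
		/* Otherwise create the hash QP (and the indirection
		 * table when none exists for this queue list). */
		hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
					  rss_hash_default_key_len,
					  hash_fields, queues,
					  RTE_DIM(queues));
	/* ... attach Verbs flows to hrxq->qp ... */
	/* Drop the reference; the QP and the indirection table are
	 * only destroyed once their last user releases them. */
	mlx5_priv_hrxq_release(priv, hrxq);

Note that priv_flow_create_action_queue() in this patch treats a
successful lookup as a "duplicated flow" error instead of sharing the
object; the sketch above only illustrates the object lifetime.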

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d5cb6e4..52cbb20 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -182,6 +182,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	}
 	if (priv->reta_idx != NULL)
 		rte_free(priv->reta_idx);
+	i = mlx5_priv_hrxq_ibv_verify(priv);
+	if (i)
+		WARN("%p: some Hash Rx queues still remain", (void *)priv);
 	i = mlx5_priv_ind_table_ibv_verify(priv);
 	if (i)
 		WARN("%p: some Indirection table still remain", (void*)priv);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 081c2c6..3c2e719 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -147,6 +147,7 @@ struct priv {
 	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
+	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
 	/* Verbs Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 049a8e2..f258567 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -90,13 +90,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
-	struct mlx5_ind_table_ibv *ind_table; /**< Indirection table. */
-	struct ibv_qp *qp; /**< Verbs queue pair. */
 	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
-	struct ibv_exp_wq *wq; /**< Verbs work queue. */
-	struct ibv_cq *cq; /**< Verbs completion queue. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
 };
 
 /** Static initializer for items. */
@@ -1033,56 +1029,36 @@ priv_flow_create_action_queue(struct priv *priv,
 				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
-	for (i = 0; i != flow->actions.queues_n; ++i) {
-		struct mlx5_rxq_data *q = (*priv->rxqs)[flow->actions.queues[i]];
-
-		q->mark |= flow->actions.mark;
-	}
 	rte_flow->mark = flow->actions.mark;
 	rte_flow->ibv_attr = flow->ibv_attr;
-	rte_flow->hash_fields = flow->hash_fields;
-	rte_flow->ind_table =
-		mlx5_priv_ind_table_ibv_get(priv, flow->actions.queues,
+	rte_flow->hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+					    rss_hash_default_key_len,
+					    flow->hash_fields,
+					    flow->actions.queues,
 					    flow->actions.queues_n);
-	if (!rte_flow->ind_table) {
-		rte_flow->ind_table =
-			mlx5_priv_ind_table_ibv_new(priv, flow->actions.queues,
-						    flow->actions.queues_n);
-		if (!rte_flow->ind_table) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_HANDLE,
-					   NULL,
-					   "cannot allocate indirection table");
-			goto error;
-		}
+	if (rte_flow->hrxq) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "duplicated flow");
+		goto error;
 	}
-	rte_flow->qp = ibv_exp_create_qp(
-		priv->ctx,
-		&(struct ibv_exp_qp_init_attr){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_EXP_QP_INIT_ATTR_PD |
-				IBV_EXP_QP_INIT_ATTR_PORT |
-				IBV_EXP_QP_INIT_ATTR_RX_HASH,
-			.pd = priv->pd,
-			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
-				.rx_hash_function =
-					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = rss_hash_default_key_len,
-				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = rte_flow->hash_fields,
-				.rwq_ind_tbl = rte_flow->ind_table->ind_table,
-			},
-			.port_num = priv->port,
-		});
-	if (!rte_flow->qp) {
+	rte_flow->hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+					    rss_hash_default_key_len,
+					    flow->hash_fields,
+					    flow->actions.queues,
+					    flow->actions.queues_n);
+	if (!rte_flow->hrxq) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate QP");
+				   NULL, "cannot create hash rxq");
 		goto error;
 	}
+	for (i = 0; i != flow->actions.queues_n; ++i) {
+		struct mlx5_rxq_data *q = (*priv->rxqs)[flow->actions.queues[i]];
+
+		q->mark |= flow->actions.mark;
+	}
 	if (!priv->dev->data->dev_started)
 		return rte_flow;
-	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->hrxq->qp,
 						 rte_flow->ibv_attr);
 	if (!rte_flow->ibv_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1092,10 +1068,8 @@ priv_flow_create_action_queue(struct priv *priv,
 	return rte_flow;
 error:
 	assert(rte_flow);
-	if (rte_flow->qp)
-		ibv_destroy_qp(rte_flow->qp);
-	if (rte_flow->ind_table)
-		mlx5_priv_ind_table_ibv_release(priv, rte_flow->ind_table);
+	if (rte_flow->hrxq)
+		mlx5_priv_hrxq_release(priv, rte_flow->hrxq);
 	rte_free(rte_flow);
 	return NULL;
 }
@@ -1210,40 +1184,41 @@ priv_flow_destroy(struct priv *priv,
 		  struct rte_flow *flow)
 {
 	unsigned int i;
-
-	TAILQ_REMOVE(&priv->flows, flow, next);
-	if (flow->ibv_flow)
-		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-	if (flow->qp)
-		claim_zero(ibv_destroy_qp(flow->qp));
-	for (i = 0; i != flow->ind_table->queues_n; ++i) {
+	uint16_t *queues;
+	uint16_t queues_n;
+
+	queues = flow->hrxq->ind_table->queues;
+	queues_n = flow->hrxq->ind_table->queues_n;
+	if (!flow->mark)
+		goto out;
+	for (i = 0; i != queues_n; ++i) {
 		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq =
-			(*priv->rxqs)[flow->ind_table->queues[i]];
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
+		int mark = 0;
 
 		/*
 		 * To remove the mark from the queue, the queue must not be
 		 * present in any other marked flow (RSS or not).
 		 */
-		if (flow->mark) {
-			int mark = 0;
-
-			TAILQ_FOREACH(tmp, &priv->flows, next) {
-				unsigned int j;
-
-				if (!tmp->mark)
-					continue;
-				for (j = 0;
-				     (j != tmp->ind_table->queues_n) && !mark;
-				     j++)
-					if (tmp->ind_table->queues[j] ==
-					    flow->ind_table->queues[i])
-						mark = 1;
-			}
-			rxq->mark = mark;
+		TAILQ_FOREACH(tmp, &priv->flows, next) {
+			unsigned int j;
+
+			if (!tmp->mark)
+				continue;
+			for (j = 0;
+			     (j != tmp->hrxq->ind_table->queues_n) && !mark;
+			     j++)
+				if (tmp->hrxq->ind_table->queues[j] ==
+				    queues[i])
+					mark = 1;
 		}
+		rxq->mark = mark;
 	}
-	mlx5_priv_ind_table_ibv_release(priv, flow->ind_table);
+out:
+	TAILQ_REMOVE(&priv->flows, flow, next);
+	if (flow->ibv_flow)
+		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+	mlx5_priv_hrxq_release(priv, flow->hrxq);
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1345,10 +1320,8 @@ priv_flow_start(struct priv *priv)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH(flow, &priv->flows, next) {
-		struct ibv_qp *qp;
-
-		qp = flow->qp;
-		flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
+		flow->ibv_flow = ibv_exp_create_flow(flow->hrxq->qp,
+						     flow->ibv_attr);
 		if (!flow->ibv_flow) {
 			DEBUG("Flow %p cannot be applied", (void *)flow);
 			rte_errno = EINVAL;
@@ -1358,8 +1331,8 @@ priv_flow_start(struct priv *priv)
 		if (flow->mark) {
 			unsigned int n;
 
-			for (n = 0; n < flow->ind_table->queues_n; ++n) {
-				uint16_t idx = flow->ind_table->queues[n];
+			for (n = 0; n < flow->hrxq->ind_table->queues_n; ++n) {
+				uint16_t idx = flow->hrxq->ind_table->queues[n];
 				(*priv->rxqs)[idx]->mark = 1;
 			}
 		}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bd6f966..076b575 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1688,3 +1688,164 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
 	}
 	return ret;
 }
+
+/**
+ * Create a Hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash fields to apply RSS on.
+ * @param queues
+ *   Queues entering the Hash Rx queue.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A Hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq *
+mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct ibv_qp *qp;
+
+	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+	if (!ind_tbl)
+		return NULL;
+	qp = ibv_exp_create_qp(
+		priv->ctx,
+		&(struct ibv_exp_qp_init_attr){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_EXP_QP_INIT_ATTR_PD |
+				IBV_EXP_QP_INIT_ATTR_PORT |
+				IBV_EXP_QP_INIT_ATTR_RX_HASH,
+			.pd = priv->pd,
+			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+				.rx_hash_function =
+					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_key_len,
+				.rx_hash_key = rss_key,
+				.rx_hash_fields_mask = hash_fields,
+				.rwq_ind_tbl = ind_tbl->ind_table,
+			},
+			.port_num = priv->port,
+		});
+	if (!qp)
+		goto error;
+	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
+	if (!hrxq)
+		goto error;
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+	hrxq->rss_key_len = rss_key_len;
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	rte_atomic32_inc(&hrxq->refcnt);
+	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	return hrxq;
+error:
+	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+	if (qp)
+		claim_zero(ibv_destroy_qp(qp));
+	return NULL;
+}
+
+/**
+ * Get a Hash Rx queue with a matching configuration, if any.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash fields to apply RSS on.
+ * @param queues
+ *   Queues entering the Hash Rx queue.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   A Hash Rx queue on success, NULL otherwise.
+ */
+struct mlx5_hrxq *
+mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
+		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+{
+	struct mlx5_hrxq *hrxq;
+
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		struct mlx5_ind_table_ibv *ind_tbl;
+
+		if (hrxq->rss_key_len != rss_key_len)
+			continue;
+		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
+			continue;
+		if (hrxq->hash_fields != hash_fields)
+			continue;
+		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+		if (!ind_tbl)
+			continue;
+		if (ind_tbl != hrxq->ind_table) {
+			/* Same RSS key/fields but different queues. */
+			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+			continue;
+		}
+		rte_atomic32_inc(&hrxq->refcnt);
+		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		return hrxq;
+	}
+	return NULL;
+}
+
+/**
+ * Release a Hash Rx queue.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param hrxq
+ *   Pointer to Hash Rx queue to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+{
+	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		claim_zero(ibv_destroy_qp(hrxq->qp));
+		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+		LIST_REMOVE(hrxq, next);
+		rte_free(hrxq);
+		return 0;
+	} else {
+		claim_nonzero(mlx5_priv_ind_table_ibv_release(priv,
+							      hrxq->ind_table));
+	}
+	return EBUSY;
+}
+
+/**
+ * Verify the Hash Rx queue list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+{
+	struct mlx5_hrxq *hrxq;
+	int ret = 0;
+
+	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
+		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
+		      (void *)priv, (void *)hrxq);
+		++ret;
+	}
+	return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 2b48a01..6397a50 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -161,6 +161,17 @@ struct mlx5_ind_table_ibv {
 	uint16_t queues[]; /**< Queue list. */
 };
 
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+	struct ibv_qp *qp; /* Verbs queue pair. */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t rss_key_len; /* Hash key length in bytes. */
+	uint8_t rss_key[]; /* Hash key. */
+};
+
 /* Hash RX queue types. */
 enum hash_rxq_type {
 	HASH_RXQ_TCPV4,
@@ -363,6 +374,14 @@ struct mlx5_ind_table_ibv* mlx5_priv_ind_table_ibv_get(struct priv *priv,
 int mlx5_priv_ind_table_ibv_release(struct priv * priv,
 				    struct mlx5_ind_table_ibv *ind_table);
 int mlx5_priv_ind_table_ibv_verify(struct priv *priv);
+struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key,
+				     uint8_t rss_key_len, uint64_t hash_fields,
+				     uint16_t queues[], uint16_t queues_n);
+struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key,
+				     uint8_t rss_key_len, uint64_t hash_fields,
+				     uint16_t queues[], uint16_t queues_n);
+int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq);
+int mlx5_priv_hrxq_ibv_verify(struct priv *priv);
 
 /* mlx5_txq.c */
 
-- 
2.1.4