[dpdk-dev] [DPDK 18.08 v1 02/12] net/mlx5: handle drop queues as regular queues

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Mon May 28 13:21:35 CEST 2018


Drop queues only exist because of the Verbs API; the information about
whether a flow's fate is to drop packets or not is already present in the
flow itself.  Because of this, drop queues can be fully mapped onto regular
queues.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
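Usage sketch (not part of this commit): with drop queues handled as regular
hash Rx queues, the flow translation path can select the drop hRxQ exactly
like any other queue.  The helpers mlx5_hrxq_drop_new()/mlx5_hrxq_drop_release()
are the ones added by this patch; the flow fields (flow->drop, flow->ibv_attr,
flow->ibv_flow) and the RSS arguments are placeholders for whatever the flow
engine actually carries:

	struct mlx5_hrxq *hrxq;

	hrxq = flow->drop ?
	       /* Drop fate: get the shared drop hash Rx queue. */
	       mlx5_hrxq_drop_new(dev) :
	       /* Regular fate: get or create the hash Rx queue as usual. */
	       mlx5_hrxq_get(dev, rss_key, rss_key_len, hash_fields,
			     queues, queues_n, tunnel, rss_level);
	if (!hrxq)
		return -rte_errno;
	/* Both kinds of hRxQ expose a QP, so the Verbs flow attaches
	 * the same way as for a regular queue.
	 */
	flow->ibv_flow = mlx5_glue->create_flow(hrxq->qp, flow->ibv_attr);
	if (!flow->ibv_flow) {
		if (flow->drop)
			mlx5_hrxq_drop_release(dev, hrxq);
		else
			mlx5_hrxq_release(dev, hrxq);
		rte_errno = errno;
		return -rte_errno;
	}
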
 drivers/net/mlx5/mlx5.c      |   9 --
 drivers/net/mlx5/mlx5.h      |   3 +-
 drivers/net/mlx5/mlx5_flow.c |  26 -----
 drivers/net/mlx5/mlx5_rxq.c  | 221 +++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h |   6 +
 5 files changed, 229 insertions(+), 36 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index f455ea8ea..8188a67e3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -252,7 +252,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 		priv->txqs_n = 0;
 		priv->txqs = NULL;
 	}
-	mlx5_flow_delete_drop_queue(dev);
 	mlx5_mprq_free_mp(dev);
 	mlx5_mr_release(dev);
 	if (priv->pd != NULL) {
@@ -1161,14 +1160,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		mlx5_link_update(eth_dev, 0);
 		/* Store device configuration on private structure. */
 		priv->config = config;
-		/* Create drop queue. */
-		err = mlx5_flow_create_drop_queue(eth_dev);
-		if (err) {
-			DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
-				eth_dev->data->port_id, strerror(rte_errno));
-			err = rte_errno;
-			goto port_error;
-		}
 		/* Supported Verbs flow priority number detection. */
 		if (verb_priorities == 0)
 			verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c4c962b92..69f4f7eb6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -169,7 +169,7 @@ struct priv {
 	struct rte_intr_handle intr_handle; /* Interrupt handler. */
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
-	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+	struct mlx5_hrxq *flow_drop_queue; /* Flow drop queue. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	struct {
@@ -294,6 +294,7 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
 
 /* mlx5_flow.c */
 
+void mlx5_flow_print(struct rte_flow *flow);
 unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
 int mlx5_flow_validate(struct rte_eth_dev *dev,
 		       const struct rte_flow_attr *attr,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a45cb06e1..6497e99c1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -184,32 +184,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
 	}
 }
 
-/**
- * Create drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-	return 0;
-}
-
-/**
- * Delete drop queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- */
-void
-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)
-{
-}
-
 /**
  * Remove all flows.
  *
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 171ce9e30..7ac9bd000 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1967,3 +1967,224 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 	}
 	return ret;
 }
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct ibv_cq *cq;
+	struct ibv_wq *wq = NULL;
+	struct mlx5_rxq_ibv *rxq;
+
+	cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+	if (!cq) {
+		DEBUG("port %u cannot allocate CQ for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	wq = mlx5_glue->create_wq(priv->ctx,
+		 &(struct ibv_wq_init_attr){
+			.wq_type = IBV_WQT_RQ,
+			.max_wr = 1,
+			.max_sge = 1,
+			.pd = priv->pd,
+			.cq = cq,
+		 });
+	if (!wq) {
+		DEBUG("port %u cannot allocate WQ for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+	if (!rxq) {
+		DEBUG("port %u cannot allocate drop Rx queue memory",
+		      dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq->cq = cq;
+	rxq->wq = wq;
+	return rxq;
+error:
+	if (wq)
+		claim_zero(mlx5_glue->destroy_wq(wq));
+	if (cq)
+		claim_zero(mlx5_glue->destroy_cq(cq));
+	return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param rxq
+ *   Pointer to the drop Verbs Rx queue.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct mlx5_rxq_ibv *rxq)
+{
+	if (rxq->wq)
+		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+	if (rxq->cq)
+		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+	rte_free(rxq);
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_rxq_ibv *rxq;
+	struct mlx5_ind_table_ibv tmpl;
+
+	rxq = mlx5_rxq_ibv_drop_new(dev);
+	if (!rxq)
+		return NULL;
+	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+		(priv->ctx,
+		 &(struct ibv_rwq_ind_table_init_attr){
+			.log_ind_tbl_size = 0,
+			.ind_tbl = &rxq->wq,
+			.comp_mask = 0,
+		 });
+	if (!tmpl.ind_table) {
+		DEBUG("port %u cannot allocate indirection table for drop"
+		      " queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	ind_tbl->ind_table = tmpl.ind_table;
+	return ind_tbl;
+error:
+	/* Do not leak the indirection table if the later allocation fails. */
+	if (tmpl.ind_table)
+		claim_zero(mlx5_glue->destroy_rwq_ind_table(tmpl.ind_table));
+	mlx5_rxq_ibv_drop_release(rxq);
+	return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param ind_tbl
+ *   Indirection table to release.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct mlx5_ind_table_ibv *ind_tbl)
+{
+	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+	rte_free(ind_tbl);
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_ibv *ind_tbl;
+	struct ibv_qp *qp;
+	struct mlx5_hrxq *hrxq;
+
+	if (priv->flow_drop_queue) {
+		rte_atomic32_inc(&priv->flow_drop_queue->refcnt);
+		return priv->flow_drop_queue;
+	}
+	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+	if (!ind_tbl)
+		return NULL;
+	qp = mlx5_glue->create_qp_ex(priv->ctx,
+		 &(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function =
+					IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_hash_default_key_len,
+				.rx_hash_key = rss_hash_default_key,
+				.rx_hash_fields_mask = 0,
+				},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->pd
+		 });
+	if (!qp) {
+		DEBUG("port %u cannot allocate QP for drop queue",
+		      dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+	if (!hrxq) {
+		DRV_LOG(WARNING,
+			"port %u cannot allocate memory for drop queue",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+	priv->flow_drop_queue = hrxq;
+	rte_atomic32_set(&hrxq->refcnt, 1);
+	return hrxq;
+error:
+	if (ind_tbl)
+		mlx5_ind_table_ibv_drop_release(ind_tbl);
+	return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Pointer to Hash Rx queue to release.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+	struct priv *priv = dev->data->dev_private;
+
+	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		mlx5_ind_table_ibv_drop_release(hrxq->ind_table);
+		rte_free(hrxq);
+		priv->flow_drop_queue = NULL;
+	}
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index a6017f0dd..3f7a206c1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -245,6 +245,8 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
 int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct mlx5_rxq_ibv *rxq);
 int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
@@ -265,6 +267,8 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
 int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 			       struct mlx5_ind_table_ibv *ind_tbl);
 int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct mlx5_ind_table_ibv *ind_tbl);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
@@ -277,6 +281,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				uint32_t tunnel, uint32_t rss_level);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
 
-- 
2.17.0


