[dpdk-dev] [PATCH v3 04/10] net/mlx5: modify hash Rx queue objects

Andrey Vesnovaty andreyv at nvidia.com
Sun Oct 4 00:06:13 CEST 2020


From: Andrey Vesnovaty <andreyv at mellanox.com>

Implement mlx5_hrxq_modify() to modify a hash Rx queue object.
This commit relies on the capability to modify a TIR object via DevX.

Signed-off-by: Andrey Vesnovaty <andreyv at mellanox.com>
---
 drivers/net/mlx5/mlx5.h      |   4 +
 drivers/net/mlx5/mlx5_devx.c | 178 +++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxq.c  | 103 ++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h |   5 +-
 4 files changed, 251 insertions(+), 39 deletions(-)
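
A minimal caller-side sketch (illustrative only, not part of the patch): how
the new mlx5_hrxq_modify() declared in mlx5_rxtx.h could be used to re-key
and re-target an existing hash Rx queue. The wrapper name, queue list, key
contents and hash_fields handling below are assumptions for illustration;
only the mlx5_hrxq_modify() prototype, the fixed 40-byte key length and the
return convention come from this patch.

/* Hypothetical sketch; assumes mlx5 driver context (mlx5_rxtx.h included). */
static int
example_rss_update(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		   uint64_t hash_fields)
{
	/* Placeholder key and queue list, not real configuration values. */
	static const uint8_t rss_key[MLX5_RSS_HASH_KEY_LEN] = { 0 };
	static const uint16_t queues[] = { 0, 1, 2, 3 };

	/* Returns 0 on success, a negative errno value otherwise. */
	return mlx5_hrxq_modify(dev, hrxq_idx,
				rss_key, MLX5_RSS_HASH_KEY_LEN,
				hash_fields, queues, RTE_DIM(queues));
}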

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 865e72d318..210004bc81 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -747,6 +747,10 @@ struct mlx5_obj_ops {
 	void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
 	int (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 			int tunnel __rte_unused);
+	int (*hrxq_modify)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+			   const uint8_t *rss_key,
+			   uint64_t hash_fields,
+			   const struct mlx5_ind_table_obj *ind_tbl);
 	void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
 	int (*drop_action_create)(struct rte_eth_dev *dev);
 	void (*drop_action_destroy)(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 3e81fcc252..676f7c6fb3 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -675,33 +675,39 @@ mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
 }
 
 /**
- * Create an Rx Hash queue.
+ * Set TIR attribute struct with relevant input values.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
- * @param hrxq
- *   Pointer to Rx Hash queue.
- * @param tunnel
+ * @param[in] rss_key
+ *   RSS key for the Rx hash queue.
+ * @param[in] hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param[in] ind_tbl
+ *   Indirection table for TIR.
+ * @param[in] queues
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
+ * @param[in] queues_n
+ *   Number of queues.
+ * @param[in] tunnel
  *   Tunnel type.
+ * @param[out] tir_attr
+ *   Parameters structure for TIR creation/modification.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set.
  */
-static int
-mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
-		   int tunnel __rte_unused)
+static void
+mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
+		       uint64_t hash_fields,
+		       const struct mlx5_ind_table_obj *ind_tbl,
+		       int tunnel, enum mlx5_rxq_obj_type rxq_obj_type,
+		       struct mlx5_devx_tir_attr *tir_attr)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-	struct mlx5_devx_tir_attr tir_attr;
-	const uint8_t *rss_key = hrxq->rss_key;
-	uint64_t hash_fields = hrxq->hash_fields;
 	bool lro = true;
 	uint32_t i;
-	int err;
 
 	/* Enable TIR LRO only if all the queues were configured for. */
 	for (i = 0; i < ind_tbl->queues_n; ++i) {
@@ -710,26 +716,24 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 			break;
 		}
 	}
-	memset(&tir_attr, 0, sizeof(tir_attr));
-	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
-	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
-	tir_attr.tunneled_offload_en = !!tunnel;
+	memset(tir_attr, 0, sizeof(*tir_attr));
+	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+	tir_attr->tunneled_offload_en = !!tunnel;
 	/* If needed, translate hash_fields bitmap to PRM format. */
 	if (hash_fields) {
-		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
+		struct mlx5_rx_hash_field_select *rx_hash_field_select =
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
-				       &tir_attr.rx_hash_field_selector_inner :
-				       &tir_attr.rx_hash_field_selector_outer;
-#else
-		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
+			hash_fields & IBV_RX_HASH_INNER ?
+				&tir_attr->rx_hash_field_selector_inner :
 #endif
+				&tir_attr->rx_hash_field_selector_outer;
 		/* 1 bit: 0: IPv4, 1: IPv6. */
 		rx_hash_field_select->l3_prot_type =
 					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
 		/* 1 bit: 0: TCP, 1: UDP. */
 		rx_hash_field_select->l4_prot_type =
-					 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+					!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
 		/* Bitmask which sets which fields to use in RX Hash. */
 		rx_hash_field_select->selected_fields =
 			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
@@ -741,20 +745,53 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
 	}
-	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
-		tir_attr.transport_domain = priv->sh->td->id;
+	if (rxq_obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+		tir_attr->transport_domain = priv->sh->td->id;
 	else
-		tir_attr.transport_domain = priv->sh->tdn;
-	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
-	tir_attr.indirect_table = ind_tbl->rqt->id;
+		tir_attr->transport_domain = priv->sh->tdn;
+	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	tir_attr->indirect_table = ind_tbl->rqt->id;
 	if (dev->data->dev_conf.lpbk_mode)
-		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+		tir_attr->self_lb_block =
+					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
 	if (lro) {
-		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
-		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
+		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
+		tir_attr->lro_enable_mask =
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
 	}
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Pointer to Rx Hash queue.
+ * @param tunnel
+ *   Tunnel type.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+		   int tunnel __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
+	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_devx_tir_attr tir_attr = {0};
+	const uint8_t *rss_key = hrxq->rss_key;
+	uint64_t hash_fields = hrxq->hash_fields;
+	int err;
+
+	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl, tunnel,
+			       rxq_ctrl->type, &tir_attr);
 	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
 	if (!hrxq->tir) {
 		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
@@ -791,6 +828,70 @@ mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
 	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
 }
 
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Hash Rx queue to modify.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
+		       const uint8_t *rss_key,
+		       uint64_t hash_fields,
+		       const struct mlx5_ind_table_obj *ind_tbl)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	enum mlx5_rxq_obj_type rxq_obj_type = rxq_ctrl->obj->type;
+	struct mlx5_devx_modify_tir_attr modify_tir = {0};
+
+	/* validations */
+	if (rxq_obj_type == MLX5_RXQ_OBJ_TYPE_IBV) {
+	/* Shared action is supported by the DevX interface only. */
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	/*
+	 * Untested fields for modification:
+	 * - rx_hash_symmetric is not set in hrxq_new(),
+	 * - rx_hash_fn is hard-coded in hrxq_new(),
+	 * - lro_xxx is not set after Rx queue setup.
+	 */
+	if (ind_tbl != hrxq->ind_table)
+		modify_tir.modify_bitmask |=
+			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
+	if (hash_fields != hrxq->hash_fields ||
+			memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
+		modify_tir.modify_bitmask |=
+			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
+	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
+			       0, /* N/A - tunnel modification unsupported */
+			       rxq_obj_type, &modify_tir.tir);
+	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
+		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
+			dev->data->port_id);
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	return 0;
+}
+
 /**
  * Create a DevX drop action for Rx Hash queue.
  *
@@ -833,6 +934,7 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.ind_table_destroy = mlx5_devx_ind_table_destroy,
 	.hrxq_new = mlx5_devx_hrxq_new,
 	.hrxq_destroy = mlx5_devx_tir_destroy,
+	.hrxq_modify = mlx5_devx_hrxq_modify,
 	.drop_action_create = mlx5_devx_drop_action_create,
 	.drop_action_destroy = mlx5_devx_drop_action_destroy,
 };
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 487f9973bb..2618f72be0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1706,6 +1706,29 @@ mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
 	return MLX5_RXQ_TYPE_UNDEFINED;
 }
 
+/**
+ * Match the queues listed in the arguments against the queues contained in
+ * the indirection table object.
+ *
+ * @param ind_tbl
+ *   Pointer to indirection table to match.
+ * @param queues
+ *   Queues to match against the queues in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   1 if all queues in the indirection table match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+		       const uint16_t *queues, uint32_t queues_n)
+{
+	return (ind_tbl->queues_n == queues_n) &&
+	    (!memcmp(ind_tbl->queues, queues,
+		    ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
+}
+
 /**
  * Get an indirection table.
  *
@@ -1902,6 +1925,86 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Modify an Rx Hash queue configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Index to Hash Rx queue to modify.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+		 const uint8_t *rss_key, uint32_t rss_key_len,
+		 uint64_t hash_fields,
+		 const uint16_t *queues, uint32_t queues_n)
+{
+	int err;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq =
+		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	int ret;
+
+	if (!hrxq) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	/* validations */
+	if (hrxq->rss_key_len != rss_key_len) {
+		/* rss_key_len is fixed at 40 bytes and not supposed to change. */
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+
+	queues_n = hash_fields ? queues_n : 1;
+	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
+					    queues, queues_n)) {
+		ind_tbl = hrxq->ind_table;
+	} else {
+		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+		if (!ind_tbl)
+			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+	}
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key, hash_fields,
+					ind_tbl);
+	if (ret) {
+		rte_errno = errno;
+		goto error;
+	}
+	if (ind_tbl != hrxq->ind_table) {
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+		hrxq->ind_table = ind_tbl;
+	}
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	return 0;
+error:
+	err = rte_errno;
+	if (ind_tbl != hrxq->ind_table)
+		mlx5_ind_table_obj_release(dev, ind_tbl);
+	rte_errno = err;
+	return -rte_errno;
+}
+
 /**
  * Release the hash Rx queue.
  *
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index a8e6837ba1..2c28c3a521 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -387,7 +387,10 @@ void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
 void mlx5_rxq_timestamp_set(struct rte_eth_dev *dev);
-
+int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
+		     const uint8_t *rss_key, uint32_t rss_key_len,
+		     uint64_t hash_fields,
+		     const uint16_t *queues, uint32_t queues_n);
 
 /* mlx5_txq.c */
 
-- 
2.26.2


