[dpdk-dev] [PATCH v2] net/mlx5: add parameters to enable/disable vector code

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Wed Aug 2 17:32:56 CEST 2017


The vector code is still very young and can present issues for some users.
To avoid forcing them to modify the selection functions by commenting out
code and recompiling the PMD, new device parameters are added to
deactivate the Tx and/or Rx vector code.
By using these device parameters, the user can fall back to the regular
burst functions.
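
For instance, assuming an mlx5 port at PCI address 0000:05:00.0 (the
address, core list and the use of testpmd are only illustrative), the
vector paths could be turned off at startup through the device arguments
instead of recompiling the PMD:

  testpmd -l 0-3 -n 4 -w 0000:05:00.0,tx_vec_en=0,rx_vec_en=0 -- -i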

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
Acked-by: Yongseok Koh <yskoh at mellanox.com>
---
 doc/guides/nics/mlx5.rst             | 14 ++++++++++++++
 drivers/net/mlx5/mlx5.c              | 23 +++++++++++++++++++++++
 drivers/net/mlx5/mlx5.h              |  2 ++
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c |  9 +++++++--
 4 files changed, 46 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index db15118..f4cb18b 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -254,6 +254,20 @@ Run-time configuration
   When hardware TSO is enabled, packets marked with TCP segmentation
   offload will be divided into segments by the hardware. Disabled by default.
 
+- ``tx_vec_en`` parameter [int]
+
+  A nonzero value enables the Tx vector path on ConnectX-5 NICs only, when the
+  number of global Tx queues on the port does not exceed MLX5_VPMD_MIN_TXQS.
+
+  Enabled by default on ConnectX-5.
+
+- ``rx_vec_en`` parameter [int]
+
+  A nonzero value enables the Rx vector path if the port is not configured
+  in multi-segment mode; otherwise this parameter is ignored.
+
+  Enabled by default.
+
 Prerequisites
 -------------
 
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 757c910..b7e5046 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -94,6 +94,12 @@
 /* Device parameter to enable hardware TSO offload. */
 #define MLX5_TSO "tso"
 
+/* Device parameter to enable the Tx vector code path. */
+#define MLX5_TX_VEC_EN "tx_vec_en"
+
+/* Device parameter to enable the Rx vector code path. */
+#define MLX5_RX_VEC_EN "rx_vec_en"
+
 /* Default PMD specific parameter value. */
 #define MLX5_ARG_UNSET (-1)
 
@@ -105,6 +111,8 @@ struct mlx5_args {
 	int mpw_hdr_dseg;
 	int inline_max_packet_sz;
 	int tso;
+	int tx_vec_en;
+	int rx_vec_en;
 };
 /**
  * Retrieve integer value from environment variable.
@@ -324,6 +332,10 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		args->inline_max_packet_sz = tmp;
 	} else if (strcmp(MLX5_TSO, key) == 0) {
 		args->tso = !!tmp;
+	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
+		args->tx_vec_en = !!tmp;
+	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
+		args->rx_vec_en = !!tmp;
 	} else {
 		WARN("%s: unknown parameter", key);
 		return -EINVAL;
@@ -353,6 +365,8 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
 		MLX5_TXQ_MPW_HDR_DSEG_EN,
 		MLX5_TXQ_MAX_INLINE_LEN,
 		MLX5_TSO,
+		MLX5_TX_VEC_EN,
+		MLX5_RX_VEC_EN,
 		NULL,
 	};
 	struct rte_kvargs *kvlist;
@@ -408,6 +422,10 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
 		priv->inline_max_packet_sz = args->inline_max_packet_sz;
 	if (args->tso != MLX5_ARG_UNSET)
 		priv->tso = args->tso;
+	if (args->tx_vec_en != MLX5_ARG_UNSET)
+		priv->tx_vec_en = args->tx_vec_en;
+	if (args->rx_vec_en != MLX5_ARG_UNSET)
+		priv->rx_vec_en = args->rx_vec_en;
 }
 
 /**
@@ -553,6 +571,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			.mpw_hdr_dseg = MLX5_ARG_UNSET,
 			.inline_max_packet_sz = MLX5_ARG_UNSET,
 			.tso = MLX5_ARG_UNSET,
+			.tx_vec_en = MLX5_ARG_UNSET,
+			.rx_vec_en = MLX5_ARG_UNSET,
 		};
 
 		exp_device_attr.comp_mask =
@@ -615,6 +635,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		priv->mps = mps; /* Enable MPW by default if supported. */
 		priv->cqe_comp = 1; /* Enable compression by default. */
 		priv->tunnel_en = tunnel_en;
+		/* Enable vector by default if supported. */
+		priv->tx_vec_en = 1;
+		priv->rx_vec_en = 1;
 		err = mlx5_args(&args, pci_dev->device.devargs);
 		if (err) {
 			ERROR("failed to process device arguments: %s",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f16f778..43c5384 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -131,6 +131,8 @@ struct priv {
 	unsigned int tso:1; /* Whether TSO is supported. */
 	unsigned int tunnel_en:1;
 	unsigned int isolated:1; /* Whether isolated mode is enabled. */
+	unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
+	unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
 	/* Whether Tx offloads for tunneled packets are supported. */
 	unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int txq_inline; /* Maximum packet size for inlining. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 74e5953..40915f2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -1309,7 +1309,8 @@ priv_check_raw_vec_tx_support(struct priv *priv)
 int __attribute__((cold))
 priv_check_vec_tx_support(struct priv *priv)
 {
-	if (priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+	if (!priv->tx_vec_en ||
+	    priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
 	    priv->mps != MLX5_MPW_ENHANCED ||
 	    priv->tso)
 		return -ENOTSUP;
@@ -1328,7 +1329,9 @@ priv_check_vec_tx_support(struct priv *priv)
 int __attribute__((cold))
 rxq_check_vec_support(struct rxq *rxq)
 {
-	if (rxq->sges_n != 0)
+	struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+
+	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
 	return 1;
 }
@@ -1347,6 +1350,8 @@ priv_check_vec_rx_support(struct priv *priv)
 {
 	uint16_t i;
 
+	if (!priv->rx_vec_en)
+		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
-- 
2.1.4


