[dpdk-dev] [PATCH 2/2] vdpa/mlx5: specify lag port affinity

Xueming Li xuemingl at nvidia.com
Mon Oct 26 12:10:56 CET 2020


If the TIS LAG port affinity is set to auto, the firmware assigns the
port affinity on each TIS creation in a round-robin manner. In the case
of 2 PFs, if a virtq is created, destroyed and created again, each virtq
will get the same port affinity.
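
For illustration only, a toy model of the auto affinity described above,
assuming the firmware keeps a simple creation counter over the LAG ports
(the counter is an assumption, not a documented firmware internal):

/* Toy model of firmware "auto" (round-robin) affinity; illustration only. */
static unsigned int rr_counter; /* assumed firmware-side creation counter */

static unsigned int
auto_port_affinity(unsigned int num_lag_ports)
{
	/* Port numbers are 1-based; 0 in the TIS attributes means auto. */
	return rr_counter++ % num_lag_ports + 1;
}

With 2 PFs the assignments go 1, 2, 1, 2, ...; a destroy/create cycle
replays the same sequence, so each recreated virtq ends up with the same
port affinity as before.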

To resolve this firmware limitation, this patch creates each TIS with a
specified port affinity, one TIS per PF.
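
As a sketch of the scheme introduced below (the helper names here are
hypothetical; the arithmetic matches the hunks): TIS i is created with
lag_tx_port_affinity = i + 1, and each virtq selects its TIS by queue
pair, so consecutive pairs alternate across the LAG ports:

#include <stdint.h>

/* TIS tis_idx is pinned to LAG port tis_idx + 1 (0 would request auto). */
static inline uint32_t
tis_affinity(uint32_t tis_idx)
{
	return tis_idx + 1;
}

/* Two virtqs form a queue pair; pairs are spread round-robin across the
 * per-port TIS objects, e.g. with 2 ports: pair 0 -> TIS 0 (port 1),
 * pair 1 -> TIS 1 (port 2), pair 2 -> TIS 0, and so on.
 */
static inline uint32_t
virtq_tis(uint32_t virtq_idx, uint8_t num_lag_ports)
{
	return (virtq_idx / 2) % num_lag_ports;
}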

Cc: stable at dpdk.org

Signed-off-by: Xueming Li <xuemingl at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       |  3 +++
 drivers/vdpa/mlx5/mlx5_vdpa.h       |  3 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 23 ++++++++++++++---------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index a8f3e4b1de..2e17ed4fca 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -730,6 +730,9 @@ mlx5_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	}
 	priv->caps = attr.vdpa;
 	priv->log_max_rqt_size = attr.log_max_rqt_size;
+	priv->num_lag_ports = attr.num_lag_ports;
+	if (attr.num_lag_ports == 0)
+		priv->num_lag_ports = 1;
 	priv->ctx = ctx;
 	priv->pci_dev = pci_dev;
 	priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index fcbc12ab0c..c8c1adfde4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -146,8 +146,9 @@ struct mlx5_vdpa_priv {
 	struct mlx5dv_devx_uar *uar;
 	struct rte_intr_handle intr_handle;
 	struct mlx5_devx_obj *td;
-	struct mlx5_devx_obj *tis;
+	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
 	uint16_t nr_virtqs;
+	uint8_t num_lag_ports;
 	uint64_t features; /* Negotiated features. */
 	uint16_t log_max_rqt_size;
 	struct mlx5_vdpa_steer steer;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 17e71cf4f4..4724baca4e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -103,12 +103,13 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
-
 	for (i = 0; i < priv->nr_virtqs; i++)
 		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
-	if (priv->tis) {
-		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
-		priv->tis = NULL;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		if (priv->tiss[i]) {
+			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
+			priv->tiss[i] = NULL;
+		}
 	}
 	if (priv->td) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->td));
@@ -302,7 +303,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	attr.hw_used_index = last_used_idx;
 	attr.q_size = vq.size;
 	attr.mkey = priv->gpa_mkey_index;
-	attr.tis_id = priv->tis->id;
+	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr.queue_index = index;
 	attr.pd = priv->pdn;
 	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
@@ -432,10 +433,14 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		return -rte_errno;
 	}
 	tis_attr.transport_domain = priv->td->id;
-	priv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
-	if (!priv->tis) {
-		DRV_LOG(ERR, "Failed to create TIS.");
-		goto error;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		/* 0 means auto affinity; a non-zero value proposes the port. */
+		tis_attr.lag_tx_port_affinity = i + 1;
+		priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
+		if (!priv->tiss[i]) {
+			DRV_LOG(ERR, "Failed to create TIS %u.", i);
+			goto error;
+		}
 	}
 	priv->nr_virtqs = nr_vring;
 	for (i = 0; i < nr_vring; i++)
-- 
2.25.1


