patch 'net/nfp: fix control message packets' has been queued to stable release 22.11.4

Xueming Li xuemingl at nvidia.com
Sun Oct 22 16:20:38 CEST 2023


Hi,

FYI, your patch has been queued to stable release 22.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/15/23. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=ce57bf585c8cbc35f56396a0249d4d259f4733a8

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
>From ce57bf585c8cbc35f56396a0249d4d259f4733a8 Mon Sep 17 00:00:00 2001
From: Long Wu <long.wu at corigine.com>
Date: Thu, 13 Jul 2023 11:02:17 +0800
Subject: [PATCH] net/nfp: fix control message packets
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 9dded5bc59acc33c56c7259ec4f482da9f5cec5e ]

If we add two cards that use flower firmware into one dpdk-testpmd,
the NFP PMD will print an error log. The reason is that the second card
uses the control VNIC Rx queue of the first card.

rte_eth_dma_zone_reserve() will reserve a new DMA zone only if the
DMA zone's name is unique. If there is already a zone with the same
name, rte_eth_dma_zone_reserve() will return a pointer to the
previously created DMA zone. We try to reserve a DMA zone for each
card, but we use the same name for every reservation.

We use the PCI address to give the control VNIC a unique ring name
to avoid the above situation and let each NIC's ring have its
own DMA zone.

Fixes: 945441ebdb9c ("net/nfp: add flower ctrl VNIC")

Signed-off-by: Long Wu <long.wu at corigine.com>
Acked-by: Ferruh Yigit <ferruh.yigit at amd.com>
---
 drivers/net/nfp/flower/nfp_flower.c | 32 +++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 0661c38f08..c14a481a81 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -690,6 +690,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	int ret = 0;
 	uint16_t n_txq;
 	uint16_t n_rxq;
+	const char *pci_name;
 	unsigned int numa_node;
 	struct rte_mempool *mp;
 	struct nfp_net_rxq *rxq;
@@ -698,6 +699,8 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	struct rte_eth_dev *eth_dev;
 	const struct rte_memzone *tz;
 	struct nfp_app_fw_flower *app_fw_flower;
+	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];
+	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
 	char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE];

 	/* Set up some pointers here for ease of use */
@@ -730,10 +733,12 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		goto eth_dev_cleanup;
 	}

+	pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;
+
 	/* Create a mbuf pool for the ctrl vNIC */
 	numa_node = rte_socket_id();
 	snprintf(ctrl_pktmbuf_pool_name, sizeof(ctrl_pktmbuf_pool_name),
-			"%s_ctrlmp", pf_dev->pci_dev->device.name);
+			"%s_ctrlmp", pci_name);
 	app_fw_flower->ctrl_pktmbuf_pool =
 			rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name,
 			4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node);
@@ -772,6 +777,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	eth_dev->data->nb_rx_queues = n_txq;
 	eth_dev->data->dev_private = hw;

+	snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name);
 	/* Set up the Rx queues */
 	for (i = 0; i < n_rxq; i++) {
 		rxq = rte_zmalloc_socket("ethdev RX queue",
@@ -810,7 +816,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * handle the maximum ring size is allocated in order to allow for
 		 * resizing in later calls to the queue setup function.
 		 */
-		tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_rx_ring", i,
+		tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_rxring_name, i,
 				sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
@@ -829,7 +835,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 				sizeof(*rxq->rxbufs) * CTRL_VNIC_NB_DESC,
 				RTE_CACHE_LINE_SIZE, numa_node);
 		if (rxq->rxbufs == NULL) {
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 			ret = -ENOMEM;
 			goto rx_queue_setup_cleanup;
@@ -847,6 +853,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
 	}

+	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
 	/* Set up the Tx queues */
 	for (i = 0; i < n_txq; i++) {
 		txq = rte_zmalloc_socket("ethdev TX queue",
@@ -865,7 +872,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * handle the maximum ring size is allocated in order to allow for
 		 * resizing in later calls to the queue setup function.
 		 */
-		tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_tx_ring", i,
+		tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_txring_name, i,
 				sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
@@ -895,7 +902,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 				sizeof(*txq->txbufs) * CTRL_VNIC_NB_DESC,
 				RTE_CACHE_LINE_SIZE, numa_node);
 		if (txq->txbufs == NULL) {
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 			ret = -ENOMEM;
 			goto tx_queue_setup_cleanup;
@@ -920,7 +927,7 @@ tx_queue_setup_cleanup:
 		txq = eth_dev->data->tx_queues[i];
 		if (txq != NULL) {
 			rte_free(txq->txbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 		}
 	}
@@ -929,7 +936,7 @@ rx_queue_setup_cleanup:
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			rte_free(rxq->rxbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 		}
 	}
@@ -950,28 +957,35 @@ static void
 nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
 {
 	uint32_t i;
+	const char *pci_name;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_txq *txq;
 	struct rte_eth_dev *eth_dev;
 	struct nfp_app_fw_flower *app_fw_flower;
+	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
+	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];

 	eth_dev = hw->eth_dev;
 	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv);

+	pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1;
+
+	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
 	for (i = 0; i < hw->max_tx_queues; i++) {
 		txq = eth_dev->data->tx_queues[i];
 		if (txq != NULL) {
 			rte_free(txq->txbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 		}
 	}

+	snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name);
 	for (i = 0; i < hw->max_rx_queues; i++) {
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			rte_free(rxq->rxbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 		}
 	}
--
2.25.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2023-10-22 22:17:34.815647400 +0800
+++ 0009-net-nfp-fix-control-message-packets.patch	2023-10-22 22:17:34.136723700 +0800
@@ -1 +1 @@
-From 9dded5bc59acc33c56c7259ec4f482da9f5cec5e Mon Sep 17 00:00:00 2001
+From ce57bf585c8cbc35f56396a0249d4d259f4733a8 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 9dded5bc59acc33c56c7259ec4f482da9f5cec5e ]
@@ -21 +23,0 @@
-Cc: stable at dpdk.org
@@ -30 +32 @@
-index bbcfa6e66a..93aa618052 100644
+index 0661c38f08..c14a481a81 100644
@@ -33 +35 @@
-@@ -385,6 +385,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -690,6 +690,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -41 +43 @@
-@@ -393,6 +394,8 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -698,6 +699,8 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -50 +52 @@
-@@ -425,10 +428,12 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -730,10 +733,12 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -59 +61 @@
--			"%s_ctrlmp", (strchr(pf_dev->pci_dev->name, ':') + 1));
+-			"%s_ctrlmp", pf_dev->pci_dev->device.name);
@@ -64 +66 @@
-@@ -467,6 +472,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -772,6 +777,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -72 +74 @@
-@@ -502,7 +508,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -810,7 +816,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -78,2 +80 @@
- 				sizeof(struct nfp_net_rx_desc) *
- 				hw->dev_info->max_qc_size,
+ 				sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC,
@@ -81 +82,2 @@
-@@ -522,7 +528,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+ 		if (tz == NULL) {
+@@ -829,7 +835,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -90 +92 @@
-@@ -540,6 +546,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -847,6 +853,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -98 +100 @@
-@@ -558,7 +565,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+@@ -865,7 +872,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -104,2 +106 @@
- 				sizeof(struct nfp_net_nfd3_tx_desc) *
- 				hw->dev_info->max_qc_size,
+ 				sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC,
@@ -107 +108,2 @@
-@@ -586,7 +593,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+ 		if (tz == NULL) {
+@@ -895,7 +902,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
@@ -116 +118 @@
-@@ -611,7 +618,7 @@ tx_queue_setup_cleanup:
+@@ -920,7 +927,7 @@ tx_queue_setup_cleanup:
@@ -125 +127 @@
-@@ -620,7 +627,7 @@ rx_queue_setup_cleanup:
+@@ -929,7 +936,7 @@ rx_queue_setup_cleanup:
@@ -134 +136 @@
-@@ -641,28 +648,35 @@ static void
+@@ -950,28 +957,35 @@ static void


More information about the stable mailing list