patch 'net/bnxt: fix VF resource allocation strategy' has been queued to stable release 20.11.5
luca.boccassi at gmail.com
Fri Feb 18 13:38:24 CET 2022
Hi,
FYI, your patch has been queued to stable release 20.11.5.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/20/22, so please
shout if you have any objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable
This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/edea3f39ddcb60bc4856759dd526d9a42f2f8925
Thanks.
Luca Boccassi
---
From edea3f39ddcb60bc4856759dd526d9a42f2f8925 Mon Sep 17 00:00:00 2001
From: Ajit Khaparde <ajit.khaparde at broadcom.com>
Date: Thu, 20 Jan 2022 14:42:28 +0530
Subject: [PATCH] net/bnxt: fix VF resource allocation strategy
[ upstream commit 8496483099104c970720ae7baf5ab61aaa4611c7 ]
1. VFs need a notification queue to handle async messages.
But the current logic does not reserve a notification queue,
leading to initialization failure in some cases.
2. With the current logic, the DPDK PF driver reserves only one
VNIC for the VFs, leading to initialization failure when more
than one Rx queue is configured.
Added logic to distribute the NQs and VNICs from the pool
across the VFs and the PF.
While reserving resources for the VFs, the strategy is to keep
both min & max values the same. This could result in a failure
when there aren't enough resources to satisfy the request.
Hence, the fix instructs the FW not to reserve all the minimum
resources requested for the VF. The VF driver can request the
allocated resources from the FW during probe.
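
For illustration, a minimal, self-contained C sketch of the even-split
arithmetic used for the NQ and VNIC pools (split_resource_pool() and
its parameters are illustrative names, not driver code): each pool is
divided by (num_vfs + 1), every VF receives one share, and the PF keeps
its share plus the remainder, mirroring bnxt_calculate_pf_resources()
and bnxt_fill_vf_func_cfg_req_new() in the diff below.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: split a resource pool (e.g. VNICs or NQs) evenly
 * across num_vfs VFs and the PF. The PF also keeps the remainder of
 * the integer division.
 */
static void split_resource_pool(uint16_t pool_max, uint16_t num_vfs,
				uint16_t *per_vf, uint16_t *pf_share)
{
	*per_vf = pool_max / (num_vfs + 1);
	*pf_share = pool_max / (num_vfs + 1) + pool_max % (num_vfs + 1);
}

int main(void)
{
	uint16_t per_vf, pf_share;

	/* e.g. 34 VNICs shared between the PF and 4 VFs */
	split_resource_pool(34, 4, &per_vf, &pf_share);
	/* prints "per VF: 6, PF: 10" */
	printf("per VF: %u, PF: %u\n",
	       (unsigned int)per_vf, (unsigned int)pf_share);
	return 0;
}
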
Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil at broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur at broadcom.com>
---
drivers/net/bnxt/bnxt_hwrm.c | 32 +++++++++++++++++---------------
drivers/net/bnxt/bnxt_hwrm.h | 2 ++
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7cdafde88b..9f61d8d037 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -787,15 +787,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
bp->max_l2_ctx += bp->max_rx_em_flows;
- /* TODO: For now, do not support VMDq/RFS on VFs. */
- if (BNXT_PF(bp)) {
- if (bp->pf->max_vfs)
- bp->max_vnics = 1;
- else
- bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- } else {
- bp->max_vnics = 1;
- }
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
bp->max_l2_ctx, bp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
@@ -3375,7 +3367,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
} else if (BNXT_HAS_NQ(bp)) {
enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
- req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+ req.num_msix = rte_cpu_to_le_16(pf_resc->num_nq_rings);
}
req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
@@ -3387,7 +3379,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
- req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+ req.num_vnics = rte_cpu_to_le_16(pf_resc->num_vnics);
req.fid = rte_cpu_to_le_16(0xffff);
req.enables = rte_cpu_to_le_32(enables);
@@ -3424,14 +3416,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
req->min_rx_rings = req->max_rx_rings;
req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
req->min_l2_ctxs = req->max_l2_ctxs;
- /* TODO: For now, do not support VMDq/RFS on VFs. */
- req->max_vnics = rte_cpu_to_le_16(1);
+ req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
req->min_vnics = req->max_vnics;
req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
(num_vfs + 1));
req->min_hw_ring_grps = req->max_hw_ring_grps;
- req->flags =
- rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+ req->max_msix = rte_cpu_to_le_16(bp->max_nq_rings / (num_vfs + 1));
}
static void
@@ -3491,6 +3481,8 @@ static int bnxt_update_max_resources(struct bnxt *bp,
bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+ bp->max_nq_rings -= rte_le_to_cpu_16(resp->alloc_msix);
+ bp->max_vnics -= rte_le_to_cpu_16(resp->alloc_vnics);
HWRM_UNLOCK();
@@ -3564,6 +3556,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp,
pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
+ pf_resc->num_nq_rings = rte_le_to_cpu_32(resp->alloc_msix);
+ pf_resc->num_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
bp->pf->evb_mode = resp->evb_mode;
HWRM_UNLOCK();
@@ -3584,6 +3578,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
pf_resc->num_rx_rings = bp->max_rx_rings;
pf_resc->num_l2_ctxs = bp->max_l2_ctx;
pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+ pf_resc->num_nq_rings = bp->max_nq_rings;
+ pf_resc->num_vnics = bp->max_vnics;
return;
}
@@ -3602,6 +3598,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
bp->max_l2_ctx % (num_vfs + 1);
pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
bp->max_ring_grps % (num_vfs + 1);
+ pf_resc->num_nq_rings = bp->max_nq_rings / (num_vfs + 1) +
+ bp->max_nq_rings % (num_vfs + 1);
+ pf_resc->num_vnics = bp->max_vnics / (num_vfs + 1) +
+ bp->max_vnics % (num_vfs + 1);
}
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
@@ -3777,6 +3777,8 @@ bnxt_update_pf_resources(struct bnxt *bp,
bp->max_tx_rings = pf_resc->num_tx_rings;
bp->max_rx_rings = pf_resc->num_rx_rings;
bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+ bp->max_nq_rings = pf_resc->num_nq_rings;
+ bp->max_vnics = pf_resc->num_vnics;
}
static int32_t
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 926f32df05..d36defbc70 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -113,6 +113,8 @@ struct bnxt_pf_resource_info {
uint16_t num_rx_rings;
uint16_t num_cp_rings;
uint16_t num_l2_ctxs;
+ uint16_t num_nq_rings;
+ uint16_t num_vnics;
uint32_t num_hw_ring_grps;
};
--
2.30.2
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2022-02-18 12:37:40.142091040 +0000
+++ 0055-net-bnxt-fix-VF-resource-allocation-strategy.patch 2022-02-18 12:37:37.686791831 +0000
@@ -1 +1 @@
-From 8496483099104c970720ae7baf5ab61aaa4611c7 Mon Sep 17 00:00:00 2001
+From edea3f39ddcb60bc4856759dd526d9a42f2f8925 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 8496483099104c970720ae7baf5ab61aaa4611c7 ]
+
@@ -23 +24,0 @@
-Cc: stable at dpdk.org
@@ -34 +35 @@
-index 5418fa1994..b4aeec593e 100644
+index 7cdafde88b..9f61d8d037 100644
@@ -37 +38 @@
-@@ -902,15 +902,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+@@ -787,15 +787,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
@@ -39 +40 @@
- if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
+ if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
@@ -54 +55 @@
-@@ -3495,7 +3487,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+@@ -3375,7 +3367,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
@@ -63 +64 @@
-@@ -3508,7 +3500,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+@@ -3387,7 +3379,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
@@ -72 +73 @@
-@@ -3545,14 +3537,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+@@ -3424,14 +3416,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
@@ -89 +90 @@
-@@ -3612,6 +3602,8 @@ static int bnxt_update_max_resources(struct bnxt *bp,
+@@ -3491,6 +3481,8 @@ static int bnxt_update_max_resources(struct bnxt *bp,
@@ -98 +99 @@
-@@ -3685,6 +3677,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp,
+@@ -3564,6 +3556,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp,
@@ -107 +108 @@
-@@ -3705,6 +3699,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
+@@ -3584,6 +3578,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
@@ -116 +117 @@
-@@ -3723,6 +3719,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
+@@ -3602,6 +3598,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
@@ -127 +128 @@
-@@ -3898,6 +3898,8 @@ bnxt_update_pf_resources(struct bnxt *bp,
+@@ -3777,6 +3777,8 @@ bnxt_update_pf_resources(struct bnxt *bp,
@@ -137 +138 @@
-index 21e1b7a499..63f8d8ceab 100644
+index 926f32df05..d36defbc70 100644
@@ -140 +141 @@
-@@ -114,6 +114,8 @@ struct bnxt_pf_resource_info {
+@@ -113,6 +113,8 @@ struct bnxt_pf_resource_info {