patch 'net/mlx5: fix shared queue port number in vector Rx' has been queued to stable release 23.11.3
Xueming Li
xuemingl at nvidia.com
Sat Dec 7 09:00:26 CET 2024
Hi,
FYI, your patch has been queued to stable release 23.11.3.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/10/24, so please
shout if you have any objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if any rebasing was
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=200ea61fba771b8fe4a8ae6f2abf9827a427f8f6
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From 200ea61fba771b8fe4a8ae6f2abf9827a427f8f6 Mon Sep 17 00:00:00 2001
From: Alexander Kozyrev <akozyrev at nvidia.com>
Date: Mon, 28 Oct 2024 19:53:54 +0200
Subject: [PATCH] net/mlx5: fix shared queue port number in vector Rx
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit 3638f431b9ff39003e31c3a761d407e04b25576a ]
The wrong CQE is used to get the shared Rx queue port number in the
vectorized Rx burst routines. Fix the CQE indexing.
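For reference, the corrected pairing boils down to the following
(a simplified sketch, not the literal driver code): the mbuf array is
always filled contiguously, while p1/p2/p3 are offsets into the CQE
ring, so they must only be used to index cq[]:

    /* mbufs are contiguous; the p1/p2/p3 offsets apply to cq[] only */
    pkts[pos]->port     = cq[pos].user_index_low;
    pkts[pos + 1]->port = cq[pos + p1].user_index_low;
    pkts[pos + 2]->port = cq[pos + p2].user_index_low;
    pkts[pos + 3]->port = cq[pos + p3].user_index_low;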
Fixes: 25ed2ebff131 ("net/mlx5: support shared Rx queue port data path")
Signed-off-by: Alexander Kozyrev <akozyrev at nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 12 ++++++------
drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 24 ++++++++++++------------
drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 6 +++---
3 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index cccfa7f2d3..f6e74f4180 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -1249,9 +1249,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (unlikely(rxq->shared)) {
pkts[pos]->port = cq[pos].user_index_low;
- pkts[pos + p1]->port = cq[pos + p1].user_index_low;
- pkts[pos + p2]->port = cq[pos + p2].user_index_low;
- pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+ pkts[pos + 1]->port = cq[pos + p1].user_index_low;
+ pkts[pos + 2]->port = cq[pos + p2].user_index_low;
+ pkts[pos + 3]->port = cq[pos + p3].user_index_low;
}
if (rxq->hw_timestamp) {
int offset = rxq->timestamp_offset;
@@ -1295,17 +1295,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
metadata;
pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
metadata = rte_be_to_cpu_32
- (cq[pos + 1].flow_table_metadata) & mask;
+ (cq[pos + p1].flow_table_metadata) & mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
metadata;
pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
metadata = rte_be_to_cpu_32
- (cq[pos + 2].flow_table_metadata) & mask;
+ (cq[pos + p2].flow_table_metadata) & mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
metadata;
pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
metadata = rte_be_to_cpu_32
- (cq[pos + 3].flow_table_metadata) & mask;
+ (cq[pos + p3].flow_table_metadata) & mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
metadata;
pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 3ed688191f..942d395dc9 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -835,13 +835,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (unlikely(rxq->shared)) {
- elts[pos]->port = container_of(p0, struct mlx5_cqe,
+ pkts[pos]->port = container_of(p0, struct mlx5_cqe,
pkt_info)->user_index_low;
- elts[pos + 1]->port = container_of(p1, struct mlx5_cqe,
+ pkts[pos + 1]->port = container_of(p1, struct mlx5_cqe,
pkt_info)->user_index_low;
- elts[pos + 2]->port = container_of(p2, struct mlx5_cqe,
+ pkts[pos + 2]->port = container_of(p2, struct mlx5_cqe,
pkt_info)->user_index_low;
- elts[pos + 3]->port = container_of(p3, struct mlx5_cqe,
+ pkts[pos + 3]->port = container_of(p3, struct mlx5_cqe,
pkt_info)->user_index_low;
}
if (unlikely(rxq->hw_timestamp)) {
@@ -853,34 +853,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
ts = rte_be_to_cpu_64
(container_of(p0, struct mlx5_cqe,
pkt_info)->timestamp);
- mlx5_timestamp_set(elts[pos], offset,
+ mlx5_timestamp_set(pkts[pos], offset,
mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p1, struct mlx5_cqe,
pkt_info)->timestamp);
- mlx5_timestamp_set(elts[pos + 1], offset,
+ mlx5_timestamp_set(pkts[pos + 1], offset,
mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p2, struct mlx5_cqe,
pkt_info)->timestamp);
- mlx5_timestamp_set(elts[pos + 2], offset,
+ mlx5_timestamp_set(pkts[pos + 2], offset,
mlx5_txpp_convert_rx_ts(sh, ts));
ts = rte_be_to_cpu_64
(container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
- mlx5_timestamp_set(elts[pos + 3], offset,
+ mlx5_timestamp_set(pkts[pos + 3], offset,
mlx5_txpp_convert_rx_ts(sh, ts));
} else {
- mlx5_timestamp_set(elts[pos], offset,
+ mlx5_timestamp_set(pkts[pos], offset,
rte_be_to_cpu_64(container_of(p0,
struct mlx5_cqe, pkt_info)->timestamp));
- mlx5_timestamp_set(elts[pos + 1], offset,
+ mlx5_timestamp_set(pkts[pos + 1], offset,
rte_be_to_cpu_64(container_of(p1,
struct mlx5_cqe, pkt_info)->timestamp));
- mlx5_timestamp_set(elts[pos + 2], offset,
+ mlx5_timestamp_set(pkts[pos + 2], offset,
rte_be_to_cpu_64(container_of(p2,
struct mlx5_cqe, pkt_info)->timestamp));
- mlx5_timestamp_set(elts[pos + 3], offset,
+ mlx5_timestamp_set(pkts[pos + 3], offset,
rte_be_to_cpu_64(container_of(p3,
struct mlx5_cqe, pkt_info)->timestamp));
}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2bdd1f676d..fb59c11346 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -783,9 +783,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (unlikely(rxq->shared)) {
pkts[pos]->port = cq[pos].user_index_low;
- pkts[pos + p1]->port = cq[pos + p1].user_index_low;
- pkts[pos + p2]->port = cq[pos + p2].user_index_low;
- pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+ pkts[pos + 1]->port = cq[pos + p1].user_index_low;
+ pkts[pos + 2]->port = cq[pos + p2].user_index_low;
+ pkts[pos + 3]->port = cq[pos + p3].user_index_low;
}
if (unlikely(rxq->hw_timestamp)) {
int offset = rxq->timestamp_offset;
--
2.34.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2024-12-06 23:26:46.325119501 +0800
+++ 0068-net-mlx5-fix-shared-queue-port-number-in-vector-Rx.patch 2024-12-06 23:26:44.003044827 +0800
@@ -1 +1 @@
-From 3638f431b9ff39003e31c3a761d407e04b25576a Mon Sep 17 00:00:00 2001
+From 200ea61fba771b8fe4a8ae6f2abf9827a427f8f6 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 3638f431b9ff39003e31c3a761d407e04b25576a ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org
@@ -21 +23 @@
-index b2bbc4ba17..ca614ecf9d 100644
+index cccfa7f2d3..f6e74f4180 100644
@@ -24 +26 @@
-@@ -1251,9 +1251,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+@@ -1249,9 +1249,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
@@ -37 +39 @@
-@@ -1297,17 +1297,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+@@ -1295,17 +1295,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
@@ -59 +61 @@
-index 0ce9827ed9..519fff5b2c 100644
+index 3ed688191f..942d395dc9 100644
@@ -62 +64 @@
-@@ -837,13 +837,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+@@ -835,13 +835,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
@@ -80 +82 @@
-@@ -855,34 +855,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+@@ -853,34 +853,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
@@ -124 +126 @@
-index e71d6c303f..0a2b67e750 100644
+index 2bdd1f676d..fb59c11346 100644
@@ -127 +129 @@
-@@ -785,9 +785,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
+@@ -783,9 +783,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,