patch 'net/mlx5: fix ESP header match after UDP for group 0' has been queued to stable release 23.11.6
Shani Peretz
shperetz at nvidia.com
Sun Dec 21 15:56:56 CET 2025
Hi,
FYI, your patch has been queued to stable release 23.11.6
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/26/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/shanipr/dpdk-stable
This queued commit can be viewed at:
https://github.com/shanipr/dpdk-stable/commit/9f90b07dc4eb496de9fbac8660e58a5f06b4843e
Thanks.
Shani
---
>From 9f90b07dc4eb496de9fbac8660e58a5f06b4843e Mon Sep 17 00:00:00 2001
From: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
Date: Wed, 12 Nov 2025 11:12:45 +0200
Subject: [PATCH] net/mlx5: fix ESP header match after UDP for group 0
[ upstream commit ed8eb60c9b2c243b4098f59dc6d9a87ee0bbd4c8 ]
The ESP item translation routine always forced the match
on IP next protocol to be 50 (ESP). This prevented
matching ESP packets over UDP.
The patch checks if UDP header is expected, and also forces
match on UDP destination port 4500 if it is not set
by the caller yet.
Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
Cc: stable at dpdk.org
Signed-off-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
drivers/net/mlx5/linux/mlx5_flow_os.c | 6 -----
drivers/net/mlx5/mlx5_flow.h | 3 +++
drivers/net/mlx5/mlx5_flow_dv.c | 34 ++++++++++++++++-----------
3 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 2767b11708..2851b05f6a 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -23,18 +23,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
MLX5_FLOW_LAYER_OUTER_L3;
- const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4;
int ret;
if (!(item_flags & l3m))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 is mandatory to filter on L4");
- if (item_flags & l4m)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "multiple L4 layers not supported");
if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 3a39b77cb1..3303065f87 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -434,6 +434,9 @@ enum mlx5_feature_name {
/* UDP port numbers for GENEVE. */
#define MLX5_UDP_PORT_GENEVE 6081
+/* UDP port numbers for ESP. */
+#define MLX5_UDP_PORT_ESP 4500
+
/* Lowest priority indicator. */
#define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 0a070bf51c..07dacae868 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -9350,29 +9350,35 @@ flow_dv_translate_item_tcp(void *key, const struct rte_flow_item *item,
*/
static void
flow_dv_translate_item_esp(void *key, const struct rte_flow_item *item,
- int inner, uint32_t key_type)
+ int inner, uint32_t key_type, uint64_t item_flags)
{
const struct rte_flow_item_esp *esp_m;
const struct rte_flow_item_esp *esp_v;
void *headers_v;
char *spi_v;
+ bool over_udp = item_flags & (inner ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP);
headers_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :
- MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- if (key_type & MLX5_SET_MATCHER_M)
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ip_protocol, 0xff);
- else
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- ip_protocol, IPPROTO_ESP);
+ MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ if (key_type & MLX5_SET_MATCHER_M) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff);
+ if (over_udp && !MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport))
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 0xFFFF);
+ } else {
+ if (!over_udp)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+ else
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport))
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_ESP);
+ }
if (MLX5_ITEM_VALID(item, key_type))
return;
- MLX5_ITEM_UPDATE(item, key_type, esp_v, esp_m,
- &rte_flow_item_esp_mask);
+ MLX5_ITEM_UPDATE(item, key_type, esp_v, esp_m, &rte_flow_item_esp_mask);
headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- spi_v = inner ? MLX5_ADDR_OF(fte_match_set_misc, headers_v,
- inner_esp_spi) : MLX5_ADDR_OF(fte_match_set_misc
- , headers_v, outer_esp_spi);
+ spi_v = inner ? MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi) :
+ MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
*(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
}
@@ -13747,7 +13753,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ESP:
- flow_dv_translate_item_esp(key, items, tunnel, key_type);
+ flow_dv_translate_item_esp(key, items, tunnel, key_type, wks->item_flags);
wks->priority = MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_ITEM_ESP;
break;
--
2.43.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-12-21 16:54:20.601099034 +0200
+++ 0068-net-mlx5-fix-ESP-header-match-after-UDP-for-group-0.patch 2025-12-21 16:54:17.240089000 +0200
@@ -1 +1 @@
-From ed8eb60c9b2c243b4098f59dc6d9a87ee0bbd4c8 Mon Sep 17 00:00:00 2001
+From 9f90b07dc4eb496de9fbac8660e58a5f06b4843e Mon Sep 17 00:00:00 2001
@@ -3 +3 @@
-Date: Tue, 9 Sep 2025 09:28:53 +0300
+Date: Wed, 12 Nov 2025 11:12:45 +0200
@@ -5,0 +6,2 @@
+[ upstream commit ed8eb60c9b2c243b4098f59dc6d9a87ee0bbd4c8 ]
+
@@ -26 +28 @@
-index 777125e9a8..f5eee46e44 100644
+index 2767b11708..2851b05f6a 100644
@@ -29 +31 @@
-@@ -25,8 +25,6 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
+@@ -23,18 +23,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
@@ -35,7 +37,6 @@
- static const struct rte_flow_item_esp mlx5_flow_item_esp_mask = {
- .hdr = {
- .spi = RTE_BE32(0xffffffff),
-@@ -41,10 +39,6 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "L3 is mandatory to filter on L4");
- }
+ int ret;
+
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
@@ -50 +51 @@
-index 367dacc277..ff61706054 100644
+index 3a39b77cb1..3303065f87 100644
@@ -53 +54 @@
-@@ -489,6 +489,9 @@ struct mlx5_mirror {
+@@ -434,6 +434,9 @@ enum mlx5_feature_name {
@@ -64 +65 @@
-index 18d0d29377..bcce1597e2 100644
+index 0a070bf51c..07dacae868 100644
@@ -67 +68 @@
-@@ -9713,29 +9713,35 @@ flow_dv_translate_item_tcp(void *key, const struct rte_flow_item *item,
+@@ -9350,29 +9350,35 @@ flow_dv_translate_item_tcp(void *key, const struct rte_flow_item *item,
@@ -116 +117 @@
-@@ -14224,7 +14230,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
+@@ -13747,7 +13753,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
More information about the stable
mailing list