patch 'app/testpmd: fix encap/decap size calculation' has been queued to stable release 22.11.2

Xueming Li xuemingl at nvidia.com
Sun Apr 9 17:25:16 CEST 2023


Hi,

FYI, your patch has been queued to stable release 22.11.2

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/11/23. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging/commit/1cbb884f358ba31219bd9e981bc28c5e2bf349c7

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 1cbb884f358ba31219bd9e981bc28c5e2bf349c7 Mon Sep 17 00:00:00 2001
From: Michael Baum <michaelba at nvidia.com>
Date: Thu, 16 Mar 2023 20:24:12 +0200
Subject: [PATCH] app/testpmd: fix encap/decap size calculation
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 7bdf7a13ae34dd9c5d8a6aeef7019003dade6f83 ]

The testpmd application has several functions that build either an encap or
a decap buffer for some special cases:
 - "l2_encap" and "l2_decap"
 - "mplsogre_encap" and "mplsogre_decap"
 - "mplsoudp_encap" and "mplsoudp_decap"

These functions use both the "rte_flow_item_eth" and "rte_flow_item_vlan"
structures to represent the headers and copy them into the "raw_encap"
action. The size of either "raw_encap" or "raw_decap" is calculated as the
sum of the header sizes.

However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain more
fields than the original headers, so using their sizes leads to an incorrect
size calculation.

This patch uses the "rte_ether_hdr" and "rte_vlan_hdr" structures for the
header size calculation instead.
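
For illustration only (not part of the patch), here is a minimal standalone
sketch of the size mismatch, assuming a DPDK build environment that provides
rte_ether.h and rte_flow.h; the exact byte counts depend on the DPDK version
and ABI:

  #include <stdio.h>
  #include <rte_ether.h>
  #include <rte_flow.h>

  int
  main(void)
  {
  	/* The flow items wrap the protocol headers and add match-only
  	 * fields (e.g. has_vlan/has_more_vlan plus reserved bits), so
  	 * their sizes exceed the on-wire header sizes that the raw_encap
  	 * buffer actually needs.
  	 */
  	printf("rte_flow_item_eth=%zu rte_ether_hdr=%zu\n",
  	       sizeof(struct rte_flow_item_eth),
  	       sizeof(struct rte_ether_hdr));
  	printf("rte_flow_item_vlan=%zu rte_vlan_hdr=%zu\n",
  	       sizeof(struct rte_flow_item_vlan),
  	       sizeof(struct rte_vlan_hdr));
  	return 0;
  }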

Fixes: 3e77031be855 ("app/testpmd: add MPLSoGRE encapsulation")
Fixes: a1191d39cb57 ("app/testpmd: add MPLSoUDP encapsulation")

Signed-off-by: Michael Baum <michaelba at nvidia.com>
Acked-by: Ori Kam <orika at nvidia.com>
Acked-by: Ferruh Yigit <ferruh.yigit at amd.com>
---
 app/test-pmd/cmdline_flow.c | 48 ++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 88108498e0..042da57a61 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -7737,15 +7737,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_encap_conf.select_vlan) {
 		if (l2_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_encap_data->conf.size = header -
 		action_encap_data->data;
@@ -7793,11 +7793,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
 	header = action_decap_data->data;
 	if (l2_decap_conf.select_vlan)
 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_decap_conf.select_vlan) {
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_decap_data->conf.size = header -
 		action_decap_data->data;
@@ -7877,15 +7877,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -7972,15 +7972,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8071,15 +8071,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8168,15 +8168,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.src.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
-- 
2.25.1

---
  Diff of the applied patch vs the upstream commit (please double-check if non-empty):
---
--- -	2023-04-09 21:45:42.040054800 +0800
+++ 0128-app-testpmd-fix-encap-decap-size-calculation.patch	2023-04-09 21:45:38.799042200 +0800
@@ -1 +1 @@
-From 7bdf7a13ae34dd9c5d8a6aeef7019003dade6f83 Mon Sep 17 00:00:00 2001
+From 1cbb884f358ba31219bd9e981bc28c5e2bf349c7 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 7bdf7a13ae34dd9c5d8a6aeef7019003dade6f83 ]
@@ -26 +28,0 @@
-Cc: stable at dpdk.org
@@ -36 +38 @@
-index 9309607f11..58939ec321 100644
+index 88108498e0..042da57a61 100644
@@ -39 +41 @@
-@@ -8245,15 +8245,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
+@@ -7737,15 +7737,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
@@ -41 +43 @@
- 	memcpy(eth.hdr.src_addr.addr_bytes,
+ 	memcpy(eth.src.addr_bytes,
@@ -49 +51 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
@@ -51 +53 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
@@ -59 +61 @@
-@@ -8301,11 +8301,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
+@@ -7793,11 +7793,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
@@ -62 +64 @@
- 		eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
@@ -75 +77 @@
-@@ -8385,15 +8385,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
+@@ -7877,15 +7877,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
@@ -77 +79 @@
- 	memcpy(eth.hdr.src_addr.addr_bytes,
+ 	memcpy(eth.src.addr_bytes,
@@ -85 +87 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
@@ -87 +89 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
@@ -95 +97 @@
-@@ -8480,15 +8480,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
+@@ -7972,15 +7972,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
@@ -97 +99 @@
- 	memcpy(eth.hdr.src_addr.addr_bytes,
+ 	memcpy(eth.src.addr_bytes,
@@ -105 +107 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
@@ -107 +109 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
@@ -115 +117 @@
-@@ -8579,15 +8579,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
+@@ -8071,15 +8071,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
@@ -117 +119 @@
- 	memcpy(eth.hdr.src_addr.addr_bytes,
+ 	memcpy(eth.src.addr_bytes,
@@ -125 +127 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
@@ -127 +129 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
@@ -135 +137 @@
-@@ -8676,15 +8676,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
+@@ -8168,15 +8168,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
@@ -137 +139 @@
- 	memcpy(eth.hdr.src_addr.addr_bytes,
+ 	memcpy(eth.src.addr_bytes,
@@ -145 +147 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
@@ -147 +149 @@
- 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

