patch 'net/mlx5: remove duplication of L3 flow item validation' has been queued to stable release 23.11.1
Xueming Li
xuemingl at nvidia.com
Sat Apr 13 14:49:39 CEST 2024
Hi,
FYI, your patch has been queued to stable release 23.11.1
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/15/24. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double-check that the rebase was
done correctly.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=c551015ebb6f2a9615e6e2583603794232665af5
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From c551015ebb6f2a9615e6e2583603794232665af5 Mon Sep 17 00:00:00 2001
From: Gregory Etelson <getelson at nvidia.com>
Date: Thu, 29 Feb 2024 18:05:03 +0200
Subject: [PATCH] net/mlx5: remove duplication of L3 flow item validation
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit 27e44a6f53eccc7d2ce80f6466fa214158f0ee81 ]
Remove code duplication in DV L3 item validation and translation.
Fixes: 3193c2494eea ("net/mlx5: fix L4 protocol validation")
Signed-off-by: Gregory Etelson <getelson at nvidia.com>
Acked-by: Ori Kam <orika at nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 151 +++++++++-----------------------
1 file changed, 43 insertions(+), 108 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1c85331cb6..eaadbf577f 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7248,6 +7248,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline uint8_t
+mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item,
+ enum MLX5_SET_MATCHER key_type)
+{
+#define MLX5_L3_NEXT_PROTOCOL(i, ms) \
+ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \
+ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \
+ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \
+ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\
+ 0xff)
+
+ uint8_t next_protocol;
+
+ if (l3_item->mask != NULL && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ if (next_protocol)
+ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ else
+ next_protocol = 0xff;
+ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ return next_protocol;
+
+#undef MLX5_L3_NEXT_PROTOCOL
+}
+
/**
* Validate IB BTH item.
*
@@ -7530,19 +7564,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -7556,22 +7579,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- item_ipv6_proto =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
ret = flow_dv_validate_item_ipv6_frag_ext(items,
@@ -7582,19 +7591,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
last_item = tunnel ?
MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
@@ -13735,28 +13733,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -13766,56 +13743,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->mask))->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->spec))->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
flow_dv_translate_item_ipv6_frag_ext
(key, items, tunnel, key_type);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->mask))->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->spec))->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(key, items, tunnel, key_type);
--
2.34.1
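[Editor's note] For readers skimming the diff: the refactor replaces three
near-identical if/else ladders (IPv4, IPv6, and IPv6 fragment extension, each
duplicated across flow_dv_validate() and flow_dv_translate_items()) with one
always-inlined helper, mlx5_flow_l3_next_protocol(). Below is a minimal
standalone C sketch of that pattern; the struct and enum names are simplified
stand-ins, not the real DPDK definitions.

	/*
	 * Minimal sketch of the consolidation done by
	 * mlx5_flow_l3_next_protocol(). All types here are hypothetical
	 * stand-ins for the rte_flow_item_* structures.
	 */
	#include <stdint.h>
	#include <stdio.h>

	enum item_type { ITEM_IPV4, ITEM_IPV6, ITEM_IPV6_FRAG_EXT };
	enum matcher_key { KEY_HS_M, KEY_HS_V, KEY_NONE = -1 };

	struct ipv4_item { uint8_t next_proto_id; };
	struct ipv6_item { uint8_t proto; };
	struct frag_item { uint8_t next_header; };

	struct flow_item {
		enum item_type type;
		const void *spec;	/* values to match */
		const void *mask;	/* which bits of spec are significant */
	};

	/* Pick the next-protocol field of either ->spec or ->mask by type. */
	#define L3_NEXT_PROTOCOL(i, ms) \
		((i)->type == ITEM_IPV4 ? \
		 ((const struct ipv4_item *)(i)->ms)->next_proto_id : \
		 (i)->type == ITEM_IPV6 ? \
		 ((const struct ipv6_item *)(i)->ms)->proto : \
		 ((const struct frag_item *)(i)->ms)->next_header)

	static uint8_t
	l3_next_protocol(const struct flow_item *item, enum matcher_key key)
	{
		if (item->mask && item->spec) {
			uint8_t m = L3_NEXT_PROTOCOL(item, mask);

			/* Masked-out field: reset to "any" (0xff). */
			return m ? (uint8_t)(m & L3_NEXT_PROTOCOL(item, spec))
				 : 0xff;
		}
		if (key == KEY_HS_M && item->mask)
			return L3_NEXT_PROTOCOL(item, mask);
		if (key == KEY_HS_V && item->spec)
			return L3_NEXT_PROTOCOL(item, spec);
		return 0xff; /* Reset for inner layer. */
	}

	int main(void)
	{
		struct ipv4_item spec = { .next_proto_id = 6 }; /* TCP */
		struct ipv4_item mask = { .next_proto_id = 0xff };
		struct flow_item item = { ITEM_IPV4, &spec, &mask };

		printf("next protocol: %u\n", l3_next_protocol(&item, KEY_NONE));
		return 0;
	}

Note that the validation paths in the patch pass (enum MLX5_SET_MATCHER)-1 as
the key type, mirrored by KEY_NONE above, so they can only take the combined
spec-and-mask branch or fall through to the 0xff reset; the HS_M/HS_V branches
matter only in the translation path.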
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2024-04-13 20:43:08.039898658 +0800
+++ 0099-net-mlx5-remove-duplication-of-L3-flow-item-validati.patch 2024-04-13 20:43:05.087753814 +0800
@@ -1 +1 @@
-From 27e44a6f53eccc7d2ce80f6466fa214158f0ee81 Mon Sep 17 00:00:00 2001
+From c551015ebb6f2a9615e6e2583603794232665af5 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 27e44a6f53eccc7d2ce80f6466fa214158f0ee81 ]
@@ -9 +11,0 @@
-Cc: stable at dpdk.org
@@ -18 +20 @@
-index f1584ed6e0..9e444c8a1c 100644
+index 1c85331cb6..eaadbf577f 100644
@@ -21 +23 @@
-@@ -7488,6 +7488,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+@@ -7248,6 +7248,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
@@ -62 +64 @@
-@@ -7770,19 +7804,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+@@ -7530,19 +7564,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -84 +86 @@
-@@ -7796,22 +7819,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+@@ -7556,22 +7579,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -109 +111 @@
-@@ -7822,19 +7831,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+@@ -7582,19 +7591,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -131 +133 @@
-@@ -13997,28 +13995,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
+@@ -13735,28 +13733,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
@@ -161 +163 @@
-@@ -14028,56 +14005,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
+@@ -13766,56 +13743,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,