[dpdk-dev] [PATCH] net/mlx5: fix tunnel flow priority

Matan Azrad matan at mellanox.com
Thu Feb 13 14:03:25 CET 2020


The PMD internally manages flow priorities in addition to the
user-configured priority.

So, two flows with the same user priority may get different internal
priorities.

The method:
The more specific a flow is, the higher priority it gets
(higher means matched first).

In addition, when the user creates an RSS flow, the PMD splits it into
sub-flows according to the flow RSS layers, as the HW requires for the
RSS TIRs.
The internal priority of each sub-flow is decided by its last layer:
L2, L3 or L4 (L2 lowest, L4 highest).

The tunnel layer priority was wrongly set to L4 all the time, even when
the flow is configured with inner-RSS.

Hence, the first RSS split, which always takes the tunnel layer
priority, is matched before the more specific splits.

Change the tunnel layer priority to L2 when inner-RSS is configured.
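
For illustration only (not part of the patch): a minimal standalone C
sketch of the selection logic that the diff below applies to every
tunnel item (GRE, NVGRE, VXLAN, VXLAN-GPE, GENEVE, MPLS, GTP). The
PRIORITY_MAP_* values here are stand-ins, not the PMD's actual
MLX5_PRIORITY_MAP_* definitions.

#include <stdio.h>

/* Stand-in priority levels (assumed values, for illustration only);
 * per the description above, L4 is the high level (matched first)
 * and L2 the low one. */
#define PRIORITY_MAP_L2 2
#define PRIORITY_MAP_L3 1
#define PRIORITY_MAP_L4 0

/*
 * Pick the matcher sub-priority for a tunnel item.
 * rss_level >= 2 means inner-RSS: the tunnel split is then the least
 * specific one, so it gets the low (L2) priority and the more specific
 * inner splits are matched first.
 */
static int
tunnel_matcher_priority(unsigned int rss_level)
{
	return rss_level >= 2 ? PRIORITY_MAP_L2 : PRIORITY_MAP_L4;
}

int
main(void)
{
	printf("outer-RSS (level 1): sub-priority %d\n",
	       tunnel_matcher_priority(1));
	printf("inner-RSS (level 2): sub-priority %d\n",
	       tunnel_matcher_priority(2));
	return 0;
}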

Fixes: d4a405186b73 ("net/mlx5: support tunnel RSS level")
Cc: stable at dpdk.org

Signed-off-by: Matan Azrad <matan at mellanox.com>
Acked-by: Ori Kam <orika at mellanox.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index a9bb0b4..a8c8a58 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7620,6 +7620,8 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			flow_dv_translate_item_gre(match_mask, match_value,
 						   items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
@@ -7630,27 +7632,37 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
 			flow_dv_translate_item_nvgre(match_mask, match_value,
 						     items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 			flow_dv_translate_item_vxlan_gpe(match_mask,
 							 match_value, items,
 							 tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
 			flow_dv_translate_item_geneve(match_mask, match_value,
 						      items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GENEVE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_dv_translate_item_mpls(match_mask, match_value,
 						    items, last_item, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MARK:
@@ -7692,6 +7704,8 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_GTP:
 			flow_dv_translate_item_gtp(match_mask, match_value,
 						   items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GTP;
 			break;
 		default:
-- 
1.8.3.1
