[dpdk-dev] [PATCH v1 1/1] net/mlx5: support matching on ICMP identifier fields

Li Zhang lizh at nvidia.com
Wed Sep 23 04:35:22 CEST 2020


The PRM exposes the "icmp_header_data" field in the ICMP match
parameters. Extend the ICMP item default mask with the ICMP identifier
and sequence number fields. When a sequence number is given in the spec
with a mask, the low 16 bits of icmp_header_data are set; when an
identifier is given in the spec with a mask, the high 16 bits of
icmp_header_data are set.

Signed-off-by: Li Zhang <lizh at nvidia.com>
---
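Note for reviewers (not part of the commit message): below is a minimal
sketch of how an application could request this matching through
rte_flow once the patch is applied. The identifier and sequence values
are arbitrary examples, and the flow attributes, actions and the
rte_flow_create() call are omitted.

    #include <rte_byteorder.h>
    #include <rte_icmp.h>
    #include <rte_flow.h>

    /* Match ICMP echo requests on identifier and sequence number.
     * With this patch, the identifier is carried in the high 16 bits
     * of icmp_header_data and the sequence number in the low 16 bits.
     */
    static const struct rte_flow_item_icmp icmp_spec = {
            .hdr = {
                    .icmp_type = RTE_IP_ICMP_ECHO_REQUEST,
                    .icmp_ident = RTE_BE16(0x1234), /* example value */
                    .icmp_seq_nb = RTE_BE16(1),     /* example value */
            },
    };

    static const struct rte_flow_item_icmp icmp_mask = {
            .hdr = {
                    .icmp_type = 0xff,
                    .icmp_ident = RTE_BE16(0xffff), /* exact match */
                    .icmp_seq_nb = RTE_BE16(0xffff),/* exact match */
            },
    };

    static const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            {
                    .type = RTE_FLOW_ITEM_TYPE_ICMP,
                    .spec = &icmp_spec,
                    .mask = &icmp_mask,
            },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
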
 drivers/net/mlx5/mlx5_flow.c    |  9 +++++++--
 drivers/net/mlx5/mlx5_flow_dv.c | 16 +++++++++++++++-
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4c29898203..e3c765950e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1310,6 +1310,7 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_icmp *mask = item->mask;
+	struct rte_flow_item_icmp default_mask;
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
@@ -1331,11 +1332,15 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "multiple L4 layers not supported");
+	memcpy(&default_mask, &rte_flow_item_icmp_mask,
+	       sizeof(struct rte_flow_item_icmp));
+	default_mask.hdr.icmp_ident = RTE_BE16(0xFFFF);
+	default_mask.hdr.icmp_seq_nb = RTE_BE16(0xFFFF);
 	if (!mask)
-		mask = &rte_flow_item_icmp_mask;
+		mask = &default_mask;
 	ret = mlx5_flow_item_acceptable
 		(item, (const uint8_t *)mask,
-		 (const uint8_t *)&rte_flow_item_icmp_mask,
+		 (const uint8_t *)&default_mask,
 		 sizeof(struct rte_flow_item_icmp), error);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 58358ce366..23400fecdd 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7328,6 +7328,8 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 {
 	const struct rte_flow_item_icmp *icmp_m = item->mask;
 	const struct rte_flow_item_icmp *icmp_v = item->spec;
+	uint32_t icmp_header_data_m = 0;
+	uint32_t icmp_header_data_v = 0;
 	void *headers_m;
 	void *headers_v;
 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
@@ -7346,8 +7348,14 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
 	if (!icmp_v)
 		return;
-	if (!icmp_m)
+	if (!icmp_m) {
 		icmp_m = &rte_flow_item_icmp_mask;
+		icmp_header_data_m = RTE_BE32(UINT32_MAX);
+	} else {
+		icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
+		icmp_header_data_m |=
+			rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
+	}
 	/*
 	 * Force flow only to match the non-fragmented IPv4 ICMP packets.
 	 * If only the protocol is specified, no need to match the frag.
@@ -7362,6 +7370,12 @@ flow_dv_translate_item_icmp(void *matcher, void *key,
 		 icmp_m->hdr.icmp_code);
 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
 		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+	icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
+	icmp_header_data_v |= rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
+	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
+		 icmp_header_data_m);
+	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
+		 icmp_header_data_v & icmp_header_data_m);
 }
 
 /**
-- 
2.21.0


