[dpdk-dev] [PATCH v1 3/5] net/ice: enable QinQ filter for switch

Haiyue Wang <haiyue.wang@intel.com>
Mon Dec 14 08:07:05 CET 2020


Enable double VLAN support for the QinQ switch filter.
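
The new pattern_eth_qinq_* entries let an application request a match on
both the outer and the inner VLAN tag. As an illustration only (not part
of this patch), such a rule could be built through the generic rte_flow
API roughly as in the sketch below; the helper name, port id, VLAN IDs
and queue index are made-up placeholders:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: steer QinQ traffic (outer VID 100, inner VID 200)
 * to a given Rx queue.  All values are placeholders for illustration.
 */
static struct rte_flow *
create_qinq_queue_rule(uint16_t port_id, uint16_t queue_id,
		       struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Outer tag: match the VLAN ID bits only. */
	struct rte_flow_item_vlan outer_spec = { .tci = RTE_BE16(100) };
	struct rte_flow_item_vlan outer_mask = { .tci = RTE_BE16(0x0fff) };
	/* Inner tag: likewise VLAN ID only; matching on inner_type is
	 * rejected by the switch filter after this patch.
	 */
	struct rte_flow_item_vlan inner_spec = { .tci = RTE_BE16(200) };
	struct rte_flow_item_vlan inner_mask = { .tci = RTE_BE16(0x0fff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &outer_spec, .mask = &outer_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &inner_spec, .mask = &inner_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}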

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
 drivers/net/ice/ice_generic_flow.c  |   8 +++
 drivers/net/ice/ice_generic_flow.h  |   1 +
 drivers/net/ice/ice_switch_filter.c | 104 +++++++++++++++++++++++++---
 3 files changed, 102 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 1429cbc3b..1712d3b2e 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1455,6 +1455,14 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_PPPOES,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_pppoes_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_PPPOES,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 434d2f425..dc45d8dc6 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -426,6 +426,7 @@ extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
+extern enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[];
 extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
 extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
 extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 8cba6eb7b..43c755e30 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -35,11 +35,15 @@
 #define ICE_SW_INSET_ETHER ( \
 	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
 #define ICE_SW_INSET_MAC_VLAN ( \
-		ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
-		ICE_INSET_VLAN_OUTER)
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+	ICE_INSET_VLAN_INNER)
+#define ICE_SW_INSET_MAC_QINQ  ( \
+	ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER)
 #define ICE_SW_INSET_MAC_IPV4 ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
+#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
+	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
 #define ICE_SW_INSET_MAC_IPV4_TCP ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
 	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
@@ -52,6 +56,8 @@
 	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
 	ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
 	ICE_INSET_IPV6_NEXT_HDR)
+#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
+	ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
 #define ICE_SW_INSET_MAC_IPV6_TCP ( \
 	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
 	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
@@ -182,6 +188,8 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_ethertype_qinq,
+			ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
 	{pattern_eth_arp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
@@ -262,6 +270,18 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv6_pfcp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv4,
+			ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv6,
+			ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes,
+			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
 };
 
 static struct
@@ -304,6 +324,8 @@ ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_ethertype_qinq,
+			ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
 	{pattern_eth_arp,
 		ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
@@ -384,6 +406,18 @@ ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
 			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv6_pfcp,
 			ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv4,
+			ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_ipv6,
+			ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes,
+			ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_proto,
+			ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv4,
+			ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_qinq_pppoes_ipv6,
+			ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
 };
 
 static int
@@ -516,6 +550,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
+	bool inner_vlan_valid = 0;
+	bool outer_vlan_valid = 0;
 	bool tunnel_valid = 0;
 	bool profile_rule = 0;
 	bool nvgre_valid = 0;
@@ -1062,23 +1098,40 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					   "Invalid VLAN item");
 				return 0;
 			}
+
+			if (!outer_vlan_valid &&
+			    (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+			     *tun_type == ICE_NON_TUN_QINQ))
+				outer_vlan_valid = 1;
+			else if (!inner_vlan_valid &&
+				 (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+				  *tun_type == ICE_NON_TUN_QINQ))
+				inner_vlan_valid = 1;
+			else if (!inner_vlan_valid)
+				inner_vlan_valid = 1;
+
 			if (vlan_spec && vlan_mask) {
-				list[t].type = ICE_VLAN_OFOS;
+				if (outer_vlan_valid && !inner_vlan_valid) {
+					list[t].type = ICE_VLAN_EX;
+					input_set |= ICE_INSET_VLAN_OUTER;
+				} else if (inner_vlan_valid) {
+					list[t].type = ICE_VLAN_OFOS;
+					input_set |= ICE_INSET_VLAN_INNER;
+				}
+
 				if (vlan_mask->tci) {
 					list[t].h_u.vlan_hdr.vlan =
 						vlan_spec->tci;
 					list[t].m_u.vlan_hdr.vlan =
 						vlan_mask->tci;
-					input_set |= ICE_INSET_VLAN_OUTER;
 					input_set_byte += 2;
 				}
 				if (vlan_mask->inner_type) {
-					list[t].h_u.vlan_hdr.type =
-						vlan_spec->inner_type;
-					list[t].m_u.vlan_hdr.type =
-						vlan_mask->inner_type;
-					input_set |= ICE_INSET_ETHERTYPE;
-					input_set_byte += 2;
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return 0;
 				}
 				t++;
 			}
@@ -1380,8 +1433,27 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		}
 	}
 
+	if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
+	    inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
+	else if (*tun_type == ICE_SW_TUN_PPPOE &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_PPPOE_QINQ;
+	else if (*tun_type == ICE_NON_TUN &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_NON_TUN_QINQ;
+	else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
+		 inner_vlan_valid && outer_vlan_valid)
+		*tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+
 	if (pppoe_patt_valid && !pppoe_prot_valid) {
-		if (ipv6_valid && udp_valid)
+		if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
+		else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
+		else if (inner_vlan_valid && outer_vlan_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_QINQ;
+		else if (ipv6_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
 		else if (ipv6_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
@@ -1659,6 +1731,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	uint16_t lkups_num = 0;
 	const struct rte_flow_item *item = pattern;
 	uint16_t item_num = 0;
+	uint16_t vlan_num = 0;
 	enum ice_sw_tunnel_type tun_type =
 			ICE_NON_TUN;
 	struct ice_pattern_match_item *pattern_match_item = NULL;
@@ -1674,6 +1747,10 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			if (eth_mask->type == UINT16_MAX)
 				tun_type = ICE_SW_TUN_AND_NON_TUN;
 		}
+
+		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
+			vlan_num++;
+
 		/* reserve one more memory slot for ETH which may
 		 * consume 2 lookup items.
 		 */
@@ -1681,6 +1758,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			item_num++;
 	}
 
+	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
+		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+	else if (vlan_num == 2)
+		tun_type = ICE_NON_TUN_QINQ;
+
 	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
 	if (!list) {
 		rte_flow_error_set(error, EINVAL,
-- 
2.29.2
