[dpdk-dev] [PATCH v5 09/11] net/mlx5: introduce VXLAN-GPE tunnel type

Xueming Li xuemingl at mellanox.com
Fri Apr 20 14:23:38 CEST 2018


Signed-off-by: Xueming Li <xuemingl at mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
 doc/guides/nics/mlx5.rst     |   8 ++--
 drivers/net/mlx5/mlx5_flow.c | 107 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c |   3 +-
 3 files changed, 111 insertions(+), 7 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 421274729..6b83759c8 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -329,16 +329,16 @@ Run-time configuration
 
 - ``l3_vxlan_en`` parameter [int]
 
-  A nonzero value allows L3 VXLAN flow creation. To enable L3 VXLAN, users
-  has to configure firemware and enable this prameter. This is a prerequisite
-  to receive this kind of traffic.
+  A nonzero value allows L3 VXLAN and VXLAN-GPE flow creation. To enable
+  L3 VXLAN or VXLAN-GPE, users have to configure the firmware and enable
+  this parameter. This is a prerequisite for receiving this kind of traffic.
 
   Disabled by default.
 
 Firmware configuration
 ~~~~~~~~~~~~~~~~~~~~~~
 
-- L3 VXLAN destination UDP port
+- L3 VXLAN and VXLAN-GPE destination UDP port
 
    .. code-block:: console
 
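For context (not part of the patch): a minimal application-side sketch of the
kind of rule this series enables, built with the public rte_flow API. The port
id, Rx queue index and VNI value are illustrative placeholders, and it assumes
the port was probed with l3_vxlan_en=1 and the VXLAN-GPE UDP port configured
in firmware as described above.

#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_vxlan_gpe_rule(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match VNI 42; the default mask from rte_flow.h covers the VNI bytes. */
	struct rte_flow_item_vxlan_gpe gpe_spec = {
		.vni = { 0x00, 0x00, 0x2a },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{
			.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
			.spec = &gpe_spec,
			.mask = &rte_flow_item_vxlan_gpe_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/*
	 * Returns NULL and fills "error" on failure, e.g. when the
	 * l3_vxlan_en devarg is left at its default of 0.
	 */
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}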
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 593c960f8..a55644cd0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -92,6 +92,11 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 		       struct mlx5_flow_data *data);
 
 static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+			   const void *default_mask,
+			   struct mlx5_flow_data *data);
+
+static int
 mlx5_flow_create_gre(const struct rte_flow_item *item,
 		     const void *default_mask,
 		     struct mlx5_flow_data *data);
@@ -242,10 +247,12 @@ struct rte_flow {
 
 #define IS_TUNNEL(type) ( \
 	(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
+	(type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
 	(type) == RTE_FLOW_ITEM_TYPE_GRE)
 
 const uint32_t flow_ptype[] = {
 	[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
 	[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
 };
 
@@ -254,6 +261,8 @@ const uint32_t flow_ptype[] = {
 const uint32_t ptype_ext[] = {
 	[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
 					      RTE_PTYPE_L4_UDP,
+	[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)]	= RTE_PTYPE_TUNNEL_VXLAN_GPE |
+						  RTE_PTYPE_L4_UDP,
 	[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
 };
 
@@ -311,6 +320,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 	[RTE_FLOW_ITEM_TYPE_END] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
 			       RTE_FLOW_ITEM_TYPE_VXLAN,
+			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
 			       RTE_FLOW_ITEM_TYPE_GRE),
 	},
 	[RTE_FLOW_ITEM_TYPE_ETH] = {
@@ -389,7 +399,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.dst_sz = sizeof(struct ibv_flow_spec_ipv6),
 	},
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
-		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
+			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_udp){
 			.hdr = {
@@ -441,6 +452,19 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_vxlan,
 		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
 	},
+	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_vxlan_gpe){
+			.vni = "\xff\xff\xff",
+		},
+		.default_mask = &rte_flow_item_vxlan_gpe_mask,
+		.mask_sz = sizeof(struct rte_flow_item_vxlan_gpe),
+		.convert = mlx5_flow_create_vxlan_gpe,
+		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+	},
 };
 
 /** Structure to pass to the conversion function. */
@@ -1775,6 +1799,87 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert VXLAN-GPE item to Verbs specification.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param default_mask[in]
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
+			   const void *default_mask,
+			   struct mlx5_flow_data *data)
+{
+	struct priv *priv = data->dev->data->dev_private;
+	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+	struct mlx5_flow_parse *parser = data->parser;
+	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+	struct ibv_flow_spec_tunnel vxlan = {
+		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+		.size = size,
+	};
+	union vni {
+		uint32_t vlan_id;
+		uint8_t vni[4];
+	} id;
+
+	if (!priv->config.l3_vxlan_en)
+		return rte_flow_error_set(data->error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L3 VXLAN not enabled by device"
+					  " parameter and/or not configured"
+					  " in firmware");
+	id.vni[0] = 0;
+	parser->inner = IBV_FLOW_SPEC_INNER;
+	parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)];
+	parser->out_layer = parser->layer;
+	parser->layer = HASH_RXQ_TUNNEL;
+	/* Default VXLAN-GPE to outer RSS. */
+	if (!parser->rss_conf.level)
+		parser->rss_conf.level = 1;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		memcpy(&id.vni[1], spec->vni, 3);
+		vxlan.val.tunnel_id = id.vlan_id;
+		memcpy(&id.vni[1], mask->vni, 3);
+		vxlan.mask.tunnel_id = id.vlan_id;
+		if (spec->protocol)
+			return rte_flow_error_set(data->error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "VxLAN-GPE protocol not"
+						  " supported");
+		/* Remove unwanted bits from values. */
+		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+	}
+	/*
+	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only
+	 * this layer is defined in the Verbs specification, it is
+	 * interpreted as a wildcard and all packets will match this rule;
+	 * if it follows a full stack layer (e.g. eth / ipv4 / udp), all
+	 * packets matching the layers before will also match this rule.
+	 * To avoid such a situation, VNI 0 is currently refused.
+	 */
+	/* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
+	if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
+		return rte_flow_error_set(data->error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "VxLAN-GPE vni cannot be 0");
+	mlx5_flow_create_copy(parser, &vxlan, size);
+	return 0;
+}
+
+/**
  * Convert GRE item to Verbs specification.
  *
  * @param item[in]
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 060ff0e85..f10ea13c1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -466,8 +466,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			uint8_t vlan_sz =
 				(buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
 			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
+				buf->ol_flags & (PKT_TX_TUNNEL_MASK);
 
 			tso_header_sz = buf->l2_len + vlan_sz +
 					buf->l3_len + buf->l4_len;
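
For context (not part of the patch): with is_tunneled now derived from
PKT_TX_TUNNEL_MASK, any mbuf TX tunnel type is taken into account when sizing
the TSO header, not only VXLAN and GRE. Below is a minimal sketch of how an
application might mark a VXLAN-GPE encapsulated mbuf for tunnel TSO and
checksum offload; header lengths are hard-coded for an untagged IPv4/UDP outer
and IPv4/TCP inner packet, and it assumes the PKT_TX_TUNNEL_VXLAN_GPE mbuf
flag is available.

#include <rte_mbuf.h>

/* Fill TX offload metadata for an already-built VXLAN-GPE packet. */
static void
mark_vxlan_gpe_tx_offloads(struct rte_mbuf *m)
{
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4 */
	/* For tunnel packets, l2_len = outer L4 + tunnel + inner L2. */
	m->l2_len = 8 + 8 + 14;		/* UDP + VXLAN-GPE + inner Ethernet */
	m->l3_len = 20;			/* inner IPv4 */
	m->l4_len = 20;			/* inner TCP */
	m->tso_segsz = 1400;
	/* PKT_TX_TCP_SEG implies the inner TCP checksum offload. */
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_SEG;
}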
-- 
2.13.3


