[dpdk-dev] [PATCH v3 1/7] net/mlx5: add flow validation of eCPRI header

Bing Zhao <bingz@mellanox.com>
Thu Jul 16 16:23:40 CEST 2020


When creating a flow that contains an eCPRI header item, validation of
the item is mandatory. The detailed limitations are listed below:
  1. Over Ether / VLAN, the ethertype must be 0xAEFE.
  2. No tunnel support is described in the specification now.
  3. An L3 layer is only supported when the L4 is UDP, see #4.
  4. eCPRI over TCP is not supported by the specification, and over
     UDP it is not supported right now.
  5. Concatenation indicator matching is not supported now.
  6. There is no need to check the revision field.
  7. Only the type field in the common header is mandatory, and the
     whole byte should be matched integrally.
  8. Fields in the message payload header are optional.
  9. Only messages with types #0, #2 and #5 are supported now.

Some of these limitations exist only in the software right now, because
there is no need to support all the message types and the protocol
stack variants listed in the specification.
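
As an illustration (not part of this patch), here is a minimal sketch of
how an application could build a pattern that passes these checks,
matching real-time control messages (type #2) over Ethernet. The
function name and the caller-provided buffers are assumptions of this
sketch; attributes, actions and error handling are omitted:

  #include <string.h>
  #include <rte_byteorder.h>
  #include <rte_ecpri.h>
  #include <rte_flow.h>

  /* Fill pattern[] with ETH / eCPRI / END, matching type #2 messages. */
  static void
  build_ecpri_pattern(struct rte_flow_item pattern[3],
                      struct rte_flow_item_ecpri *spec,
                      struct rte_flow_item_ecpri *mask)
  {
          memset(spec, 0, sizeof(*spec));
          memset(mask, 0, sizeof(*mask));
          memset(pattern, 0, 3 * sizeof(pattern[0]));
          /* Match the whole type byte of the common header, see #7. */
          spec->hdr.common.u32 =
                  RTE_BE32(((const struct rte_ecpri_common_hdr) {
                          .type = RTE_ECPRI_MSG_TYPE_RTC_CTRL,
                  }).u32);
          mask->hdr.common.u32 =
                  RTE_BE32(((const struct rte_ecpri_common_hdr) {
                          .type = 0xFF,
                  }).u32);
          /* The 0xAEFE ether type (#1) is implied by the eCPRI item. */
          pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
          pattern[1].type = RTE_FLOW_ITEM_TYPE_ECPRI;
          pattern[1].spec = spec;
          pattern[1].mask = mask;
          pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
  }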

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 107 +++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_flow.h    |   9 ++++
 drivers/net/mlx5/mlx5_flow_dv.c |  23 +++++++++
 3 files changed, 138 insertions(+), 1 deletion(-)
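
A side note on the mask handling in mlx5_flow_validate_item_ecpri()
below (an illustrative aside, not part of the patch): the item mask is
supplied in big-endian format, while the named bit-fields of struct
rte_ecpri_common_hdr follow the host byte order, hence the
rte_be_to_cpu_32() before the type field is inspected. Masking the
message payload header without fully masking the type byte is rejected,
presumably because the payload layout is only known once the type is.
A mask that the validation would reject:

  /* Rejected: payload header masked while the type byte mask is zero. */
  struct rte_flow_item_ecpri bad_mask = {
          .hdr.dummy[0] = 0xFFFFFFFF,
          /* .hdr.common.u32 left as zero, so the type mask is zero. */
  };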

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ae5ccc2..12d80b5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1227,11 +1227,17 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
 					  "rss action not supported for "
 					  "egress");
-	if (rss->level > 1 &&  !tunnel)
+	if (rss->level > 1 && !tunnel)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "inner RSS is not supported for "
 					  "non-tunnel flows");
+	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
+	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "RSS on eCPRI is not supported now");
+	}
 	return 0;
 }
 
@@ -1597,6 +1603,10 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the Ethernet layer header (including dot1q).
  * @param[in] acc_mask
  *   Acceptable mask, if NULL default internal default mask
  *   will be used to check whether item fields are supported.
@@ -1695,6 +1705,10 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the Ethernet layer header (including dot1q).
  * @param[in] acc_mask
  *   Acceptable mask, if NULL default internal default mask
  *   will be used to check whether item fields are supported.
@@ -2357,6 +2371,97 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
 	return 0;
 }
 
+/**
+ * Validate eCPRI item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] last_item
+ *   Previously validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the Ethernet layer header (including dot1q).
+ * @param[in] acc_mask
+ *   Acceptable mask; if NULL, the default internal mask
+ *   will be used to check whether item fields are supported.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+			      uint64_t item_flags,
+			      uint64_t last_item,
+			      uint16_t ether_type,
+			      const struct rte_flow_item_ecpri *acc_mask,
+			      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ecpri *mask = item->mask;
+	const struct rte_flow_item_ecpri nic_mask = {
+		.hdr = {
+			.common = {
+				.u32 =
+				RTE_BE32(((const struct rte_ecpri_common_hdr) {
+					.type = 0xFF,
+					}).u32),
+			},
+			.dummy[0] = 0xFFFFFFFF,
+		},
+	};
+	const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
+					MLX5_FLOW_LAYER_OUTER_VLAN);
+	struct rte_flow_item_ecpri mask_lo;
+
+	if ((last_item & outer_l2_vlan) && ether_type &&
+	    ether_type != RTE_ETHER_TYPE_ECPRI)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "eCPRI cannot follow an L2/VLAN "
+					  "layer whose type is not 0xAEFE.");
+	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "eCPRI with tunnel is not supported "
+					  "right now.");
+	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple L3 layers not supported");
+	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "eCPRI cannot follow a TCP layer.");
+	/* Per the specification, eCPRI could also be over a UDP layer. */
+	else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "eCPRI over UDP layer is not yet "
+					  "supported.");
+	/* The mask for the type field in the common header could be zero. */
+	if (!mask)
+		mask = &rte_flow_item_ecpri_mask;
+	/* The input mask is in big-endian format. */
+	mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
+	if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+					  "partial mask is not supported "
+					  "for protocol");
+	else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+					  "message header mask requires a "
+					  "type mask to be set");
+	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					 acc_mask ? (const uint8_t *)acc_mask
+						  : (const uint8_t *)&nic_mask,
+					 sizeof(struct rte_flow_item_ecpri),
+					 error);
+}
+
 /* Allocate unique ID for the split Q/RSS subflows. */
 static uint32_t
 flow_qrss_get_id(struct rte_eth_dev *dev)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 43cbda8..6dfeef3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -128,6 +128,9 @@ enum mlx5_feature_name {
 /* Pattern tunnel Layer bits (continued). */
 #define MLX5_FLOW_LAYER_GTP (1u << 28)
 
+/* Pattern eCPRI Layer bit. */
+#define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -1027,6 +1030,12 @@ int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
 				   uint64_t item_flags,
 				   struct rte_eth_dev *dev,
 				   struct rte_flow_error *error);
+int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+				  uint64_t item_flags,
+				  uint64_t last_item,
+				  uint16_t ether_type,
+				  const struct rte_flow_item_ecpri *acc_mask,
+				  struct rte_flow_error *error);
 struct mlx5_meter_domains_infos *mlx5_flow_create_mtr_tbls
 					(struct rte_eth_dev *dev,
 					 const struct mlx5_flow_meter *fm);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8b5b683..f042a42 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4923,6 +4923,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			.hop_limits = 0xff,
 		},
 	};
+	const struct rte_flow_item_ecpri nic_ecpri_mask = {
+		.hdr = {
+			.common = {
+				.u32 =
+				RTE_BE32(((const struct rte_ecpri_common_hdr) {
+					.type = 0xFF,
+					}).u32),
+			},
+			.dummy[0] = 0xffffffff,
+		},
+	};
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	uint16_t queue_index = 0xFFFF;
@@ -5173,6 +5184,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = MLX5_FLOW_LAYER_GTP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_ECPRI:
+			/* Capability will be checked in the translate stage. */
+			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+							    last_item,
+							    ether_type,
+							    &nic_ecpri_mask,
+							    error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_ECPRI;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -5882,6 +5904,7 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 	 * Set match on ethertype only if ETH header is not followed by VLAN.
 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
 	 * ethertype, and use ip_version field instead.
+	 * eCPRI over Ether layer will use type value 0xAEFE.
 	 */
 	if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
 	    eth_m->type == 0xFFFF) {
-- 
2.5.5


