[dpdk-dev] [DPDK 18.08 v1 03/12] net/mlx5: support flow Ethernet item along with drop action

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Mon May 28 13:21:36 CEST 2018
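
Add basic support for flows combining the Ethernet pattern item with the
drop action.  Flow validation and creation share a single routine,
mlx5_flow_merge(): a first call with a NULL flow and a zero size only
validates the rule and returns the amount of memory needed to translate
it, the caller can then allocate a single buffer and call it again to
fill the Verbs attributes and specifications.

As an illustration only (this snippet is not part of the patch), such a
rule can be requested through the public rte_flow API, assuming rte_flow.h
is included and port 0 is configured:

	/* Drop all Ethernet traffic received on port 0. */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	flow = rte_flow_create(0, &attr, pattern, actions, &error);
	if (!flow)
		printf("flow creation failed: %s\n",
		       error.message ? error.message : "(no message)");

The same rule can be exercised from testpmd with:

	flow create 0 ingress pattern eth / end actions drop / end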


Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 676 ++++++++++++++++++++++++++++++++---
 1 file changed, 634 insertions(+), 42 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 6497e99c1..85dc5edaf 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -35,11 +35,62 @@
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
+/* Pattern layer bits. */
+#define MLX5_FLOW_LAYER_L2 (1u << 0)
+#define MLX5_FLOW_LAYER_L3_IPV4 (1u << 1)
+#define MLX5_FLOW_LAYER_L3_IPV6 (1u << 2)
+#define MLX5_FLOW_LAYER_L4_UDP (1u << 3)
+#define MLX5_FLOW_LAYER_L4_TCP (1u << 4)
+#define MLX5_FLOW_LAYER_VLAN (1u << 5)
+/* Masks. */
+#define MLX5_FLOW_LAYER_L3 \
+	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
+#define MLX5_FLOW_LAYER_L4 \
+	(MLX5_FLOW_LAYER_L4_UDP | MLX5_FLOW_LAYER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+	(MLX5_FLOW_LAYER_L2 | MLX5_FLOW_LAYER_L3 | \
+	 MLX5_FLOW_LAYER_L4)
+
+/* Action fate on the packet. */
+#define MLX5_FLOW_FATE_DROP (1u << 0)
+
+/* Verbs flow priority per layer level. */
+#define MLX5_FLOW_PRIO_L4 0
+#define MLX5_FLOW_PRIO_L3 1
+#define MLX5_FLOW_PRIO_L2 2
+
+/* Control flow priority. */
+#define MLX5_FLOW_CTRL 0xffffffff
+#define MLX5_FLOW_CTRL_PRIO_OFFSET (MLX5_FLOW_PRIO_L2 + 1)
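+/*
+ * Verbs matches flows in increasing priority value order.  This offset
+ * places control flows behind user flows matching the same layers.
+ */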
+
+/** Verbs translation of a flow; only the drop fate is handled for now. */
+struct mlx5_flow_verbs {
+	unsigned int size; /**< Total size of the specifications. */
+	uint32_t layers;
+	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+	uint32_t fate;
+	/**< Bit-fields of present fate, see MLX5_FLOW_FATE_*. */
+	struct {
+		struct ibv_flow_attr *attr;
+		/**< Pointer to the Verbs flow attributes. */
+		uint8_t *specs; /**< Pointer to the specifications buffer. */
+	};
+	struct ibv_flow *flow; /**< Verbs flow pointer. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+};
+
+/* Flow structure. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	struct rte_flow_attr attributes; /**< User flow attribute. */
+	struct mlx5_flow_verbs verbs; /**< Verbs flow. */
 };
 
 static const struct rte_flow_ops mlx5_flow_ops = {
+	.validate = mlx5_flow_validate,
+	.create = mlx5_flow_create,
+	.destroy = mlx5_flow_destroy,
+	.flush = mlx5_flow_flush,
 	.isolate = mlx5_flow_isolate,
 };
 
@@ -76,12 +127,374 @@ struct ibv_spec_header {
 };
 
 /**
- * Convert a flow.
+ * Dump flow information for debugging purposes; the body is only compiled
+ * in when CONFIG_RTE_LIBRTE_MLX5_DEBUG=y.
+ *
+ * @param flow
+ *   Pointer to the flow structure to display.
+ */
+void
+mlx5_flow_print(struct rte_flow *flow __rte_unused)
+{
+#ifndef NDEBUG
+	fprintf(stdout, "---------8<------------\n");
+	fprintf(stdout, "%s: flow information\n", MLX5_DRIVER_NAME);
+	fprintf(stdout, " attributes: group %u priority %u ingress %d egress %d"
+		" transfer %d\n", flow->attributes.group,
+		flow->attributes.priority,
+		flow->attributes.ingress,
+		flow->attributes.egress,
+		flow->attributes.transfer);
+	fprintf(stdout, " layers: %s/%s/%s\n",
+		flow->verbs.layers & MLX5_FLOW_LAYER_L2 ? "l2" : "-",
+		flow->verbs.layers & MLX5_FLOW_LAYER_L3 ? "l3" : "-",
+		flow->verbs.layers & MLX5_FLOW_LAYER_L4 ? "l4" : "-");
+	if (flow->verbs.fate & MLX5_FLOW_FATE_DROP)
+		fprintf(stdout, " fate: drop queue\n");
+	if (flow->verbs.attr) {
+		struct ibv_spec_header *hdr =
+			(struct ibv_spec_header *)flow->verbs.specs;
+		const int n = flow->verbs.attr->num_of_specs;
+		int i;
+
+		fprintf(stdout, " Verbs attributes: priority %u specs_n %u\n",
+			flow->verbs.attr->priority,
+			flow->verbs.attr->num_of_specs);
+		for (i = 0; i != n; ++i) {
+			rte_hexdump(stdout, " ", hdr, hdr->size);
+			hdr = (struct ibv_spec_header *)
+				((uint8_t *)hdr + hdr->size);
+		}
+	}
+	fprintf(stdout, "--------->8------------\n");
+#endif
+}
+
+/**
+ * Validate flow rule attributes and store them in the flow structure.
+ *
+ * @param attr
+ *   Pointer to flow attributes.
+ * @param flow
+ *   Pointer to the rte_flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_attributes(const struct rte_flow_attr *attr,
+		     struct rte_flow *flow,
+		     struct rte_flow_error *error)
+{
+	if (attr->group)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "groups are not supported");
+	if (attr->priority && attr->priority != MLX5_FLOW_CTRL)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+					  NULL,
+					  "priorities are not supported");
+	if (attr->egress)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+					  NULL,
+					  "egress is not supported");
+	if (attr->transfer)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+					  NULL,
+					  "transfer is not supported");
+	if (!attr->ingress)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+					  NULL,
+					  "only ingress is supported");
+	flow->attributes = *attr;
+	return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] default_mask
+ *   Bit-masks covering supported fields to compare against the spec, last
+ *   and mask of the item.
+ * @param[in] nic_mask
+ *   Bit-masks covering fields supported by the NIC to compare against the
+ *   user mask.
+ * @param size
+ *   Bit-mask size in bytes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+			const uint8_t *default_mask,
+			const uint8_t *nic_mask,
+			unsigned int size,
+			struct rte_flow_error *error)
+{
+	const uint8_t *mask = item->mask ? item->mask : default_mask;
+	unsigned int i;
+
+	assert(nic_mask);
+	for (i = 0; i < size; ++i)
+		if ((nic_mask[i] | mask[i]) != nic_mask[i])
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "mask enables unsupported"
+						  " bits");
+	if (!item->spec && (item->mask || item->last))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "mask/last without a spec is not"
+					  " supported");
+	if (item->spec && item->last) {
+		uint8_t spec[size];
+		uint8_t last[size];
+		int ret;
+
+		for (i = 0; i < size; ++i) {
+			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
+			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
+		}
+		ret = memcmp(spec, last, size);
+		if (ret != 0)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "differing spec and last"
+						  " are not supported");
+	}
+	return 0;
+}
+
+/**
+ * Add a Verbs specification into the flow.
+ *
+ * @param flow
+ *   Pointer to flow structure.
+ * @param src
+ *   Pointer to the specification to copy.
+ * @param size
+ *   Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+{
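+	/*
+	 * During the validation pass no buffer has been allocated and specs
+	 * is NULL; only the total size is accounted for in that case.
+	 */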
+	if (flow->verbs.specs) {
+		void *dst;
+
+		dst = (void *)(flow->verbs.specs + flow->verbs.size);
+		memcpy(dst, src, size);
+		++flow->verbs.attr->num_of_specs;
+	}
+	flow->verbs.size += size;
+}
+
+/**
+ * Validate the Ethernet item and translate it into a Verbs specification
+ * when a buffer is available.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+		   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	const struct rte_flow_item_eth nic_mask = {
+		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+		.type = RTE_BE16(0xffff),
+	};
+	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+	struct ibv_flow_spec_eth eth = {
+		.type = IBV_FLOW_SPEC_ETH,
+		.size = size,
+	};
+	int ret;
+
+	if (flow->verbs.layers & MLX5_FLOW_LAYER_L2)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L2 layers already configured");
+	if (!mask)
+		mask = &rte_flow_item_eth_mask;
+	ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
+				      (const uint8_t *)&nic_mask,
+				      sizeof(struct rte_flow_item_eth),
+				      error);
+	if (ret)
+		return ret;
+	if (spec) {
+		unsigned int i;
+
+		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+		eth.val.ether_type = spec->type;
+		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+		memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+		eth.mask.ether_type = mask->type;
+		/* Remove unwanted bits from values. */
+		for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+			eth.val.src_mac[i] &= eth.mask.src_mac[i];
+		}
+		eth.val.ether_type &= eth.mask.ether_type;
+	}
+	mlx5_flow_spec_verbs_add(flow, &eth, size);
+	flow->verbs.layers |= MLX5_FLOW_LAYER_L2;
+	return 0;
+}
+
+/**
+ * Validate the pattern items provided by the user and translate them into
+ * Verbs specifications.
+ *
+ * @param items
+ *   Pointer to flow items array.
+ * @param flow
+ *   Pointer to the rte_flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_items(const struct rte_flow_item items[],
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+		int ret = 0;
+
+		switch (items->type) {
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = mlx5_flow_item_eth(items, flow, error);
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  items,
+						  "item not supported");
+		}
+		if (ret < 0)
+			return ret;
+	}
+	if (!flow->verbs.layers) {
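+		/* An empty pattern defaults to matching all Ethernet traffic. */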
+		const struct rte_flow_item item = {
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+		};
+
+		return mlx5_flow_item_eth(&item, flow, error);
+	}
+	return 0;
+}
+
+/**
+ * Validate the drop action provided by the user.
+ *
+ * @param actions
+ *   Pointer to flow actions array.
+ * @param flow
+ *   Pointer to the rte_flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *actions,
+		      struct rte_flow *flow,
+		      struct rte_flow_error *error)
+{
+	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+	struct ibv_flow_spec_action_drop drop = {
+		.type = IBV_FLOW_SPEC_ACTION_DROP,
+		.size = size,
+	};
+
+	if (flow->verbs.fate)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  actions,
+					  "multiple fate actions are not"
+					  " supported");
+	mlx5_flow_spec_verbs_add(flow, &drop, size);
+	flow->verbs.fate |= MLX5_FLOW_FATE_DROP;
+	return 0;
+}
+
+/**
+ * Validate actions provided by the user.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param actions
+ *   Pointer to flow actions array.
+ * @param flow
+ *   Pointer to the rte_flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_actions(struct rte_eth_dev *dev __rte_unused,
+		  const struct rte_flow_action actions[],
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	int ret = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			ret = mlx5_flow_action_drop(actions, flow, error);
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "action not supported");
+		}
+		if (ret < 0)
+			return ret;
+	}
+	if (!flow->verbs.fate)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "no fate action found");
+	return 0;
+}
+
+/**
+ * Validate a flow rule and fill the flow structure with its Verbs
+ * translation when the provided buffer is large enough.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to a TAILQ flow list.
+ * @param flow
+ *   Pointer to a flow structure.
+ * @param flow_size
+ *   Size of the allocated space to store the flow information.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] pattern
@@ -92,21 +505,42 @@ struct ibv_spec_header {
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A flow on success, NULL otherwise and rte_errno is set.
+ *   The number of bytes necessary to create the flow, a negative errno value
+ *   otherwise and rte_errno is set.
  */
-static struct rte_flow *
-mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,
-		      struct mlx5_flows *list __rte_unused,
-		      const struct rte_flow_attr *attr __rte_unused,
-		      const struct rte_flow_item items[] __rte_unused,
-		      const struct rte_flow_action actions[] __rte_unused,
-		      struct rte_flow_error *error)
+static int
+mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
+		size_t flow_size,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
 {
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL,
-			   "action not supported");
-	return NULL;
+	struct rte_flow vflow = { .verbs.layers = 0, };
+	size_t size;
+	int ret;
+
+	/* First pass: validate the rule and compute the needed size. */
+	ret = mlx5_flow_actions(dev, actions, &vflow, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flow_items(items, &vflow, error);
+	if (ret)
+		return ret;
+	/* The size of the Verbs specification is now known. */
+	size = sizeof(vflow) + sizeof(struct ibv_flow_attr) + vflow.verbs.size;
+	if (size <= flow_size) {
+		ret = mlx5_flow_attributes(attr, flow, error);
+		if (ret)
+			return ret;
+		ret = mlx5_flow_items(items, flow, error);
+		if (ret)
+			return ret;
+		ret = mlx5_flow_actions(dev, actions, flow, error);
+		if (ret)
+			return ret;
+	}
+	return size;
 }
 
 /**
@@ -116,16 +550,175 @@ mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,
  * @see rte_flow_ops
  */
 int
-mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,
-		   const struct rte_flow_attr *attr __rte_unused,
-		   const struct rte_flow_item items[] __rte_unused,
-		   const struct rte_flow_action actions[] __rte_unused,
+mlx5_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item items[],
+		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	return rte_flow_error_set(error, ENOTSUP,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-				  NULL,
-				  "action not supported");
+	int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/**
+ * Remove the flow from the NIC and release its fate resources.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to flow structure.
+ */
+static void
+mlx5_flow_fate_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	if (flow->verbs.fate & MLX5_FLOW_FATE_DROP) {
+		if (flow->verbs.flow) {
+			claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
+			flow->verbs.flow = NULL;
+		}
+	}
+	if (flow->verbs.hrxq) {
+		mlx5_hrxq_drop_release(dev, flow->verbs.hrxq);
+		flow->verbs.hrxq = NULL;
+	}
+}
+
+/**
+ * Set the Verbs flow priority in the attribute according to the layers
+ * matched by the pattern.
+ *
+ * @param layers
+ *   Bit-fields of present layers, see MLX5_FLOW_LAYER_*.
+ * @param[in, out] attr
+ *   Pointer to the Verbs attribute to update.
+ * @param[in] control
+ *   Nonzero when the specification is used for a control (default PMD) flow.
+ */
+static void
+mlx5_flow_verbs_priority(uint32_t layers, struct ibv_flow_attr *attr,
+			 const uint32_t control)
+{
+	if (layers & MLX5_FLOW_LAYER_L4)
+		attr->priority = MLX5_FLOW_PRIO_L4;
+	else if (layers & MLX5_FLOW_LAYER_L3)
+		attr->priority = MLX5_FLOW_PRIO_L3;
+	else
+		attr->priority = MLX5_FLOW_PRIO_L2;
+	if (control)
+		attr->priority += MLX5_FLOW_CTRL_PRIO_OFFSET;
+}
+
+/**
+ * Apply the flow.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param flow
+ *   Pointer to flow structure.
+ * @param error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_fate_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+		     struct rte_flow_error *error)
+{
+	if (flow->verbs.fate & MLX5_FLOW_FATE_DROP) {
+		flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
+		if (!flow->verbs.hrxq)
+			return rte_flow_error_set
+				(error, errno,
+				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				 NULL,
+				 "cannot allocate Drop queue");
+	} else {
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "no fate found");
+	}
+	mlx5_flow_verbs_priority(flow->verbs.layers, flow->verbs.attr,
+				 flow->attributes.priority == MLX5_FLOW_CTRL);
+	flow->verbs.flow =
+		mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
+	if (!flow->verbs.flow) {
+		if (flow->verbs.fate & MLX5_FLOW_FATE_DROP)
+			mlx5_hrxq_drop_release(dev, flow->verbs.hrxq);
+		flow->verbs.hrxq = NULL;
+		return rte_flow_error_set(error, errno,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "kernel module refuses to create"
+					  " flow");
+	}
+	return 0;
+}
+
+/**
+ * Create a flow.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param list
+ *   Pointer to a TAILQ flow list.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] pattern
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_list_create(struct rte_eth_dev *dev,
+		      struct mlx5_flows *list,
+		      const struct rte_flow_attr *attr,
+		      const struct rte_flow_item items[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	struct rte_flow *flow;
+	size_t size;
+	int ret;
+
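+	/* A first call without a buffer returns the required flow size. */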
+	ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+	if (ret < 0)
+		return NULL;
+	size = ret;
+	flow = rte_zmalloc(__func__, size, 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "cannot allocate memory");
+		return NULL;
+	}
+	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
+	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
+	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+	if (ret < 0)
+		goto error;
+	assert((size_t)ret == size);
+	if (!dev->data->dev_started)
+		return flow;
+	ret = mlx5_flow_fate_apply(dev, flow, error);
+	if (ret < 0)
+		goto error;
+	TAILQ_INSERT_TAIL(list, flow, next);
+	return flow;
+error:
+	ret = rte_errno; /* Save rte_errno before cleanup. */
+	mlx5_flow_fate_remove(dev, flow);
+	rte_free(flow);
+	rte_errno = ret; /* Restore rte_errno. */
+	return NULL;
 }
 
 /**
@@ -135,17 +728,15 @@ mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,
  * @see rte_flow_ops
  */
 struct rte_flow *
-mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,
-		 const struct rte_flow_attr *attr __rte_unused,
-		 const struct rte_flow_item items[] __rte_unused,
-		 const struct rte_flow_action actions[] __rte_unused,
+mlx5_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	rte_flow_error_set(error, ENOTSUP,
-			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-			   NULL,
-			   "action not supported");
-	return NULL;
+	return mlx5_flow_list_create
+		(dev, &((struct priv *)dev->data->dev_private)->flows,
+		 attr, items, actions, error);
 }
 
 /**
@@ -159,10 +750,12 @@ mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,
  *   Flow to destroy.
  */
 static void
-mlx5_flow_list_destroy(struct rte_eth_dev *dev __rte_unused,
-		       struct mlx5_flows *list __rte_unused,
-		       struct rte_flow *flow __rte_unused)
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		       struct rte_flow *flow)
 {
+	mlx5_flow_fate_remove(dev, flow);
+	TAILQ_REMOVE(list, flow, next);
+	rte_free(flow);
 }
 
 /**
@@ -266,6 +859,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 	const struct rte_flow_attr attr = {
 		.ingress = 1,
+		.priority = MLX5_FLOW_CTRL,
 	};
 	struct rte_flow_item items[] = {
 		{
@@ -369,9 +963,8 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error __rte_unused)
 {
-	struct priv *priv = dev->data->dev_private;
-
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev,
+			     &((struct priv *)dev->data->dev_private)->flows);
 	return 0;
 }
 
@@ -684,9 +1277,8 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev,
 static void
 mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-
-	mlx5_flow_list_flush(dev, &priv->flows);
+	mlx5_flow_list_flush(dev,
+			     &((struct priv *)dev->data->dev_private)->flows);
 }
 
 /**
-- 
2.17.0


