[dpdk-dev] [PATCH 7/7] net/mlx5: fix errno values for flow engine

Yongseok Koh yskoh at mellanox.com
Mon Oct 8 20:02:19 CEST 2018
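
Errno values returned through rte_flow_error_set() are made consistent:
EINVAL is set when the flow rule itself is invalid (conflicting actions,
wrong layer ordering, out-of-range values), while ENOTSUP is reserved for
requests the device or the PMD cannot support (e.g. VXLAN VNI 0 or a
partially defined tunnel). Counter allocation failure in the Verbs path
now propagates the rte_errno set by the callee instead of a hardcoded
value.

A minimal sketch of the convention follows; the helper name and the
encap check are illustrative only and are not part of the mlx5 code:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_flow.h>

    /* Hypothetical validation helper: EINVAL for an invalid rule,
     * ENOTSUP for a feature the PMD cannot offload.
     */
    static int
    example_validate_flag(uint64_t action_flags, uint64_t drop_flag,
                          uint64_t encap_flag,
                          struct rte_flow_error *error)
    {
            /* Conflicting actions make the rule itself invalid. */
            if (action_flags & drop_flag)
                    return rte_flow_error_set(error, EINVAL,
                                              RTE_FLOW_ERROR_TYPE_ACTION,
                                              NULL,
                                              "can't drop and flag in"
                                              " same flow");
            /* A feature the driver cannot offload is "not supported". */
            if (action_flags & encap_flag)
                    return rte_flow_error_set(error, ENOTSUP,
                                              RTE_FLOW_ERROR_TYPE_ACTION,
                                              NULL,
                                              "encap action is not"
                                              " supported");
            return 0;
    }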


Fixes: 4f07e13d6af5 ("net/mlx5: split flow validation to dedicated function")
Fixes: f7adfffa3de1 ("net/mlx5: add Direct Verbs validation function")
Fixes: edcdef4e5fe4 ("net/mlx5: add Linux TC flower driver for E-Switch flow")
Cc: orika at mellanox.com

Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       | 70 +++++++++++++++++++-------------------
 drivers/net/mlx5/mlx5_flow_dv.c    |  2 +-
 drivers/net/mlx5/mlx5_flow_tcf.c   |  8 ++---
 drivers/net/mlx5/mlx5_flow_verbs.c |  4 +--
 4 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 30aa95f14..ed60c40f9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -452,10 +452,10 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 		}
 		ret = memcmp(spec, last, size);
 		if (ret != 0)
-			return rte_flow_error_set(error, ENOTSUP,
+			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  item,
-						  "range is not supported");
+						  "range is not valid");
 	}
 	return 0;
 }
@@ -657,15 +657,15 @@ mlx5_flow_validate_action_flag(uint64_t action_flags,
 {
 
 	if (action_flags & MLX5_FLOW_ACTION_DROP)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't drop and flag in same flow");
 	if (action_flags & MLX5_FLOW_ACTION_MARK)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't mark and flag in same flow");
 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't have 2 flag"
 					  " actions in same flow");
@@ -704,17 +704,17 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
 					  "mark id must in 0 <= id < "
 					  RTE_STR(MLX5_FLOW_MARK_MAX));
 	if (action_flags & MLX5_FLOW_ACTION_DROP)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't drop and mark in same flow");
 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't flag and mark in same flow");
 	if (action_flags & MLX5_FLOW_ACTION_MARK)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-					  "can't have 2 flag actions in same"
+					  "can't have 2 mark actions in same"
 					  " flow");
 	return 0;
 }
@@ -735,15 +735,15 @@ mlx5_flow_validate_action_drop(uint64_t action_flags,
 			       struct rte_flow_error *error)
 {
 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't drop and flag in same flow");
 	if (action_flags & MLX5_FLOW_ACTION_MARK)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't drop and mark in same flow");
 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't have 2 fate actions in"
 					  " same flow");
@@ -775,7 +775,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
 	const struct rte_flow_action_queue *queue = action->conf;
 
 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't have 2 fate actions in"
 					  " same flow");
@@ -818,7 +818,7 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 	unsigned int i;
 
 	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "can't have 2 fate actions"
 					  " in same flow");
@@ -931,7 +931,7 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 					  NULL, "transfer is not supported");
 	if (!attributes->ingress)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 					  NULL,
 					  "ingress attribute is mandatory");
@@ -1017,11 +1017,11 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 					MLX5_FLOW_LAYER_OUTER_VLAN;
 
 	if (item_flags & vlanm)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "VLAN layer already configured");
 	else if ((item_flags & l34m) != 0)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L2 layer cannot follow L3/L4 layer");
 	if (!mask)
@@ -1085,7 +1085,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 					  "multiple L3 layers not supported");
 	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 					MLX5_FLOW_LAYER_OUTER_L4))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an L4 layer.");
 	if (!mask)
@@ -1141,7 +1141,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 					  "multiple L3 layers not supported");
 	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 					MLX5_FLOW_LAYER_OUTER_L4))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an L4 layer.");
 	/*
@@ -1192,18 +1192,18 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
 	int ret;
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with UDP layer");
 	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
 				     MLX5_FLOW_LAYER_OUTER_L3)))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 is mandatory to filter on L4");
 	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				   MLX5_FLOW_LAYER_OUTER_L4))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L4 layer is already present");
 	if (!mask)
@@ -1243,18 +1243,18 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
 	int ret;
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with TCP layer");
 	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
 				     MLX5_FLOW_LAYER_OUTER_L3)))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 is mandatory to filter on L4");
 	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				   MLX5_FLOW_LAYER_OUTER_L4))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L4 layer is already present");
 	if (!mask)
@@ -1307,7 +1307,7 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
 	 * https://tools.ietf.org/html/rfc7348
 	 */
 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "no outer UDP layer found");
 	if (!mask)
@@ -1335,11 +1335,11 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
 	 * currently refused.
 	 */
 	if (!vlan_id)
-		return rte_flow_error_set(error, EINVAL,
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "VXLAN vni cannot be 0");
 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
-		return rte_flow_error_set(error, EINVAL,
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "VXLAN tunnel must be fully defined");
 	return 0;
@@ -1393,7 +1393,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 	 * https://tools.ietf.org/html/rfc7348
 	 */
 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "no outer UDP layer found");
 	if (!mask)
@@ -1407,7 +1407,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 		return ret;
 	if (spec) {
 		if (spec->protocol)
-			return rte_flow_error_set(error, EINVAL,
+			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  item,
 						  "VxLAN-GPE protocol"
@@ -1426,11 +1426,11 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 	 * is currently refused.
 	 */
 	if (!vlan_id)
-		return rte_flow_error_set(error, EINVAL,
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "VXLAN-GPE vni cannot be 0");
 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
-		return rte_flow_error_set(error, EINVAL,
+		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "VXLAN-GPE tunnel must be fully"
 					  " defined");
@@ -1463,7 +1463,7 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 	int ret;
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with this GRE layer");
@@ -1520,7 +1520,7 @@ mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
 	int ret;
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with MPLS layer");
@@ -2336,7 +2336,7 @@ mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
 		}
 		return 0;
 	}
-	return rte_flow_error_set(error, ENOTSUP,
+	return rte_flow_error_set(error, EINVAL,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL,
 				  "flow does not have counter");
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2fb1d9ee7..3bb462ceb 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -79,7 +79,7 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 					  NULL,
 					  "transfer is not supported");
 	if (!attributes->ingress)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 					  NULL,
 					  "ingress attribute is mandatory");
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index 0406e84f6..91f6ef678 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -291,7 +291,7 @@ flow_tcf_item_mask(const struct rte_flow_item *item, const void *mask_default,
 		if (item->last &&
 		    (((const uint8_t *)item->spec)[i] & mask[i]) !=
 		    (((const uint8_t *)item->last)[i] & mask[i])) {
-			rte_flow_error_set(error, ENOTSUP,
+			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM_LAST,
 					   item->last,
 					   "range between \"spec\" and \"last\""
@@ -383,7 +383,7 @@ flow_tcf_validate_attributes(const struct rte_flow_attr *attr,
 					  attr,
 					  "lowest priority level is 0xfffe");
 	if (!attr->ingress)
-		return rte_flow_error_set(error, ENOTSUP,
+		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
 					  attr, "only ingress is supported");
 	if (attr->egress)
@@ -655,7 +655,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
 			if (action_flags & MLX5_TCF_FATE_ACTIONS)
 				return rte_flow_error_set
-					(error, ENOTSUP,
+					(error, EINVAL,
 					 RTE_FLOW_ERROR_TYPE_ACTION, actions,
 					 "can't have multiple fate actions");
 			conf.port_id = actions->conf;
@@ -678,7 +678,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			if (action_flags & MLX5_TCF_FATE_ACTIONS)
 				return rte_flow_error_set
-					(error, ENOTSUP,
+					(error, EINVAL,
 					 RTE_FLOW_ERROR_TYPE_ACTION, actions,
 					 "can't have multiple fate actions");
 			action_flags |= MLX5_FLOW_ACTION_DROP;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 3df467214..696447674 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -948,7 +948,7 @@ flow_verbs_translate_action_count(struct rte_eth_dev *dev,
 		flow->counter = flow_verbs_counter_new(dev, count->shared,
 						       count->id);
 		if (!flow->counter)
-			return rte_flow_error_set(error, ENOTSUP,
+			return rte_flow_error_set(error, rte_errno,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
 						  action,
 						  "cannot get counter"
@@ -1094,7 +1094,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 			if (next_protocol != 0xff &&
 			    next_protocol != IPPROTO_MPLS)
 				return rte_flow_error_set
-					(error, ENOTSUP,
+					(error, EINVAL,
 					 RTE_FLOW_ERROR_TYPE_ITEM, items,
 					 "protocol filtering not compatible"
 					 " with MPLS layer");
-- 
2.11.0


