[dpdk-stable] patch 'net/hns3: fix flow error type' has been queued to stable release 19.11.6
luca.boccassi at gmail.com
Wed Oct 28 11:44:39 CET 2020
Hi,
FYI, your patch has been queued to stable release 19.11.6.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 10/30/20. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
correctly done.
Thanks.
Luca Boccassi
---
From 377ce6254e47889ec3d2026309148626a6187a6c Mon Sep 17 00:00:00 2001
From: "Wei Hu (Xavier)" <xavier.huwei at huawei.com>
Date: Tue, 29 Sep 2020 20:01:12 +0800
Subject: [PATCH] net/hns3: fix flow error type
[ upstream commit f2577609209330341922bea57e24b7e27ba084d3 ]
The rte_flow_error_set API is used to pass detailed error information
to the caller. This patch sets a suitable error type when calling the
rte_flow_error_set API.
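For illustration only (not part of the patch), a minimal sketch of the
error-reporting pattern this change enforces, modeled on the UDP mask
check below; the helper name is hypothetical:

    #include <errno.h>
    #include <rte_flow.h>

    /* Reject unsupported mask bits with a precise error type and cause. */
    static int
    example_check_udp_mask(const struct rte_flow_item *item,
                           struct rte_flow_error *error)
    {
            const struct rte_flow_item_udp *udp_mask = item->mask;

            /* Only source/destination port masks are supported here. */
            if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum)
                    return rte_flow_error_set(error, EINVAL,
                                              RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                              item,
                                              "Only support src & dst port in UDP");
            return 0;
    }

Using RTE_FLOW_ERROR_TYPE_ITEM_MASK instead of the generic
RTE_FLOW_ERROR_TYPE_ITEM tells the caller that the offending part of the
request is the item's mask rather than the item as a whole.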
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
Signed-off-by: Chengwen Feng <fengchengwen at huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei at huawei.com>
---
drivers/net/hns3/hns3_flow.c | 54 ++++++++++++++++++------------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 1137cb24e8..21dd126ffe 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -128,9 +128,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
if (cnt) {
if (!cnt->shared || cnt->shared != shared)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- cnt,
- "Counter id is used,shared flag not match");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ cnt,
+ "Counter id is used, shared flag not match");
cnt->ref_cnt++;
return 0;
}
@@ -138,7 +138,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
if (cnt == NULL)
return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+ RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
"Alloc mem for counter failed");
cnt->id = id;
cnt->shared = shared;
@@ -166,13 +166,13 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
cnt = hns3_counter_lookup(dev, flow->counter_id);
if (cnt == NULL)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Can't find counter id");
ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
if (ret) {
rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Read counter fail.");
return ret;
}
@@ -279,9 +279,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
(const struct rte_flow_action_mark *)actions->conf;
if (mark->id >= HNS3_MAX_FILTER_ID)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "Invalid Mark ID");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "Invalid Mark ID");
rule->fd_id = mark->id;
rule->flags |= HNS3_RULE_FLAG_FDID;
break;
@@ -295,9 +295,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
if (act_count->id >= counter_num)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "Invalid counter id");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "Invalid counter id");
rule->act_cnt = *act_count;
rule->flags |= HNS3_RULE_FLAG_COUNTER;
break;
@@ -461,7 +461,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
ipv4_mask->hdr.time_to_live ||
ipv4_mask->hdr.hdr_checksum) {
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst ip,tos,proto in IPV4");
}
@@ -526,7 +526,7 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst ip,proto in IPV6");
}
@@ -586,7 +586,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst port in TCP");
}
@@ -633,7 +633,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
udp_mask = item->mask;
if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst port in UDP");
}
@@ -680,7 +680,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
sctp_mask = item->mask;
if (sctp_mask->hdr.cksum)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
item,
"Only support src & dst port in SCTP");
@@ -825,14 +825,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (vxlan_mask->flags)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"Flags is not supported in VxLAN");
/* VNI must be totally masked or not. */
if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"VNI must be totally masked or not in VxLAN");
if (vxlan_mask->vni[0]) {
hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -876,14 +876,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"Ver/protocal is not supported in NVGRE");
/* TNI must be totally masked or not. */
if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"TNI must be totally masked or not in NVGRE");
if (nvgre_mask->tni[0]) {
@@ -930,13 +930,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"Ver/protocal is not supported in GENEVE");
/* VNI must be totally masked or not. */
if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
"VNI must be totally masked or not in GENEVE");
if (geneve_mask->vni[0]) {
hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -967,7 +967,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
break;
default:
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE,
+ RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Unsupported tunnel type!");
}
if (ret)
@@ -1021,7 +1021,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
break;
default:
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE,
+ RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Unsupported normal type!");
}
@@ -1037,7 +1037,7 @@ hns3_validate_item(const struct rte_flow_item *item,
if (item->last)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
"Not supported last point for range");
for (i = 0; i < step_mngr.count; i++) {
@@ -1123,7 +1123,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"fdir_conf.mode isn't perfect");
step_mngr.items = first_items;
--
2.20.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2020-10-28 10:35:15.475830650 +0000
+++ 0120-net-hns3-fix-flow-error-type.patch 2020-10-28 10:35:11.676832556 +0000
@@ -1,15 +1,16 @@
-From f2577609209330341922bea57e24b7e27ba084d3 Mon Sep 17 00:00:00 2001
+From 377ce6254e47889ec3d2026309148626a6187a6c Mon Sep 17 00:00:00 2001
From: "Wei Hu (Xavier)" <xavier.huwei at huawei.com>
Date: Tue, 29 Sep 2020 20:01:12 +0800
Subject: [PATCH] net/hns3: fix flow error type
+[ upstream commit f2577609209330341922bea57e24b7e27ba084d3 ]
+
The rte_flow_error_set API is used to pass detailed error information
to the caller. This patch sets a suitable error type when calling the
rte_flow_error_set API.
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: c37ca66f2b27 ("net/hns3: support RSS")
-Cc: stable at dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen at huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei at huawei.com>
@@ -18,10 +19,10 @@
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
-index a6676d650d..4fb129e32b 100644
+index 1137cb24e8..21dd126ffe 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
-@@ -168,9 +168,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
+@@ -128,9 +128,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
if (cnt) {
if (!cnt->shared || cnt->shared != shared)
return rte_flow_error_set(error, ENOTSUP,
@@ -34,7 +35,7 @@
cnt->ref_cnt++;
return 0;
}
-@@ -178,7 +178,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
+@@ -138,7 +138,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
if (cnt == NULL)
return rte_flow_error_set(error, ENOMEM,
@@ -43,7 +44,7 @@
"Alloc mem for counter failed");
cnt->id = id;
cnt->shared = shared;
-@@ -206,13 +206,13 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -166,13 +166,13 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
cnt = hns3_counter_lookup(dev, flow->counter_id);
if (cnt == NULL)
return rte_flow_error_set(error, EINVAL,
@@ -59,7 +60,7 @@
NULL, "Read counter fail.");
return ret;
}
-@@ -374,9 +374,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
+@@ -279,9 +279,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
(const struct rte_flow_action_mark *)actions->conf;
if (mark->id >= HNS3_MAX_FILTER_ID)
return rte_flow_error_set(error, EINVAL,
@@ -72,7 +73,7 @@
rule->fd_id = mark->id;
rule->flags |= HNS3_RULE_FLAG_FDID;
break;
-@@ -390,9 +390,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
+@@ -295,9 +295,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
if (act_count->id >= counter_num)
return rte_flow_error_set(error, EINVAL,
@@ -85,7 +86,7 @@
rule->act_cnt = *act_count;
rule->flags |= HNS3_RULE_FLAG_COUNTER;
break;
-@@ -556,7 +556,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -461,7 +461,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
ipv4_mask->hdr.time_to_live ||
ipv4_mask->hdr.hdr_checksum) {
return rte_flow_error_set(error, EINVAL,
@@ -94,7 +95,7 @@
item,
"Only support src & dst ip,tos,proto in IPV4");
}
-@@ -621,7 +621,7 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -526,7 +526,7 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
return rte_flow_error_set(error, EINVAL,
@@ -103,7 +104,7 @@
item,
"Only support src & dst ip,proto in IPV6");
}
-@@ -681,7 +681,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -586,7 +586,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
return rte_flow_error_set(error, EINVAL,
@@ -112,7 +113,7 @@
item,
"Only support src & dst port in TCP");
}
-@@ -728,7 +728,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -633,7 +633,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
udp_mask = item->mask;
if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
return rte_flow_error_set(error, EINVAL,
@@ -121,7 +122,7 @@
item,
"Only support src & dst port in UDP");
}
-@@ -775,7 +775,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -680,7 +680,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
sctp_mask = item->mask;
if (sctp_mask->hdr.cksum)
return rte_flow_error_set(error, EINVAL,
@@ -130,7 +131,7 @@
item,
"Only support src & dst port in SCTP");
-@@ -920,14 +920,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -825,14 +825,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (vxlan_mask->flags)
return rte_flow_error_set(error, EINVAL,
@@ -147,7 +148,7 @@
"VNI must be totally masked or not in VxLAN");
if (vxlan_mask->vni[0]) {
hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
-@@ -971,14 +971,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -876,14 +876,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
return rte_flow_error_set(error, EINVAL,
@@ -164,7 +165,7 @@
"TNI must be totally masked or not in NVGRE");
if (nvgre_mask->tni[0]) {
-@@ -1025,13 +1025,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -930,13 +930,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
return rte_flow_error_set(error, EINVAL,
@@ -180,7 +181,7 @@
"VNI must be totally masked or not in GENEVE");
if (geneve_mask->vni[0]) {
hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
-@@ -1062,7 +1062,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+@@ -967,7 +967,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -189,7 +190,7 @@
NULL, "Unsupported tunnel type!");
}
if (ret)
-@@ -1116,7 +1116,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
+@@ -1021,7 +1021,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
break;
default:
return rte_flow_error_set(error, ENOTSUP,
@@ -198,7 +199,7 @@
NULL, "Unsupported normal type!");
}
-@@ -1132,7 +1132,7 @@ hns3_validate_item(const struct rte_flow_item *item,
+@@ -1037,7 +1037,7 @@ hns3_validate_item(const struct rte_flow_item *item,
if (item->last)
return rte_flow_error_set(error, ENOTSUP,
@@ -207,7 +208,7 @@
"Not supported last point for range");
for (i = 0; i < step_mngr.count; i++) {
-@@ -1218,7 +1218,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
+@@ -1123,7 +1123,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
return rte_flow_error_set(error, ENOTSUP,