[dpdk-dev] [PATCH v1 4/4] net/iavf: support FDIR for IP fragment packet
Jeff Guo
jia.guo at intel.com
Wed Mar 17 04:12:11 CET 2021
New FDIR parsing paths are added to handle fragmented IPv4/IPv6 packets.
Signed-off-by: Ting Xu <ting.xu at intel.com>
Signed-off-by: Jeff Guo <jia.guo at intel.com>
---
drivers/net/iavf/iavf_fdir.c | 278 ++++++++++++++++++---------
drivers/net/iavf/iavf_generic_flow.h | 5 +
2 files changed, 190 insertions(+), 93 deletions(-)
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index e3f3b5f22a..348d423081 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -34,7 +34,7 @@
#define IAVF_FDIR_INSET_ETH_IPV4 (\
IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
- IAVF_INSET_IPV4_TTL)
+ IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)
#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
@@ -56,6 +56,9 @@
IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
IAVF_INSET_IPV6_HOP_LIMIT)
+#define IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT (\
+ IAVF_INSET_IPV6_ID)
+
#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
@@ -117,6 +120,7 @@ static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
{iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_frag_ext, IAVF_FDIR_INSET_ETH_IPV6_FRAG_EXT, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
{iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
@@ -496,12 +500,13 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
struct iavf_fdir_conf *filter)
{
struct virtchnl_proto_hdrs hdrs = filter->add_fltr.rule_cfg.proto_hdrs;
- const struct rte_flow_item *item = pattern;
- enum rte_flow_item_type item_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_last;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_mask;
const struct rte_flow_item_udp *udp_spec, *udp_mask;
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
@@ -512,15 +517,16 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
const struct rte_flow_item_ah *ah_spec, *ah_mask;
const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
const struct rte_flow_item_ecpri *ecpri_spec, *ecpri_mask;
+ const struct rte_flow_item *item = pattern;
+ struct virtchnl_proto_hdr *hdr, *hdr1 = NULL;
struct rte_ecpri_common_hdr ecpri_common;
uint64_t input_set = IAVF_INSET_NONE;
- uint8_t proto_id;
-
+ enum rte_flow_item_type item_type;
enum rte_flow_item_type next_type;
+ bool spec_all_pid = false;
uint16_t ether_type;
-
+ uint8_t proto_id;
int layer = 0;
- struct virtchnl_proto_hdr *hdr;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -528,12 +534,6 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
};
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Not support range");
- }
-
item_type = item->type;
switch (item_type) {
@@ -542,12 +542,12 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
eth_mask = item->mask;
next_type = (item + 1)->type;
- hdr = &hdrs.proto_hdr[layer];
+ hdr1 = &hdrs.proto_hdr[layer];
- VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
if (next_type == RTE_FLOW_ITEM_TYPE_END &&
- (!eth_spec || !eth_mask)) {
+ (!eth_spec || !eth_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "NULL eth spec/mask.");
@@ -583,10 +583,11 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
}
input_set |= IAVF_INSET_ETHERTYPE;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
+ ETHERTYPE);
- rte_memcpy(hdr->buffer,
- eth_spec, sizeof(struct rte_ether_hdr));
+ rte_memcpy(hdr1->buffer, eth_spec,
+ sizeof(struct rte_ether_hdr));
}
hdrs.count = ++layer;
@@ -595,51 +596,85 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
ipv4_spec = item->spec;
+ ipv4_last = item->last;
ipv4_mask = item->mask;
+ next_type = (item + 1)->type;
hdr = &hdrs.proto_hdr[layer];
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
- if (ipv4_spec && ipv4_mask) {
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid IPv4 mask.");
- return -rte_errno;
- }
+ if (!(ipv4_spec && ipv4_mask)) {
+ hdrs.count = ++layer;
+ break;
+ }
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
- input_set |= IAVF_INSET_IPV4_TOS;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
- }
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
- input_set |= IAVF_INSET_IPV4_PROTO;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
- }
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
- input_set |= IAVF_INSET_IPV4_TTL;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
- }
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
- input_set |= IAVF_INSET_IPV4_SRC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
- }
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
- input_set |= IAVF_INSET_IPV4_DST;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
- }
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
- rte_memcpy(hdr->buffer,
- &ipv4_spec->hdr,
- sizeof(ipv4_spec->hdr));
+ if (ipv4_mask->hdr.type_of_service ==
+ UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_TOS;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ DSCP);
+ }
+
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_PROTO;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ PROT);
+ }
+
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV4_TTL;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ TTL);
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ SRC);
+ }
+
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ input_set |= IAVF_INSET_IPV4_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
+ DST);
+ }
+
+ if (ipv4_mask->hdr.packet_id == UINT16_MAX ||
+ ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
+ if (ipv4_last &&
+ ipv4_spec->hdr.packet_id == 0 &&
+ ipv4_last->hdr.packet_id == 0xffff)
+ spec_all_pid = true;
+
+ /* All IPv4 fragment packet has the same
+ * ethertype, if the spec is for all invalid
+ * packet id, set the ethertype into input set.
+ */
+ input_set |= spec_all_pid ?
+ IAVF_INSET_ETHERTYPE :
+ IAVF_INSET_IPV4_ID;
+
+ if (spec_all_pid)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+ ETH, ETHERTYPE);
+ else
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+ IPV4, PKID);
}
+ rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
+ sizeof(ipv4_spec->hdr));
+
hdrs.count = ++layer;
break;
@@ -652,46 +687,98 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
- if (ipv6_spec && ipv6_mask) {
- if (ipv6_mask->hdr.payload_len) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid IPv6 mask");
- return -rte_errno;
- }
+ if (!(ipv6_spec && ipv6_mask)) {
+ hdrs.count = ++layer;
+ break;
+ }
- if ((ipv6_mask->hdr.vtc_flow &
- rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
- == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
- input_set |= IAVF_INSET_IPV6_TC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
- }
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
- input_set |= IAVF_INSET_IPV6_NEXT_HDR;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
- }
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
- input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
- }
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr))) {
- input_set |= IAVF_INSET_IPV6_SRC;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
- }
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr))) {
- input_set |= IAVF_INSET_IPV6_DST;
- VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
- }
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv6 mask");
+ return -rte_errno;
+ }
- rte_memcpy(hdr->buffer,
- &ipv6_spec->hdr,
- sizeof(ipv6_spec->hdr));
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
+ input_set |= IAVF_INSET_IPV6_TC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ TC);
+ }
+
+ if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ PROT);
+ }
+
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ HOP_LIMIT);
}
+ if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr))) {
+ input_set |= IAVF_INSET_IPV6_SRC;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ SRC);
+ }
+ if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+ input_set |= IAVF_INSET_IPV6_DST;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
+ DST);
+ }
+
+ rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
+ sizeof(ipv6_spec->hdr));
+
+ hdrs.count = ++layer;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ ipv6_frag_spec = item->spec;
+ ipv6_frag_last = item->last;
+ ipv6_frag_mask = item->mask;
+ next_type = (item + 1)->type;
+
+ hdr = &hdrs.proto_hdr[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6_EH_FRAG);
+
+ if (!(ipv6_frag_spec && ipv6_frag_mask)) {
+ hdrs.count = ++layer;
+ break;
+ }
+
+ if (ipv6_frag_mask->hdr.id == UINT32_MAX ||
+ ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
+ if (ipv6_frag_last &&
+ ipv6_frag_spec->hdr.id == 0 &&
+ ipv6_frag_last->hdr.id == 0xffffffff)
+ spec_all_pid = true;
+
+ /* All IPv6 fragment packet has the
+ * same ethertype, if the spec is for
+ * all invalid packet id, set the
+ * ethertype into input set.
+ */
+ input_set |= spec_all_pid ?
+ IAVF_INSET_ETHERTYPE :
+ IAVF_INSET_IPV6_ID;
+ if (spec_all_pid)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1,
+ ETH, ETHERTYPE);
+ else
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
+ IPV6, PKID);
+ }
+
+ rte_memcpy(hdr->buffer, &ipv6_frag_spec->hdr,
+ sizeof(ipv6_frag_spec->hdr));
+
hdrs.count = ++layer;
break;
@@ -1009,8 +1096,13 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
return -rte_errno;
}
- if (input_set & ~input_set_mask)
- return -EINVAL;
+ if (spec_all_pid) {
+ if (input_set & ~(input_set_mask | IAVF_INSET_ETHERTYPE))
+ return -EINVAL;
+ } else {
+ if (input_set & ~input_set_mask)
+ return -EINVAL;
+ }
filter->input_set = input_set;
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index ce3d12bcd9..f17fc91bfa 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -61,6 +61,7 @@
#define IAVF_PFCP_S_FIELD (1ULL << 44)
#define IAVF_PFCP_SEID (1ULL << 43)
#define IAVF_ECPRI_PC_RTC_ID (1ULL << 42)
+#define IAVF_IP_PK_ID (1ULL << 41)
/* input set */
@@ -84,6 +85,8 @@
(IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO)
#define IAVF_INSET_IPV4_TTL \
(IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL)
+#define IAVF_INSET_IPV4_ID \
+ (IAVF_PROT_IPV4_OUTER | IAVF_IP_PK_ID)
#define IAVF_INSET_IPV6_SRC \
(IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC)
#define IAVF_INSET_IPV6_DST \
@@ -94,6 +97,8 @@
(IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL)
#define IAVF_INSET_IPV6_TC \
(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
+#define IAVF_INSET_IPV6_ID \
+ (IAVF_PROT_IPV6_OUTER | IAVF_IP_PK_ID)
#define IAVF_INSET_TCP_SRC_PORT \
(IAVF_PROT_TCP_OUTER | IAVF_SPORT)
--
2.20.1
More information about the dev
mailing list