[dts] [PATCH] add TestSuite_iavf_fdir.py and related files

Peng Yuan yuan.peng at intel.com
Tue Jun 2 12:24:50 CEST 2020


Add TestSuite_iavf_fdir.py and related files to DTS
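
The suite covers flow director rules on the Intel AVF (iavf) PMD with
queue index, rss queues, passthru, drop and mark actions for MAC/IPv4
and MAC/IPv6 PAY/UDP/TCP/SCTP, GTPU (with and without the extension
header), PFCP and L2 ethertype patterns, plus a case creating 2048
rules across 4 VFs simultaneously. Besides the test suite itself, the
patch adds a suite config file, a minimal scapy PFCP layer
(dep/pfcp.py) registered in framework/packet.py, and updates
tests/rte_flow_common.py accordingly.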

Signed-off-by: Peng Yuan <yuan.peng at intel.com>
---
 conf/iavf_fdir.cfg           |    9 +
 dep/pfcp.py                  |   24 +
 framework/packet.py          |    1 +
 tests/TestSuite_iavf_fdir.py | 2996 ++++++++++++++++++++++++++++++++++
 tests/rte_flow_common.py     |   96 +-
 5 files changed, 3125 insertions(+), 1 deletion(-)
 create mode 100644 conf/iavf_fdir.cfg
 create mode 100644 dep/pfcp.py
 create mode 100644 tests/TestSuite_iavf_fdir.py

diff --git a/conf/iavf_fdir.cfg b/conf/iavf_fdir.cfg
new file mode 100644
index 00000000..cfd84192
--- /dev/null
+++ b/conf/iavf_fdir.cfg
@@ -0,0 +1,9 @@
+[suite]
+# iavf_fdir suite common options; adjust the paths below to the local environment.
+# ice kernel driver (ice.ko) file location
+ice_driver_file_location = "/home/pengyuan/nd/nd_linux-cpk/ice-0.16.0_rc35_1_g7fb2b219_dirty/src/ice.ko"
+# os default package file location
+os_default_package_file_location="/lib/firmware/updates/intel/ice/ddp/ice-1.3.11.0.pkg"
+# comms package file location
+comms_package_file_location="/lib/firmware/updates/intel/ice/ddp/ice_comms-1.3.16.0.pkg"
+# package file location
+package_file_location="/lib/firmware/updates/intel/ice/ddp/ice.pkg"
diff --git a/dep/pfcp.py b/dep/pfcp.py
new file mode 100644
index 00000000..33f25105
--- /dev/null
+++ b/dep/pfcp.py
@@ -0,0 +1,24 @@
+import struct
+
+from scapy.packet import Packet, bind_layers, Padding
+from scapy.fields import *
+from scapy.layers.inet import UDP
+
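+# Minimal PFCP (Packet Forwarding Control Protocol, 3GPP TS 29.244) header
+# definition, just detailed enough for the FDIR test cases: version/MP/S
+# flags, message type, length, SEID, sequence number and message priority.
+# The layer is bound to the standard PFCP UDP destination port 8805 below.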
+class PFCP(Packet):
+    name = "PFCP"
+    fields_desc =  [ BitField("version", 1, 3),
+                     BitField("MP", 0, 4),
+                     BitField("Sfield", 0, 1),
+                     ByteField("MsgType", 0),
+                     ShortField("len", None),
+                     LongField("SEID", 0),
+                     ThreeBytesField("SeqNum", 0),
+                     BitField("MsgPrio", 0, 4),
+                     BitField("spare", 0, 4)]
+
+    def post_build(self, pkt, pay):
+        # Fill in the 16-bit length field when it is left as None.
+        if self.len is None:
+            length = len(pkt) + len(pay)
+            pkt = pkt[:2] + struct.pack("!H", length) + pkt[4:]
+        return pkt + pay
+
+bind_layers(UDP, PFCP, dport=8805)
diff --git a/framework/packet.py b/framework/packet.py
index 42982cd5..603840b4 100644
--- a/framework/packet.py
+++ b/framework/packet.py
@@ -56,6 +56,7 @@ from Dot1BR import Dot1BR
 from nsh import NSH
 from mpls import MPLS
 from igmp import IGMP
+from pfcp import PFCP
 
 from utils import convert_ip2int
 from utils import convert_int2ip
diff --git a/tests/TestSuite_iavf_fdir.py b/tests/TestSuite_iavf_fdir.py
new file mode 100644
index 00000000..8d114878
--- /dev/null
+++ b/tests/TestSuite_iavf_fdir.py
@@ -0,0 +1,2996 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2019 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import re
+import time
+
+from packet import Packet
+from pmd_output import PmdOutput
+from test_case import TestCase
+import rte_flow_common as rfc
+from multiprocessing import Process
+from multiprocessing import Manager
+
+from utils import GREEN, RED
+import utils
+
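+# Test packets for each pattern: "match" packets are expected to hit the
+# flow rule under test, "mismatch" packets must not hit it.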
+MAC_IPV4_PAY = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=255, ttl=2, tos=4) / Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", frag=1, proto=255, ttl=2, tos=4)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=255, ttl=2, tos=4)/UDP(sport=22,dport=23)/Raw("x" * 80)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.22",dst="192.168.0.21", proto=255, ttl=2, tos=4) / Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.1.21", proto=255, ttl=2, tos=4) / Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=1, ttl=2, tos=4) / Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=255, ttl=3, tos=4) / Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=255, ttl=2, tos=9) / Raw("x" * 80)'
+    ]
+}
+
+MAC_IPV4_PAY_protocol = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=1)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", frag=1, proto=1)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4)/UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", frag=1, ttl=2, tos=4)/UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=17, ttl=2, tos=4)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", frag=1, proto=17, ttl=2, tos=4)/Raw("x" * 80)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.22", proto=1)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=6)/UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", frag=1)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+}
+
+MAC_IPV4_UDP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.1.19",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.22", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=21,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=22,dport=24)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=64, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=1) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)']
+}
+
+MAC_IPV4_TCP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.1.19",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.22", ttl=2, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=21,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=22,dport=24)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=64, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=1) /TCP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)']
+}
+
+MAC_IPV4_SCTP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /SCTP(sport=22,dport=23)/Raw("x" * 80)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.1.19",dst="192.168.0.21", ttl=2, tos=4) /SCTP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.22", ttl=2, tos=4) /SCTP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /SCTP(sport=21,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /SCTP(sport=22,dport=24)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=64, tos=4) /SCTP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=1) /SCTP(sport=22,dport=23)/Raw("x" * 80)',
+        'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4)/Raw("x" * 80)']
+}
+
+MAC_IPV6_PAY = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=0, tc=1, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=0, tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2022", src="2001::2", nh=0, tc=1, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::1", nh=0, tc=1, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=2, tc=1, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=0, tc=2, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=0, tc=1, hlim=5)/("X"*480)']
+}
+
+MAC_IPV6_PAY_protocol = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2", nh=44, tc=1, hlim=2)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020")/IPv6ExtHdrFragment(100)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", nh=44)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020")/IPv6ExtHdrFragment(100)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", nh=6)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020")/TCP(sport=22,dport=23)/("X"*480)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", nh=44)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020")/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", nh=17)/TCP(sport=22,dport=23)/("X"*480)']
+}
+
+MAC_IPV6_UDP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", src="2001::2",tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2002::2",tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=3, hlim=2)/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=1)/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/UDP(sport=21,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/UDP(sport=22,dport=24)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/TCP(sport=22,dport=23)/("X"*480)']
+}
+
+MAC_IPV6_TCP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/TCP(sport=22,dport=23)/("X"*480)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", src="2001::2",tc=1, hlim=2)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2002::2",tc=1, hlim=2)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=3, hlim=2)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=1)/TCP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/TCP(sport=21,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/TCP(sport=22,dport=24)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/SCTP(sport=22,dport=23)/("X"*480)']
+}
+
+MAC_IPV6_SCTP = {
+    "match": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/SCTP(sport=22,dport=23)/("X"*480)'],
+    "mismatch": [
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", src="2001::2",tc=1, hlim=2)/SCTP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2002::2",tc=1, hlim=2)/SCTP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=3, hlim=2)/SCTP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=1)/SCTP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/SCTP(sport=21,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/SCTP(sport=22,dport=24)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)',
+        'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/("X"*480)']
+}
+
+MAC_IPV4_GTPU_EH = {
+    "match": [
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP(frag=1)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/UDP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/TCP(sport=22,dport=23)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/ICMP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6(nh=44)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6()/UDP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6()/TCP(sport=22,dport=23)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6()/ICMP()/Raw("x"*20)'],
+    "mismatch": [
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/SCTP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IPv6()/SCTP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x1234567)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x35)/IP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/Raw("x"*20)']
+}
+
+MAC_IPV4_GTPU = {
+    "match": [
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP(frag=1)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/UDP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/TCP(sport=22, dport=23)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/ICMP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6(nh=44)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6()/UDP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6()/TCP(sport=22, dport=23)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6()/ICMP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x35)/IP()/Raw("x"*20)'],
+    "mismatch": [
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IP()/SCTP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/IPv6()/SCTP()/Raw("x"*20)',
+        'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x1234567)/IP()/Raw("x"*20)']
+}
+
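+# L2 ethertype packets shared by the tv_l2_ethertype_* vectors below; the
+# per-packet "check_param" lists in those vectors follow this packet order.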
+L2_Ethertype = [
+    'Ether(dst="00:11:22:33:44:55")/PPPoED()/PPP()/IP()/Raw("x" *80)',
+    'Ether(dst="00:11:22:33:44:55", type=0x8863)/IP()/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:55")/PPPoE()/PPP(proto=0x0021)/IP()/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:55", type=0x8864)/IP()/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:55")/ARP(pdst="192.168.1.1")',
+    'Ether(dst="00:11:22:33:44:55", type=0x0806)/Raw("x" *80)',
+    'Ether(dst="00:11:22:33:44:55",type=0x8100)',
+    'Ether(dst="00:11:22:33:44:55")/Dot1Q(vlan=1)',
+    'Ether(dst="00:11:22:33:44:55",type=0x88f7)/"\\x00\\x02"',
+    'Ether(dst="00:11:22:33:44:55",type=0x8847)']
+
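+# PFCP packets exercising the scapy layer added in dep/pfcp.py: Sfield
+# selects between the "pfcp s_field is 0" and "s_field is 1" rules, and the
+# plain UDP packet is the non-matching case.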
+PFCP = [
+    'Ether(dst="00:11:22:33:44:55")/IP()/UDP(sport=22, dport=8805)/PFCP(Sfield=0)',
+    'Ether(dst="00:11:22:33:44:55")/IP()/UDP(sport=22, dport=8805)/PFCP(Sfield=1, SEID=123)',
+    'Ether(dst="00:11:22:33:44:55")/IPv6()/UDP(sport=22, dport=8805)/PFCP(Sfield=0)',
+    'Ether(dst="00:11:22:33:44:55")/IPv6()/UDP(sport=22, dport=8805)/PFCP(Sfield=1, SEID=256)',
+    'Ether(dst="00:11:22:33:44:55")/IPv6()/UDP(sport=22, dport=23)/Raw("x"*20)']
+
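+# Packets with a different destination MAC for each of the four VFs used by
+# the 2048-rules case.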
+CREATE_2048_RULES_4_VFS = [
+    'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.7.255", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.7.255", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.7.255", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)',
+    'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.7.255", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)']
+
+tv_l2_ethertype_queue_index = {
+    "name": "test_l2_ethertype_queue_index",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions queue index 1 / mark id 1 / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions queue index 2 / mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions queue index 3 / mark id 3 / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions queue index 4 / mark id 4 / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions queue index 5 / mark id 5 / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "queue": 1, "mark_id": 1},
+        {"port_id": 0, "queue": 1, "mark_id": 1},
+        {"port_id": 0, "queue": 2, "mark_id": 2},
+        {"port_id": 0, "queue": 2, "mark_id": 2},
+        {"port_id": 0, "queue": 3, "mark_id": 3},
+        {"port_id": 0, "queue": 3, "mark_id": 3},
+        {"port_id": 0, "queue": 4, "mark_id": 4},
+        {"port_id": 0, "queue": 4, "mark_id": 4},
+        {"port_id": 0, "queue": 5, "mark_id": 5},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_l2_ethertype_queue_group = {
+    "name": "test_l2_ethertype_queue_group",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions rss queues 0 1 end / mark id 0 / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions rss queues 2 3 end / mark id 1 / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions rss queues 4 5 end / mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions rss queues 6 7 end / mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions rss queues 9 10 end / mark id 3 / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 3},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_l2_ethertype_passthru = {
+    "name": "test_l2_ethertype_passthru",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions passthru / mark / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions passthru / mark id 1 / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions passthru / mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions passthru / mark id 3 / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions passthru / mark id 4 / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 3},
+        {"port_id": 0, "queue": 0, "mark_id": 3},
+        {"port_id": 0, "queue": 0, "mark_id": 4},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_l2_ethertype_mark_rss = {
+    "name": "test_l2_ethertype_mark_rss",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions rss / mark id 0 / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions mark id 1 / rss / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions mark / rss / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions rss / mark / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions mark id 3 / rss / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 3},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_l2_ethertype_mark = {
+    "name": "test_l2_ethertype_mark",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions mark id 0 / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions mark id 1 / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions mark id 2 / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions mark / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_l2_ethertype_drop = {
+    "name": "test_l2_ethertype_drop",
+    "rule": [
+        "flow create 0 ingress pattern eth type is 0x8863 / end actions drop / end",
+        "flow create 0 ingress pattern eth type is 0x8864 / end actions drop / end",
+        "flow create 0 ingress pattern eth type is 0x0806 / end actions drop / end",
+        "flow create 0 ingress pattern eth type is 0x8100 / end actions drop / end",
+        "flow create 0 ingress pattern eth type is 0x88f7 / end actions drop / end"],
+    "scapy_str": L2_Ethertype,
+    "check_param": [
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "queue": 0}]
+}
+
+tv_pfcp_queue_index = {
+    "name": "test_pfcp_queue_index",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions queue index 2 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions queue index 3 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "queue": 1},
+        {"port_id": 0, "queue": 2},
+        {"port_id": 0, "queue": 3},
+        {"port_id": 0, "queue": 4},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_pfcp_queue_group = {
+    "name": "test_pfcp_queue_group",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions rss queues 2 3 end / mark id 0 / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions rss queues 4 5 6 7 end / mark id 1 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions rss queues 8 9 10 11 12 13 14 15 end / mark id 2 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions rss queues 3 4 5 6 end / mark id 3 / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "queue": [4, 5, 6, 7], "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "queue": [3, 4, 5, 6], "mark_id": 3},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_pfcp_passthru = {
+    "name": "test_pfcp_passthru",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions passthru / mark id 0 / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions passthru / mark id 1 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions passthru / mark id 2 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions passthru / mark id 3 / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "passthru": 1, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "passthru": 1, "mark_id": 3},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_pfcp_mark_rss = {
+    "name": "test_pfcp_mark_rss",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions mark / rss / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions mark id 1 / rss / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions mark id 2 / rss / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions mark id 3 / rss / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "passthru": 1, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "passthru": 1, "mark_id": 3},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_pfcp_mark = {
+    "name": "test_pfcp_mark",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions mark / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions mark id 1 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions mark id 2 / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions mark id 4294967294 / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "queue": 0, "mark_id": 0},
+        {"port_id": 0, "passthru": 1, "mark_id": 1},
+        {"port_id": 0, "queue": 0, "mark_id": 2},
+        {"port_id": 0, "passthru": 1, "mark_id": 4294967294},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_pfcp_drop = {
+    "name": "test_pfcp_drop",
+    "rule": [
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions drop / end",
+        "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions drop / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions drop / end",
+        "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions drop / end"],
+    "scapy_str": PFCP,
+    "check_param": [
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "drop": 1},
+        {"port_id": 0, "passthru": 1}]
+}
+
+tv_add_2048_rules_on_4_VFs_at_meantime = {
+    "name": "test_add_2048_rules_on_4_VFs_at_meantime",
+    "scapy_str": CREATE_2048_RULES_4_VFS,
+    "check_param": {"port_id": 0, "queue": 1}
+}
+
+tv_mac_ipv4_pay_queue_index = {
+    "name": "test_mac_ipv4_pay_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions queue index 1 / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "queue": 1}
+}
+
+tv_mac_ipv4_pay_queue_group = {
+    "name": "test_mac_ipv4_pay_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 0 1 end / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "queue": [0, 1]}
+}
+
+tv_mac_ipv4_pay_passthru = {
+    "name": "test_mac_ipv4_pay_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions passthru / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "passthru": 1}
+}
+
+tv_mac_ipv4_pay_mark_rss = {
+    "name": "test_mac_ipv4_pay_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions mark / rss / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_pay_mark = {
+    "name": "test_mac_ipv4_pay_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions mark / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_pay_drop = {
+    "name": "test_mac_ipv4_pay_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions drop / end",
+    "scapy_str": MAC_IPV4_PAY,
+    "check_param": {"port_id": 0, "drop":1}
+}
+
+tv_mac_ipv4_udp_queue_index = {
+    "name": "test_mac_ipv4_udp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions queue index 1 / mark id 0 / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "queue": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_udp_drop = {
+    "name": "test_mac_ipv4_udp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions drop / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv4_udp_queue_group = {
+    "name": "test_mac_ipv4_udp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions rss queues 1 2 3 4 end / mark id 4294967294 / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "queue": [1, 2, 3, 4], "mark_id": 4294967294}
+}
+
+tv_mac_ipv4_udp_passthru = {
+    "name": "test_mac_ipv4_udp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions passthru / mark id 1 / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 1}
+}
+
+tv_mac_ipv4_udp_mark_rss = {
+    "name": "test_mac_ipv4_udp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions mark id 2 / rss / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 2}
+}
+
+tv_mac_ipv4_udp_mark = {
+    "name": "test_mac_ipv4_udp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / udp src is 22 dst is 23 / end actions mark id 1 / end",
+    "scapy_str": MAC_IPV4_UDP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 1}
+}
+
+tv_mac_ipv4_tcp_queue_index = {
+    "name": "test_mac_ipv4_tcp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 15 / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "queue": 15}
+}
+
+tv_mac_ipv4_tcp_drop = {
+    "name": "test_mac_ipv4_tcp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions drop / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv4_tcp_queue_group = {
+    "name": "test_mac_ipv4_tcp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions rss queues 0 1 2 3 end / mark id 1 / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "queue": [0, 1, 2, 3], "mark_id": 1}
+}
+
+tv_mac_ipv4_tcp_passthru = {
+    "name": "test_mac_ipv4_tcp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions passthru / mark id 2 / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 2}
+}
+
+tv_mac_ipv4_tcp_mark_rss = {
+    "name": "test_mac_ipv4_tcp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions mark id 0 / rss / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_tcp_mark = {
+    "name": "test_mac_ipv4_tcp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / tcp src is 22 dst is 23 / end actions mark id 0 / end",
+    "scapy_str": MAC_IPV4_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_sctp_queue_index = {
+    "name": "test_mac_ipv4_sctp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 tag is 1 / end actions queue index 0 / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "queue": 0}
+}
+
+tv_mac_ipv4_sctp_drop = {
+    "name": "test_mac_ipv4_sctp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 / end actions drop / mark / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv4_sctp_queue_group = {
+    "name": "test_mac_ipv4_sctp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 / end actions rss queues 14 15 end / mark id 15 / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "queue": [14, 15], "mark_id": 15}
+}
+
+tv_mac_ipv4_sctp_passthru = {
+    "name": "test_mac_ipv4_sctp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 / end actions passthru / mark id 0 / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_sctp_mark_rss = {
+    "name": "test_mac_ipv4_sctp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 / end actions mark / rss / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_sctp_mark = {
+    "name": "test_mac_ipv4_sctp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / sctp src is 22 dst is 23 / end actions mark / end",
+    "scapy_str": MAC_IPV4_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_pay_queue_index = {
+    "name": "test_mac_ipv6_pay_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions queue index 15 / mark id 1 / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "queue": 15, "mark_id": 1}
+}
+
+tv_mac_ipv6_pay_drop = {
+    "name": "test_mac_ipv6_pay_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions drop / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv6_pay_queue_group = {
+    "name": "test_mac_ipv6_pay_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions rss queues 8 9 10 11 12 13 14 15 end / mark id 2 / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "queue": [8, 9, 10, 11, 12, 13, 14, 15], "mark_id": 2}
+}
+
+tv_mac_ipv6_pay_passthru = {
+    "name": "test_mac_ipv6_pay_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions passthru / mark id 3 / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 3}
+}
+
+tv_mac_ipv6_pay_mark_rss = {
+    "name": "test_mac_ipv6_pay_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions mark id 4 / rss / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 4}
+}
+
+tv_mac_ipv6_pay_mark = {
+    "name": "test_mac_ipv6_pay_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions mark id 5 / end",
+    "scapy_str": MAC_IPV6_PAY,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 5}
+}
+
+tv_mac_ipv6_udp_queue_index = {
+    "name": "test_mac_ipv6_udp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions queue index 1 / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "queue": 1}
+}
+
+tv_mac_ipv6_udp_queue_group = {
+    "name": "test_mac_ipv6_udp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions rss queues 1 2 end / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "queue": [1, 2]}
+}
+
+tv_mac_ipv6_udp_drop = {
+    "name": "test_mac_ipv6_udp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions drop / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv6_udp_passthru = {
+    "name": "test_mac_ipv6_udp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions passthru / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "passthru": 1}
+}
+
+tv_mac_ipv6_udp_mark_rss = {
+    "name": "test_mac_ipv6_udp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions mark / rss / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_udp_mark = {
+    "name": "test_mac_ipv6_udp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / udp src is 22 dst is 23 / end actions mark / end",
+    "scapy_str": MAC_IPV6_UDP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_tcp_queue_index = {
+    "name": "test_mac_ipv6_tcp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "queue": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_tcp_queue_group = {
+    "name": "test_mac_ipv6_tcp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions rss queues 2 3 end / mark / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "queue": [2, 3], "mark_id": 0}
+}
+
+tv_mac_ipv6_tcp_drop = {
+    "name": "test_mac_ipv6_tcp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions drop / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv6_tcp_passthru = {
+    "name": "test_mac_ipv6_tcp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions passthru / mark / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_tcp_mark_rss = {
+    "name": "test_mac_ipv6_tcp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions mark / rss / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_tcp_mark = {
+    "name": "test_mac_ipv6_tcp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / tcp src is 22 dst is 23 / end actions mark / end",
+    "scapy_str": MAC_IPV6_TCP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}
+}
+
+tv_mac_ipv6_sctp_queue_index = {
+    "name": "test_mac_ipv6_sctp_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions queue index 3 / mark id 0 / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "queue": 3, "mark_id": 0 }
+}
+
+tv_mac_ipv6_sctp_drop = {
+    "name": "test_mac_ipv6_sctp_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions drop / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv6_sctp_queue_group = {
+    "name": "test_mac_ipv6_sctp_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions rss queues 12 13 end / mark id 0 / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "queue": [12, 13], "mark_id": 0}
+}
+
+tv_mac_ipv6_sctp_passthru = {
+    "name": "test_mac_ipv6_sctp_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions passthru / mark id 0 / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0 }
+}
+
+tv_mac_ipv6_sctp_mark_rss = {
+    "name": "test_mac_ipv6_sctp_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions mark id 1 / rss / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 1 }
+}
+
+tv_mac_ipv6_sctp_mark = {
+    "name": "test_mac_ipv6_sctp_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 hop is 2 tc is 1 / sctp src is 22 dst is 23 / end actions mark id 2 / end",
+    "scapy_str": MAC_IPV6_SCTP,
+    "check_param": {"port_id": 0, "passthru": 1, "mark_id": 2 }
+}
+
+tv_mac_ipv4_gtpu_eh_queue_index = {
+    "name": "test_mac_ipv4_gtpu_eh_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions queue index 1 / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "queue": 1}
+}
+
+tv_mac_ipv4_gtpu_eh_drop = {
+    "name": "test_mac_ipv4_gtpu_eh_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions drop / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv4_gtpu_eh_queue_group = {
+    "name": "test_mac_ipv4_gtpu_eh_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions rss queues 2 3 end / mark / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "mark_id": 0}
+}
+
+tv_mac_ipv4_gtpu_eh_passthru = {
+    "name": "test_mac_ipv4_gtpu_eh_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions passthru / mark id 1 / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "mark_id": 1}
+}
+
+tv_mac_ipv4_gtpu_eh_mark_rss = {
+    "name": "test_mac_ipv4_gtpu_eh_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions mark / rss / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "mark_id": 0}
+}
+
+tv_mac_ipv4_gtpu_eh_mark = {
+    "name": "test_mac_ipv4_gtpu_eh_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions mark / end",
+    "scapy_str": MAC_IPV4_GTPU_EH,
+    "check_param": {"port_id": 0, "mark_id": 0}
+}
+
+tv_mac_ipv4_gtpu_queue_index = {
+    "name": "test_mac_ipv4_gtpu_queue_index",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions queue index 1 / mark id 0 / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "queue": 1, "mark_id": 0}
+}
+
+tv_mac_ipv4_gtpu_drop = {
+    "name": "test_mac_ipv4_gtpu_drop",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions drop / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "drop": 1}
+}
+
+tv_mac_ipv4_gtpu_queue_group = {
+    "name": "test_mac_ipv4_gtpu_queue_group",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions rss queues 1 2 end / mark id 1 / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "mark_id": 1}
+}
+
+tv_mac_ipv4_gtpu_passthru = {
+    "name": "test_mac_ipv4_gtpu_passthru",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions passthru / mark id 2 / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "mark_id": 2}
+}
+
+tv_mac_ipv4_gtpu_mark_rss = {
+    "name": "test_mac_ipv4_gtpu_mark_rss",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions mark id 3 / rss / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "mark_id": 3}
+}
+
+tv_mac_ipv4_gtpu_mark = {
+    "name": "test_mac_ipv4_gtpu_mark",
+    "rule": "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / end actions mark id 4 / end",
+    "scapy_str": MAC_IPV4_GTPU,
+    "check_param": {"port_id": 0, "mark_id": 4}
+}
+
+
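+# Each vectors_* list below groups the six test vectors for one packet type, covering
+# the FDIR actions queue index, rss queues, passthru, drop, mark and mark + rss.
+# They are consumed by rte_flow_process() or multirules_process() in the test cases.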
+vectors_ipv4_pay = [tv_mac_ipv4_pay_queue_index, tv_mac_ipv4_pay_mark_rss, tv_mac_ipv4_pay_passthru,
+                         tv_mac_ipv4_pay_drop, tv_mac_ipv4_pay_queue_group, tv_mac_ipv4_pay_mark]
+
+vectors_ipv4_udp = [tv_mac_ipv4_udp_drop, tv_mac_ipv4_udp_queue_group, tv_mac_ipv4_udp_queue_index,
+                         tv_mac_ipv4_udp_mark_rss, tv_mac_ipv4_udp_passthru, tv_mac_ipv4_udp_mark]
+
+vectors_ipv4_tcp = [tv_mac_ipv4_tcp_drop, tv_mac_ipv4_tcp_queue_group, tv_mac_ipv4_tcp_queue_index,
+                         tv_mac_ipv4_tcp_mark_rss, tv_mac_ipv4_tcp_passthru, tv_mac_ipv4_tcp_mark]
+
+vectors_ipv4_sctp = [tv_mac_ipv4_sctp_drop, tv_mac_ipv4_sctp_queue_group, tv_mac_ipv4_sctp_queue_index,
+                         tv_mac_ipv4_sctp_passthru, tv_mac_ipv4_sctp_mark_rss, tv_mac_ipv4_sctp_mark]
+
+vectors_ipv6_pay = [tv_mac_ipv6_pay_drop, tv_mac_ipv6_pay_queue_group, tv_mac_ipv6_pay_queue_index,
+                         tv_mac_ipv6_pay_mark_rss, tv_mac_ipv6_pay_passthru, tv_mac_ipv6_pay_mark]
+
+vectors_ipv6_udp = [tv_mac_ipv6_udp_drop, tv_mac_ipv6_udp_queue_group, tv_mac_ipv6_udp_queue_index,
+                         tv_mac_ipv6_udp_passthru, tv_mac_ipv6_udp_mark_rss, tv_mac_ipv6_udp_mark]
+
+vectors_ipv6_tcp = [tv_mac_ipv6_tcp_drop, tv_mac_ipv6_tcp_queue_group, tv_mac_ipv6_tcp_queue_index,
+                         tv_mac_ipv6_tcp_mark_rss, tv_mac_ipv6_tcp_passthru, tv_mac_ipv6_tcp_mark]
+
+vectors_ipv6_sctp = [tv_mac_ipv6_sctp_queue_index, tv_mac_ipv6_sctp_drop, tv_mac_ipv6_sctp_queue_group,
+                          tv_mac_ipv6_sctp_passthru, tv_mac_ipv6_sctp_mark_rss, tv_mac_ipv6_sctp_mark]
+
+vectors_gtpu_eh = [tv_mac_ipv4_gtpu_eh_drop, tv_mac_ipv4_gtpu_eh_mark_rss, tv_mac_ipv4_gtpu_eh_queue_index,
+                              tv_mac_ipv4_gtpu_eh_queue_group, tv_mac_ipv4_gtpu_eh_passthru, tv_mac_ipv4_gtpu_eh_mark]
+
+vectors_gtpu = [tv_mac_ipv4_gtpu_drop, tv_mac_ipv4_gtpu_mark_rss, tv_mac_ipv4_gtpu_queue_index,
+                              tv_mac_ipv4_gtpu_queue_group, tv_mac_ipv4_gtpu_passthru, tv_mac_ipv4_gtpu_mark]
+
+vectors_pfcp = [tv_pfcp_queue_index, tv_pfcp_queue_group, tv_pfcp_passthru, tv_pfcp_drop,
+                     tv_pfcp_mark, tv_pfcp_mark_rss]
+
+vectors_l2_ethertype = [tv_l2_ethertype_drop, tv_l2_ethertype_queue_index, tv_l2_ethertype_queue_group,
+                             tv_l2_ethertype_passthru, tv_l2_ethertype_mark, tv_l2_ethertype_mark_rss]
+
+class TestIAVFFdir(TestCase):
+
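+    # Generic per-vector flow: for each test vector, validate and create the FDIR rule,
+    # check that matched packets hit the expected queue/mark and mismatched packets do not,
+    # then destroy the rule and confirm that matched packets no longer hit it.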
+    def rte_flow_process(self, vectors):
+        test_results = {}
+        for tv in vectors:
+            try:
+                port_id = tv["check_param"]["port_id"]
+                self.dut.send_expect("flow flush %d" % port_id, "testpmd> ", 120)
+
+                # validate rule
+                self.validate_fdir_rule(tv["rule"], check_stats=True)
+                self.check_fdir_rule(port_id=port_id, stats=False)
+
+                # create rule
+                rule_li = self.create_fdir_rule(tv["rule"], check_stats=True)
+                if "gtpu_eh" in tv["name"]:
+                    gtpu_rss = [
+                        "flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc pdu_t is 1 / ipv4 / end actions rss types l3-src-only end key_len 0 queues end / end"]
+                    gtpu_rss_rule_li = self.create_fdir_rule(gtpu_rss, check_stats=True)
+
+                # send and check match packets
+                out1 = self.send_pkts_getouput(pkts=tv["scapy_str"]["match"])
+                rfc.check_iavf_fdir_mark(out1, pkt_num=len(tv["scapy_str"]["match"]), check_param=tv["check_param"])
+                # send and check mismatch packets
+                out2 = self.send_pkts_getouput(pkts=tv["scapy_str"]["mismatch"])
+                rfc.check_iavf_fdir_mark(out2, pkt_num=len(tv["scapy_str"]["mismatch"]), check_param=tv["check_param"],
+                                 stats=False)
+                # list and destroy rule
+                if "gtpu_eh" in tv["name"]:
+                    self.check_fdir_rule(port_id=port_id, rule_list=rule_li+gtpu_rss_rule_li)
+                else:
+                    self.check_fdir_rule(port_id=port_id, rule_list=rule_li)
+                self.destroy_fdir_rule(rule_id=rule_li, port_id=port_id)
+                # send matched packet
+                out3 = self.send_pkts_getouput(pkts=tv["scapy_str"]["match"])
+                rfc.check_iavf_fdir_mark(out3, pkt_num=len(tv["scapy_str"]["match"]), check_param=tv["check_param"],
+                                 stats=False)
+                # check the rule is no longer listed (only the GTP-U RSS rule remains in gtpu_eh cases)
+                if "gtpu_eh" in tv["name"]:
+                    self.check_fdir_rule(port_id=port_id, rule_list=gtpu_rss_rule_li)
+                else:
+                    self.check_fdir_rule(port_id=port_id, stats=False)
+                test_results[tv["name"]] = True
+                print((GREEN("case passed: %s" % tv["name"])))
+            except Exception as e:
+                print((RED(e)))
+                test_results[tv["name"]] = False
+                continue
+        failed_cases = []
+        for k, v in list(test_results.items()):
+            if not v:
+                failed_cases.append(k)
+        self.verify(all(test_results.values()), "{} failed.".format(failed_cases))
+
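+    # Create the rules of each test vector on a single port, then send one packet per rule
+    # and check each packet against its own check_param entry. Used by the PFCP and
+    # L2 ethertype cases.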
+    def multirules_process(self, vectors, port_id=0):
+        # create rules on only one port
+        test_results = {}
+        rule_li = []
+        for tv in vectors:
+            try:
+                port_id = port_id
+                pkts=tv["scapy_str"]
+                check_param=tv["check_param"]
+                self.destroy_fdir_rule(rule_id=rule_li, port_id=port_id)
+                # validate rules
+                self.validate_fdir_rule(tv["rule"], check_stats=True)
+
+                # create rules
+                rule_li = self.create_fdir_rule(tv["rule"], check_stats=True)
+
+                for i in range(len(pkts)):
+                    port_id = check_param[i]["port_id"]
+                    out = self.send_pkts_getouput(pkts=pkts[i])
+                    rfc.check_iavf_fdir_mark(out, pkt_num=1, check_param=check_param[i])
+                test_results[tv["name"]] = True
+                print((GREEN("case passed: %s" % tv["name"])))
+            except Exception as e:
+                print((RED(e)))
+                test_results[tv["name"]] = False
+                continue
+        failed_cases = []
+        for k, v in list(test_results.items()):
+            if not v:
+                failed_cases.append(k)
+        self.verify(all(test_results.values()), "{} failed.".format(failed_cases))
+
+    def set_up_all(self):
+        """
+        Run once at the start of the test suite to set up prerequisites.
+        """
+        # Based on h/w type, choose how many ports to use
+        self.dut_ports = self.dut.get_ports(self.nic)
+        self.verify(len(self.dut_ports) >= 2, "Insufficient ports for testing")
+        # Verify that enough threads are available
+        cores = self.dut.get_core_list("1S/4C/1T")
+        self.verify(cores is not None, "Insufficient cores for testing")
+        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
+        localPort0 = self.tester.get_local_port(self.dut_ports[0])
+        localPort1 = self.tester.get_local_port(self.dut_ports[1])
+        self.tester_iface0 = self.tester.get_interface(localPort0)
+        self.tester_iface1 = self.tester.get_interface(localPort1)
+        self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]['intf']
+        self.pf1_intf = self.dut.ports_info[self.dut_ports[1]]['intf']
+        self.pf0_mac = self.dut.get_mac_address(0)
+        self.pf1_mac = self.dut.get_mac_address(1)
+
+        #bind pf to kernel
+        for port in self.dut_ports:
+            netdev = self.dut.ports_info[port]['port']
+            netdev.bind_driver(driver='ice')
+
+        #set vf driver
+        self.vf_driver = 'vfio-pci'
+        self.dut.send_expect('modprobe vfio-pci', '#')
+        self.suite_config = rfc.get_suite_config(self)
+
+        self.pkt = Packet()
+        self.pmd_output = PmdOutput(self.dut)
+
+        self.re_load_ice_driver()
+        self.setup_2pf_4vf_env()
+
+        self.src_file_dir = 'dep/'
+        self.dut_file_dir = '/tmp/'
+
+    def set_up(self):
+        """
+        Run before each test case.
+        """
+        self.launch_testpmd()
+
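+    # Create two VFs on each of the two PFs, assign fixed MAC addresses to them,
+    # and bind all four VFs to the DPDK driver (vfio-pci).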
+    def setup_2pf_4vf_env(self, driver='default'):
+
+        #get PF interface name
+        self.used_dut_port_0 = self.dut_ports[0]
+        self.used_dut_port_1 = self.dut_ports[1]
+
+        # generate 2 VFs on each PF
+        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 2, driver=driver)
+        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, 2, driver=driver)
+        self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0]['vfs_port']
+        self.sriov_vfs_pf1 = self.dut.ports_info[self.used_dut_port_1]['vfs_port']
+
+        self.dut.send_expect('ip link set %s vf 0 mac 00:11:22:33:44:55' % self.pf0_intf, '#')
+        self.dut.send_expect('ip link set %s vf 1 mac 00:11:22:33:44:66' % self.pf0_intf, '#')
+        self.dut.send_expect('ip link set %s vf 0 mac 00:11:22:33:44:77' % self.pf1_intf, '#')
+        self.dut.send_expect('ip link set %s vf 1 mac 00:11:22:33:44:88' % self.pf1_intf, '#')
+
+        # bind all VFs to dpdk driver
+        try:
+            for vf_port in self.sriov_vfs_pf0:
+                vf_port.bind_driver(self.vf_driver)
+            for vf_port in self.sriov_vfs_pf1:
+                vf_port.bind_driver(self.vf_driver)
+
+        except Exception as e:
+            self.destroy_env()
+            raise Exception(e)
+        out = self.dut.send_expect('./usertools/dpdk-devbind.py -s', '#')
+        print(out)
+
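+    # Parameterized variant of the setup above: create vf_num VFs on one PF (pf_num=1)
+    # or on both PFs, and bind them to the DPDK driver.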
+    def setup_npf_nvf_env(self, pf_num=2, vf_num=2, driver='default'):
+
+        #get PF interface name
+        self.used_dut_port_0 = self.dut_ports[0]
+        self.used_dut_port_1 = self.dut_ports[1]
+        try:
+            # generate vf on pf
+            if pf_num == 1:
+                self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, vf_num, driver=driver)
+                self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0]['vfs_port']
+                #bind VF0 and VF1 to dpdk driver
+                for vf_port in self.sriov_vfs_pf0:
+                    vf_port.bind_driver(self.vf_driver)
+            else:
+                self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, vf_num, driver=driver)
+                self.dut.generate_sriov_vfs_by_port(self.used_dut_port_1, vf_num, driver=driver)
+                self.sriov_vfs_pf0 = self.dut.ports_info[self.used_dut_port_0]['vfs_port']
+                self.sriov_vfs_pf1 = self.dut.ports_info[self.used_dut_port_1]['vfs_port']
+                for vf_port in self.sriov_vfs_pf0:
+                    vf_port.bind_driver(self.vf_driver)
+                for vf_port in self.sriov_vfs_pf1:
+                    vf_port.bind_driver(self.vf_driver)
+
+        except Exception as e:
+            self.destroy_env()
+            raise Exception(e)
+        out = self.dut.send_expect('./usertools/dpdk-devbind.py -s', '#')
+        print(out)
+
+    def destroy_env(self):
+        """
+        Stop testpmd and destroy the VFs created on both PFs.
+        """
+        self.dut.send_expect("quit", "# ", 60)
+        time.sleep(2)
+        self.dut.destroy_sriov_vfs_by_port(self.dut_ports[0])
+        self.dut.destroy_sriov_vfs_by_port(self.dut_ports[1])
+
+    def re_load_ice_driver(self):
+        """
+        remove and reload the ice driver
+        """
+        self.dut.send_expect("rmmod ice", "# ", 20)
+        ice_driver_file_location = self.suite_config["ice_driver_file_location"]
+        self.dut.send_expect("insmod %s" % ice_driver_file_location, "# ")
+        self.dut.send_expect("ifconfig %s up" % self.tester_iface0, "# ", 15)
+        self.dut.send_expect("ifconfig %s up" % self.tester_iface1, "# ", 15)
+
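+    # Common testpmd configuration: rxonly forwarding with verbose output, a fixed
+    # RSS hash key on both ports so the reported hash values are reproducible,
+    # then wait for the links to come up and start forwarding.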
+    def config_testpmd(self):
+        self.pmd_output.execute_cmd("set fwd rxonly")
+        self.pmd_output.execute_cmd("set verbose 1")
+        # specify a fixed rss-hash-key for cvl ether
+        self.pmd_output.execute_cmd(
+            "port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd")
+        self.pmd_output.execute_cmd(
+            "port config 1 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd")
+        res = self.pmd_output.wait_link_status_up('all', timeout=15)
+        self.verify(res is True, 'some port link is down')
+        self.pmd_output.execute_cmd("start")
+
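+    # Start testpmd with the two VFs of PF0 whitelisted and 16 RX/TX queues,
+    # then apply the common configuration above.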
+    def launch_testpmd(self):
+        self.pmd_output.start_testpmd(cores="1S/4C/1T",
+                                      param="--rxq=16 --txq=16",
+                                      eal_param="-w %s -w %s" % (
+                                           self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci),
+                                      socket=self.ports_socket)
+        self.config_testpmd()
+
+    def send_packets(self, packets, pf_id=0):
+        self.pkt.update_pkt(packets)
+        tx_port = self.tester_iface0 if pf_id == 0 else self.tester_iface1
+        self.pkt.send_pkt(crb=self.tester, tx_port=tx_port)
+
+    def send_pkts_getouput(self, pkts, pf_id=0):
+        """
+        Send the packets and return the combined verbose output of testpmd,
+        which is used to check the RSS hash, the FDIR mark id and the receive queue.
+        """
+        self.send_packets(pkts, pf_id)
+        time.sleep(1)
+        out_info = self.dut.get_session_output(timeout=1)
+        out_pkt = self.pmd_output.execute_cmd("stop")
+        out = out_info + out_pkt
+        self.pmd_output.execute_cmd("start")
+        return out
+
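+    # Turn each "flow create ..." command into "flow validate ..." and check that testpmd
+    # reports "Flow rule validated" for every rule when check_stats is True,
+    # or for none of them when check_stats is False.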
+    def validate_fdir_rule(self, rule, check_stats=None):
+        # validate rule.
+        p = "Flow rule validated"
+        rule_list = []
+        if isinstance(rule, list):
+            for i in rule:
+                length = len(i)
+                rule_rep = i[0:5] + "validate" + i[11:length]
+                out = self.pmd_output.execute_cmd(rule_rep)
+                if (p in out) and ("Failed" not in out):
+                    rule_list.append(True)
+                else:
+                    rule_list.append(False)
+        elif isinstance(rule, str):
+            length = len(rule)
+            rule_rep = rule[0:5] + "validate" + rule[11:length]
+            out = self.pmd_output.execute_cmd(rule_rep)
+            if (p in out) and ("Failed" not in out):
+                rule_list.append(True)
+            else:
+                rule_list.append(False)
+        else:
+            raise Exception("unsupported rule type, only accept list or str")
+        if check_stats:
+            self.verify(all(rule_list), "some rules failed validation, result %s" % rule_list)
+        elif check_stats == False:
+            self.verify(not any(rule_list), "all rules should fail validation, result %s" % rule_list)
+
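+    # Issue each "flow create ..." command and collect the rule ids reported in
+    # "Flow rule #<id> created". With check_stats=True all rules must be created,
+    # with check_stats=False none of them may be.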
+    def create_fdir_rule(self, rule, check_stats=None):
+        p = re.compile(r"Flow rule #(\d+) created")
+        rule_list = []
+        if isinstance(rule, list):
+            for i in rule:
+                out = self.pmd_output.execute_cmd(i)
+                m = p.search(out)
+                if m:
+                    rule_list.append(m.group(1))
+                else:
+                    rule_list.append(False)
+        elif isinstance(rule, str):
+            out = self.pmd_output.execute_cmd(rule)
+            m = p.search(out)
+            if m:
+                rule_list.append(m.group(1))
+            else:
+                rule_list.append(False)
+        else:
+            raise Exception("unsupported rule type, only accept list or str")
+        if check_stats:
+            self.verify(all(rule_list), "some rules failed to be created, result %s" % rule_list)
+        elif check_stats == False:
+            self.verify(not any(rule_list), "all rules should fail to be created, result %s" % rule_list)
+        return rule_list
+
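+    # Destroy a single rule id or a list of rule ids on the given port and verify
+    # that testpmd reports "Flow rule #<id> destroyed" for each of them.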
+    def destroy_fdir_rule(self, rule_id, port_id=0):
+        if isinstance(rule_id, list):
+            for i in rule_id:
+                out = self.pmd_output.execute_cmd("flow destroy %s rule %s" % (port_id, i))
+                p = re.compile(r"Flow rule #(\d+) destroyed")
+                m = p.search(out)
+                self.verify(m, "flow rule %s delete failed" % i)
+        else:
+            out = self.pmd_output.execute_cmd("flow destroy %s rule %s" % (port_id, rule_id))
+            p = re.compile(r"Flow rule #(\d+) destroyed")
+            m = p.search(out)
+            self.verify(m, "flow rule %s delete failed" % rule_id)
+
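+    # Check the "flow list" output of a port: with stats=True at least one rule must be
+    # listed (and, if rule_list is given, exactly those rule ids); with stats=False
+    # no rule may be listed.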
+    def check_fdir_rule(self, port_id=0, stats=True, rule_list=None):
+        out = self.pmd_output.execute_cmd("flow list %s" % port_id)
+        p = re.compile(r"ID\s+Group\s+Prio\s+Attr\s+Rule")
+        if stats:
+            self.verify(p.search(out), "no flow rule exists on port %s" % port_id)
+            if rule_list:
+                p = re.compile(r"^(\d+)\s")
+                li = out.splitlines()
+                res = list(filter(bool, list(map(p.match, li))))
+                result = [i.group(1) for i in res]
+                self.verify(sorted(result) == sorted(rule_list),
+                            "check rule list failed. expect %s, result %s" % (rule_list, result))
+        else:
+            self.verify(not p.search(out), "flow rule on port %s still exists" % port_id)
+
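+    # Count the rule entries listed by "flow list" and verify that the count equals num
+    # (num=0 means no rule may be listed); return the raw output.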
+    def check_rule_number(self, port_id=0, num=0):
+        out = self.pmd_output.execute_cmd("flow list %s" % port_id)
+        result_scanner = r'\d*.*?\d*.*?\d*.*?=>*'
+        scanner = re.compile(result_scanner, re.DOTALL)
+        li = scanner.findall(out)
+        if num == 0:
+            self.verify(not li, "there should be no rule listed")
+        else:
+            print(len(li))
+            self.verify(len(li) == num, "the number of rules is wrong.")
+        return out
+
+    def test_mac_ipv4_pay(self):
+        self.rte_flow_process(vectors_ipv4_pay)
+
+    def test_mac_ipv4_udp(self):
+        self.rte_flow_process(vectors_ipv4_udp)
+
+    def test_mac_ipv4_tcp(self):
+        self.rte_flow_process(vectors_ipv4_tcp)
+
+    def test_mac_ipv4_sctp(self):
+        self.rte_flow_process(vectors_ipv4_sctp)
+
+    def test_mac_ipv6_pay(self):
+        self.rte_flow_process(vectors_ipv6_pay)
+
+    def test_mac_ipv6_udp(self):
+        self.rte_flow_process(vectors_ipv6_udp)
+
+    def test_mac_ipv6_tcp(self):
+        self.rte_flow_process(vectors_ipv6_tcp)
+
+    def test_mac_ipv6_sctp(self):
+        self.rte_flow_process(vectors_ipv6_sctp)
+
+    def test_mac_ipv4_gtpu_eh(self):
+        self.rte_flow_process(vectors_gtpu_eh)
+
+    def test_mac_ipv4_gtpu(self):
+        self.rte_flow_process(vectors_gtpu)
+
+    def test_mac_ipv4_protocol(self):
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 proto is 1 / end actions queue index 1 / mark id 1 / end",
+            "flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.21 proto is 17 / end actions passthru / mark id 3 / end"]
+
+        #validate rules
+        self.validate_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        #create rules
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+
+        # pkt1 and pkt2 in "match" match rule 0, pkt3-6 match rule 1.
+        out1 = self.send_pkts_getouput(MAC_IPV4_PAY_protocol["match"][0:2])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=2, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+
+        out2 = self.send_pkts_getouput(MAC_IPV4_PAY_protocol["match"][2:6])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=4, check_param={"port_id": 0, "mark_id": 3, "passthru": 1}, stats=True)
+
+        # send mismatched packets:
+        out3 = self.send_pkts_getouput(MAC_IPV4_PAY_protocol["mismatch"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=4, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+        # destroy the rules and check there is no rule listed.
+        self.destroy_fdir_rule(rule_id=rule_li, port_id=0)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # send matched packet
+        out4 = self.send_pkts_getouput(MAC_IPV4_PAY_protocol["match"])
+        rfc.check_iavf_fdir_mark(out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+    def test_mac_ipv6_protocol(self):
+        rules = [
+            "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 proto is 44 / end actions rss queues 5 6 end / mark id 0 / end",
+            "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 proto is 6 / end actions mark id 2 / rss / end"]
+
+        # validate rules
+        self.validate_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # create rules
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+
+        # pkt1-4 in "match" match rule 0, pkt5-6 match rule 1.
+        out1 = self.send_pkts_getouput(MAC_IPV6_PAY_protocol["match"][0:4])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=4, check_param={"port_id": 0, "mark_id": 0, "queue": [5, 6]}, stats=True)
+
+        out2 = self.send_pkts_getouput(MAC_IPV6_PAY_protocol["match"][4:6])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=2, check_param={"port_id": 0, "mark_id": 2, "passthru": 1}, stats=True)
+
+        # send mismatched packets:
+        out3 = self.send_pkts_getouput(MAC_IPV6_PAY_protocol["mismatch"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=3, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+        # destroy the rules and check there is no rule listed.
+        self.destroy_fdir_rule(rule_id=rule_li, port_id=0)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # send matched packet
+        out4 = self.send_pkts_getouput(MAC_IPV6_PAY_protocol["match"])
+        rfc.check_iavf_fdir_mark(out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+    def test_mac_ipv4_gtpu_eh_without_teid(self):
+        rules = "flow create 0 ingress pattern eth / ipv4 / udp / gtpu / gtp_psc qfi is 0x34 / end actions queue index 1 / mark id 3 / end"
+        MAC_IPV4_GTPU_EH_WITHOUT_TEID = {
+            "match": 'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x34)/IP()/TCP()/Raw("x"*20)',
+            "mismatch": 'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255)/GTP_PDUSession_ExtensionHeader(pdu_type=1, qos_flow=0x35)/IP()/TCP()/Raw("x"*20)'
+        }
+        # validate rules
+        self.validate_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # create rules
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+
+        # send matched packet
+        out1 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_TEID["match"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 1}, stats=True)
+
+        # send mismatched packets:
+        out2 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_TEID["mismatch"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+        # destroy the rules and check there is no rule listed.
+        self.destroy_fdir_rule(rule_id=rule_li, port_id=0)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # send matched packet
+        out3 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_TEID["match"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+    def test_mac_ipv4_gtpu_eh_without_qfi(self):
+        rules = "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc / end actions rss queues 2 3 end / mark id 1 / end"
+        MAC_IPV4_GTPU_EH_WITHOUT_QFI = {
+            "match": 'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x12345678)/GTP_PDUSession_ExtensionHeader(pdu_type=1)/IP()/UDP()/Raw("x"*20)',
+            "mismatch": 'Ether(src="a4:bf:01:51:27:ca", dst="00:11:22:33:44:55")/IP(src="192.168.0.20", dst="192.168.0.21")/UDP(dport=2152)/GTP_U_Header(gtp_type=255, teid=0x1234567)/GTP_PDUSession_ExtensionHeader(pdu_type=1)/IP()/UDP()/Raw("x"*20)'
+        }
+        # validate rules
+        self.validate_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # create rules
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+
+        # send matched packet
+        out1 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_QFI["match"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": [2, 3]}, stats=True)
+
+        # send mismatched packets:
+        out2 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_QFI["mismatch"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+        # destroy the rules and check there is no rule listed.
+        self.destroy_fdir_rule(rule_id=rule_li, port_id=0)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # send matched packet
+        out3 = self.send_pkts_getouput(MAC_IPV4_GTPU_EH_WITHOUT_QFI["match"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=False)
+
+    def test_pfcp(self):
+        # enable the RSS function for PFCP session packets.
+        out = self.pmd_output.execute_cmd("flow create 0 ingress pattern eth / ipv4 / udp / pfcp / end actions rss types pfcp end key_len 0 queues end / end")
+        self.verify("Flow rule #0 created" in out, "failed to enable RSS function for MAC_IPV4_PFCP session packet")
+        out = self.pmd_output.execute_cmd("flow create 0 ingress pattern eth / ipv6 / udp / pfcp / end actions rss types pfcp end key_len 0 queues end / end")
+        self.verify("Flow rule #1 created" in out, "failed to enable RSS function for MAC_IPV6_PFCP session packet")
+        self.multirules_process(vectors_pfcp)
+
+    def test_l2_ethertype(self):
+        self.multirules_process(vectors_l2_ethertype)
+
+    def test_negative_case(self):
+        """
+        negative cases
+        """
+        rules = {
+            "invalid parameters of queue index" : "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions queue index 16 / end",
+            "invalid parameters of rss queues" : [
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 1 2 3 end / end",
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 0 end / end",
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues end / end",
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 1 2 3 5 end / end",
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions rss queues 15 16 end / end",
+                "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions rss queues 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 end / end"],
+            "invalid mark id" : "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions passthru / mark id 4294967296 / end",
+            "invalid parameters of GTPU input set" : [
+                "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x100 / end actions queue index 1 / end",
+                "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x100000000 / gtp_psc qfi is 0x5 / end actions queue index 2 / end",
+                "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x100000000 / end actions queue index 1 / end"],
+            "unsupported type of L2 ethertype" : [
+                "flow create 0 ingress pattern eth type is 0x0800 / end actions queue index 1 / end",
+                "flow create 0 ingress pattern eth type is 0x86dd / end actions queue index 1 / end"],
+            "conflicted actions" : "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 1 / rss queues 2 3 end / end",
+            "void action" : "flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions end",
+            "unsupported action" : "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 proto is 255 ttl is 2 tos is 4 / end actions count / end",
+            "unsupported input set field" : "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 tc is 2 / end actions queue index 1 / end",
+            "void input set value" : "flow create 0 ingress pattern eth / ipv4 / end actions queue index 1 / end",
+            "invalid port" : "flow create 2 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 1 / end"
+        }
+        # all of these rules should fail both validation and creation
+        self.validate_fdir_rule(rules["invalid parameters of queue index"], check_stats=False)
+        self.create_fdir_rule(rules["invalid parameters of queue index"], check_stats=False)
+        self.validate_fdir_rule(rules["invalid parameters of rss queues"], check_stats=False)
+        self.create_fdir_rule(rules["invalid parameters of rss queues"], check_stats=False)
+        self.validate_fdir_rule(rules["invalid parameters of GTPU input set"], check_stats=False)
+        self.create_fdir_rule(rules["invalid parameters of GTPU input set"], check_stats=False)
+        self.validate_fdir_rule(rules["unsupported type of L2 ethertype"], check_stats=False)
+        self.create_fdir_rule(rules["unsupported type of L2 ethertype"], check_stats=False)
+        self.validate_fdir_rule(rules["conflicted actions"], check_stats=False)
+        self.create_fdir_rule(rules["conflicted actions"], check_stats=False)
+        self.validate_fdir_rule(rules["void action"], check_stats=False)
+        self.create_fdir_rule(rules["void action"], check_stats=False)
+        self.validate_fdir_rule(rules["unsupported input set field"], check_stats=False)
+        self.create_fdir_rule(rules["unsupported input set field"], check_stats=False)
+        self.validate_fdir_rule(rules["void input set value"], check_stats=False)
+        self.create_fdir_rule(rules["void input set value"], check_stats=False)
+        self.validate_fdir_rule(rules["invalid port"], check_stats=False)
+        self.create_fdir_rule(rules["invalid port"], check_stats=False)
+
+        # check there is no rule listed
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+
+        # duplicated rules
+        rule = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 1 / end"
+        self.create_fdir_rule(rule, check_stats=True)
+        self.create_fdir_rule(rule, check_stats=False)
+        self.pmd_output.execute_cmd("flow destroy 0 rule 0")
+
+        # conflicting rules
+        rule = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 1 / end"
+        self.create_fdir_rule(rule, check_stats=True)
+        rule1 = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions queue index 2 / end"
+        self.create_fdir_rule(rule1, check_stats=False)
+        rule2 = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 ttl is 2 tos is 4 / end actions drop / end"
+        self.create_fdir_rule(rule2, check_stats=False)
+        self.pmd_output.execute_cmd("flow destroy 0 rule 0", timeout=1)
+
+        # delete a non-existent rule
+        out1 = self.pmd_output.execute_cmd("flow destroy 0 rule 0")
+        self.verify("error" not in out1, "no error message should be reported")
+        out2 = self.pmd_output.execute_cmd("flow destroy 2 rule 0")
+        self.verify("Invalid port" in out2, "an 'Invalid port' error should be reported")
+        out3 = self.pmd_output.execute_cmd("flow flush 2")
+        self.verify("No such device" in out3, "flushing flows on the non-existent port 2 should report 'No such device'")
+        out4 = self.pmd_output.execute_cmd("flow list 2")
+        self.verify("Invalid port" in out4, "listing flows on the non-existent port 2 should report 'Invalid port'")
+
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+
+    def test_unsupported_pattern_with_OS_package(self):
+        """
+        Create GTPU rule, PFCP rule and L2 Ethertype rule with OS default package
+        """
+        rule = ["flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 / gtp_psc qfi is 0x34 / end actions drop / end",
+                "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / end",
+                "flow create 0 ingress pattern eth type is 0x8863 / end actions queue index 1 / mark id 1 / end"]
+        self.destroy_env()
+        os_package_location = self.suite_config["os_default_package_file_location"]
+        comms_package_location = self.suite_config["comms_package_file_location"]
+        package_location = self.suite_config["package_file_location"]
+        self.dut.send_expect("cp %s %s" % (os_package_location, package_location), "# ")
+        self.re_load_ice_driver()
+        self.setup_2pf_4vf_env()
+        self.launch_testpmd()
+
+        self.validate_fdir_rule(rule, check_stats=False)
+        self.create_fdir_rule(rule, check_stats=False)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        self.destroy_env()
+        self.dut.send_expect("cp %s %s" % (comms_package_location, package_location), "# ")
+        self.re_load_ice_driver()
+        self.setup_2pf_4vf_env()
+
+    def test_create_same_rule_on_pf_vf(self):
+        """
+        create same rules on pf and vf, no conflict
+        """
+        self.dut.kill_all()
+        self.session_secondary = self.dut.new_session()
+        self.session_third = self.dut.new_session()
+
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / end"]
+        pkts = {
+            "matched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)'],
+            "mismatched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)'],
+            "pf": [
+                'Ether(dst="%s")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)' % self.pf0_mac,
+                'Ether(dst="%s")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)' % self.pf1_mac]
+        }
+        out_pf0 = self.dut.send_expect("ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ")
+        out_pf1 = self.dut.send_expect("ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf1_intf, "# ")
+        p = re.compile(r"Added rule with ID (\d+)")
+        m0 = p.search(out_pf0)
+        m1 = p.search(out_pf1)
+
+        eal_param = "-c 0xf -n 6 -w %s -w %s --file-prefix=pf0" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.dut.send_expect(command, "testpmd> ", 300)
+        self.config_testpmd()
+
+        eal_param = "-c 0xf0 -n 6 -w %s -w %s --file-prefix=pf1" % (self.sriov_vfs_pf1[0].pci,self.sriov_vfs_pf1[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.session_secondary.send_expect(command, "testpmd> ", 300)
+        #self.session_secondary.config_testpmd()
+        self.session_secondary.send_expect("set fwd rxonly", "testpmd> ")
+        self.session_secondary.send_expect("set verbose 1", "testpmd> ")
+        # specify a fixed rss-hash-key for cvl ether
+        self.session_secondary.send_expect(
+            "port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect(
+            "port config 1 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect("start", "testpmd> ")
+
+        self.create_fdir_rule(rules, check_stats=True)
+        self.session_secondary.send_expect("flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / end", "created")
+        self.session_secondary.send_expect("flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / end", "created")
+
+        # confirm pf link is up
+        self.session_third.send_expect("ifconfig %s up" % self.pf0_intf, "# ", 15)
+        self.session_third.send_expect("ifconfig %s up" % self.pf1_intf, "# ", 15)
+        time.sleep(1)
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+        time.sleep(1)
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the packet is not redirected to expected queue of pf0")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf1, "the packet is not redirected to expected queue of pf1")
+
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=True)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=True)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=True)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=True)
+
+        #send mismatched packets
+        out_vf00 = self.send_pkts_getouput(pkts["mismatched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["mismatched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=False)
+
+        self.send_packets(pkts["mismatched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=False)
+
+        self.send_packets(pkts["mismatched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=False)
+
+        # flush all the rules
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+        self.dut.send_expect("flow flush 1", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 0", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 1", "testpmd> ")
+
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf0_intf, int(m0.group(1))), "# ")
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf1_intf, int(m1.group(1))), "# ")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf0_intf), "Total 0 rules")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf1_intf), "Total 0 rules")
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the packet should not be redirected to queue 1 of pf0 after the rule is deleted")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf1, "the packet should not be redirected to queue 1 of pf1 after the rule is deleted")
+
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=False)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 1}, stats=False)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "queue": 1}, stats=False)
+
+        self.dut.close_session(self.session_secondary)
+        self.dut.close_session(self.session_third)
+
+    def test_create_same_input_diff_action_on_pf_vf(self):
+        """
+        create same input set but different action rules on pf and vf, no conflict.
+        """
+        self.dut.kill_all()
+        self.session_secondary = self.dut.new_session()
+        self.session_third = self.dut.new_session()
+
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / mark id 1 / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions rss queues 3 4 end / mark / end",
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions drop / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions passthru / mark id 1 / end"]
+        pkts = {
+            "matched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)'],
+            "mismatched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)'],
+            "pf": [
+                'Ether(dst="%s")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)' % self.pf0_mac,
+                'Ether(dst="%s")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)' % self.pf1_mac]
+        }
+        out_pf0 = self.dut.send_expect("ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ")
+        out_pf1 = self.dut.send_expect("ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 2" % self.pf1_intf, "# ")
+        p = re.compile(r"Added rule with ID (\d+)")
+        m0 = p.search(out_pf0)
+        m1 = p.search(out_pf1)
+
+        eal_param = "-c 0xf -n 6 -w %s -w %s --file-prefix=pf0" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.dut.send_expect(command, "testpmd> ", 300)
+        self.config_testpmd()
+
+        eal_param = "-c 0xf0 -n 6 -w %s -w %s --file-prefix=pf1" % (self.sriov_vfs_pf1[0].pci,self.sriov_vfs_pf1[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.session_secondary.send_expect(command, "testpmd> ", 300)
+        #self.session_secondary.config_testpmd()
+        self.session_secondary.send_expect("set fwd rxonly", "testpmd> ")
+        self.session_secondary.send_expect("set verbose 1", "testpmd> ")
+        # specify a fixed rss-hash-key for cvl ether
+        self.session_secondary.send_expect(
+            "port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect(
+            "port config 1 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect("start", "testpmd> ")
+
+        self.create_fdir_rule(rules[:2], check_stats=True)
+        self.session_secondary.send_expect(rules[2], "created")
+        self.session_secondary.send_expect(rules[3], "created")
+
+        # confirm pf link is up
+        self.session_third.send_expect("ifconfig %s up" % self.pf0_intf, "# ", 15)
+        self.session_third.send_expect("ifconfig %s up" % self.pf1_intf, "# ", 15)
+        time.sleep(1)
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+        time.sleep(1)
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the packet is not redirected to expected queue of pf0")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_queue_2_packets: 1" in out_pf1, "the packet is not redirected to expected queue of pf1")
+
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1, "mark_id": 1}, stats=True)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": [3, 4], "mark_id": 0}, stats=True)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "drop": 1}, stats=True)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "passthru": 1, "mark_id": 1}, stats=True)
+
+        #send mismatched packets
+        out_vf00 = self.send_pkts_getouput(pkts["mismatched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1, "mark_id": 1}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["mismatched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": [3, 4], "mark_id": 0}, stats=False)
+
+        self.send_packets(pkts["mismatched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "drop": 1}, stats=False)
+
+        self.send_packets(pkts["mismatched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "passthru": 1, "mark_id": 1}, stats=False)
+
+        # flush all the rules
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+        self.dut.send_expect("flow flush 1", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 0", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 1", "testpmd> ")
+
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf0_intf, int(m0.group(1))), "# ")
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf1_intf, int(m1.group(1))), "# ")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf0_intf), "Total 0 rules")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf1_intf), "Total 0 rules")
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the packet should not be redirected to queue 1 of pf0 after the rule is deleted")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_queue_2_packets: 1" in out_pf1, "the packet should not be redirected to queue 2 of pf1 after the rule is deleted")
+
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": 1, "mark_id": 1}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": [3, 4], "mark_id": 0}, stats=False)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "drop": 1}, stats=False)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "passthru": 1, "mark_id": 1}, stats=False)
+
+        self.dut.close_session(self.session_secondary)
+        self.dut.close_session(self.session_third)
+
+    def test_create_diff_input_diff_action_on_pf_vf(self):
+        """
+        create different rules on pf and vf
+        """
+        self.dut.kill_all()
+        self.session_secondary = self.dut.new_session()
+        self.session_third = self.dut.new_session()
+
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions rss queues 2 3 end / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.22 dst is 192.168.0.23 / udp src is 22 dst is 23 / end actions queue index 5 / mark / end",
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.22 dst is 192.168.0.23 / udp src is 22 dst is 23 / end actions queue index 5 / mark id 1 / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.22 dst is 192.168.0.23 tos is 4 / tcp src is 22 dst is 23 / end actions drop / end"]
+        pkts = {
+            "matched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.22",dst="192.168.0.23")/UDP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.22",dst="192.168.0.23")/UDP(sport=22,dport=23)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.22",dst="192.168.0.23",tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)'],
+            "mismatched": [
+                'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+                'Ether(dst="00:11:22:33:44:88")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)'],
+            "pf": [
+                'Ether(dst="%s")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)' % self.pf0_mac,
+                'Ether(dst="%s")/IP(src="192.168.0.22",dst="192.168.0.23")/UDP(sport=22,dport=23)/Raw("x" * 80)' % self.pf1_mac]
+        }
+        out_pf0 = self.dut.send_expect("ethtool -N %s flow-type tcp4 src-ip 192.168.0.20 dst-ip 192.168.0.21 src-port 22 dst-port 23 action 1" % self.pf0_intf, "# ")
+        out_pf1 = self.dut.send_expect("ethtool -N %s flow-type udp4 src-ip 192.168.0.22 dst-ip 192.168.0.23 src-port 22 dst-port 23 action -1" % self.pf1_intf, "# ")
+        p = re.compile(r"Added rule with ID (\d+)")
+        m0 = p.search(out_pf0)
+        m1 = p.search(out_pf1)
+
+        eal_param = "-c 0xf -n 6 -w %s -w %s --file-prefix=pf0" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.dut.send_expect(command, "testpmd> ", 300)
+        self.config_testpmd()
+
+        eal_param = "-c 0xf0 -n 6 -w %s -w %s --file-prefix=pf1" % (self.sriov_vfs_pf1[0].pci,self.sriov_vfs_pf1[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16")
+        self.session_secondary.send_expect(command, "testpmd> ", 300)
+        #self.session_secondary.config_testpmd()
+        self.session_secondary.send_expect("set fwd rxonly", "testpmd> ")
+        self.session_secondary.send_expect("set verbose 1", "testpmd> ")
+        # specify a fixed rss-hash-key for cvl ether
+        self.session_secondary.send_expect(
+            "port config 0 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect(
+            "port config 1 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd", "testpmd> ")
+        self.session_secondary.send_expect("start", "testpmd> ")
+
+        self.create_fdir_rule(rules[:2], check_stats=True)
+        self.session_secondary.send_expect(rules[2], "created")
+        self.session_secondary.send_expect(rules[3], "created")
+
+        # confirm pf link is up
+        self.session_third.send_expect("ifconfig %s up" % self.pf0_intf, "# ", 15)
+        self.session_third.send_expect("ifconfig %s up" % self.pf1_intf, "# ", 15)
+        time.sleep(1)
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+        time.sleep(1)
+
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the packet is not redirected to expected queue of pf0")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_dropped: 1" in out_pf1, "the packet is not dropped pf1")
+
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": [2, 3]}, stats=True)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 5, "mark_id": 0}, stats=True)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 5, "mark_id": 1}, stats=True)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=True)
+
+        #send mismatched packets
+        out_vf00 = self.send_pkts_getouput(pkts["mismatched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": [2, 3]}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["mismatched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 5, "mark_id": 0}, stats=False)
+
+        self.send_packets(pkts["mismatched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 5, "mark_id": 1}, stats=False)
+
+        self.send_packets(pkts["mismatched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False)
+
+        # flush all the rules
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+        self.dut.send_expect("flow flush 1", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 0", "testpmd> ")
+        self.session_secondary.send_expect("flow flush 1", "testpmd> ")
+
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf0_intf, int(m0.group(1))), "# ")
+        self.session_third.send_expect("ethtool -N %s delete %d" % (self.pf1_intf, int(m1.group(1))), "# ")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf0_intf), "Total 0 rules")
+        self.session_third.send_expect("ethtool -n %s" % (self.pf1_intf), "Total 0 rules")
+
+        # send matched packets
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][0], self.tester_iface0))
+        self.tester.scapy_append('sendp([%s], iface="%s")' % (pkts["pf"][1], self.tester_iface1))
+        self.tester.scapy_execute()
+
+        out_pf0 = self.session_third.send_expect("ethtool -S %s" % self.pf0_intf, "# ")
+        self.verify("rx_queue_1_packets: 1" in out_pf0, "the rule is not destroyed")
+        out_pf1 = self.session_third.send_expect("ethtool -S %s" % self.pf1_intf, "# ")
+        self.verify("rx_dropped: 1" in out_pf1, "the packet is dropped by pf1")
+
+        # send matched packets, the flushed rules should not take effect any more
+        out_vf00 = self.send_pkts_getouput(pkts["matched"][0])
+        rfc.check_iavf_fdir_mark(out_vf00, pkt_num=1, check_param={"port_id": 0, "queue": [2, 3]}, stats=False)
+        out_vf01 = self.send_pkts_getouput(pkts["matched"][1])
+        rfc.check_iavf_fdir_mark(out_vf01, pkt_num=1, check_param={"port_id": 1, "queue": 5, "mark_id": 0}, stats=False)
+
+        self.send_packets(pkts["matched"][2], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf10 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf10, pkt_num=1, check_param={"port_id": 0, "queue": 5, "mark_id": 1}, stats=False)
+
+        self.send_packets(pkts["matched"][3], pf_id=1)
+        out_info = self.session_secondary.get_session_before(timeout=2)
+        out_pkt = self.session_secondary.send_expect("stop", "testpmd> ")
+        out_vf11 = out_info + out_pkt
+        self.session_secondary.send_expect("start", "testpmd> ")
+        rfc.check_iavf_fdir_mark(out_vf11, pkt_num=1, check_param={"port_id": 1, "drop": 1}, stats=False)
+
+        self.dut.close_session(self.session_secondary)
+        self.dut.close_session(self.session_third)
+
+    def test_maxnum_14336rules_1vf(self):
+        """
+        all the VFs share a 14336-entry rule table
+        """
+        self.dut.kill_all()
+        src_file = 'create_14336_rules'
+        flows=open(self.src_file_dir + src_file,mode='w')
+        count=0
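+        # 56 * 256 = 14336 distinct IPv4 dst addresses, one fdir rule per address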
+        for i in range(56):
+            for j in range(256):
+                flows.write('flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.%d.%d / end actions queue index 5 / mark / end \n'%(i,j))
+                count=count+1
+        flows.close()
+        self.verify(count == 14336, "failed to create 14336 fdir rules on vf.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir)
+
+        eal_param = "-c f -n 6 -w %s -w %s" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16 --cmdline-file=%s" % self.dut_file_dir + src_file)
+        self.dut.send_expect(command, "testpmd> ", 300)
+        self.config_testpmd()
+
+        # can't create more than 14336 rules on vf0
+        rule_14336_vf0 = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end"
+        self.create_fdir_rule(rule_14336_vf0, check_stats=False)
+        #check there are 14336 rules created.
+        out = self.check_rule_number(port_id=0, num=14336)
+        self.verify("14336" not in out, "more than 14336 rules can be created on 1vf")
+
+        # can't create rule on vf1
+        rule_0_vf1 = "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end"
+        self.create_fdir_rule(rule_0_vf1, check_stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+
+        pkt_0 = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.0")/Raw("x" * 80)'
+        pkt_14335 = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.55.255")/Raw("x" * 80)'
+        pkt_14336 = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.56.0")/Raw("x" * 80)'
+
+        # check packet match rule 0 and rule 14335 can be redirected to expected queue
+        out_0 = self.send_pkts_getouput(pkts=pkt_0, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=True)
+        out_14335 = self.send_pkts_getouput(pkts=pkt_14335, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_14335, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=True)
+        # check packet match rule 14336 can't be redirected to expected queue.
+        out_14336 = self.send_pkts_getouput(pkts=pkt_14336, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_14336, pkt_num=1, check_param={"port_id": 0, "queue": 5}, stats=False)
+
+        # flush all the rules
+        self.dut.send_expect("flow flush 0", "testpmd> ", timeout=200)
+        self.check_fdir_rule(port_id=0, stats=False)
+        out_0 = self.send_pkts_getouput(pkts=pkt_0, pf_id=0)
+        out_14335 = self.send_pkts_getouput(pkts=pkt_14335, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=False)
+        rfc.check_iavf_fdir_mark(out_14335, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=False)
+
+        self.create_fdir_rule(rule_14336_vf0, check_stats=True)
+        self.create_fdir_rule(rule_0_vf1, check_stats=True)
+        out_14336 = self.send_pkts_getouput(pkts=pkt_14336, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_14336, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=True)
+
+    def test_maxnum_14336rules_2vf(self):
+        """
+        all the VFs share a 14336-entry rule table
+        """
+        self.dut.kill_all()
+        self.session_secondary = self.dut.new_session()
+        src_file = 'create_14336_rules_2vf'
+        flows=open(self.src_file_dir + src_file,mode='w')
+        flows.write('flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.0 / end actions queue index 5 / mark / end \n')
+        count=1
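+        # 1 rule on port 0 plus 55 * 256 + 255 = 14335 rules on port 1, 14336 rules in total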
+        for i in range(55):
+            for j in range(256):
+                flows.write('flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.%d.%d / end actions queue index 5 / mark / end \n'%(i,j))
+                count=count+1
+        for j in range(255):
+            flows.write('flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.55.%d / end actions queue index 5 / mark / end \n' % j)
+            count=count+1
+        flows.close()
+        self.verify(count == 14336, "failed to create 14336 fdir rules on 2 vfs.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir)
+
+        eal_param = "-c f -n 6 -w %s -w %s" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf1[0].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16 --cmdline-file=%s" % self.dut_file_dir + src_file)
+        self.dut.send_expect(command, "testpmd> ", 300)
+
+        self.config_testpmd()
+        self.check_fdir_rule(port_id=0, rule_list=['0'])
+
+        # can't create more than 14336 rules on 2vf
+        rule_14335_vf1 = "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end"
+        self.create_fdir_rule(rule_14335_vf1, check_stats=False)
+        # check there are 14335 rules created on port 1 (14336 in total with the rule on port 0)
+        out = self.check_rule_number(port_id=1, num=14335)
+        self.verify("14335" not in out, "more than 14336 rules are created on 2vf")
+
+        # can't create new rule on vf0
+        rule_1_vf0 = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end"
+        self.create_fdir_rule(rule_1_vf0, check_stats=False)
+        self.check_rule_number(port_id=0, num=1)
+
+        pkt_0 = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.0")/Raw("x" * 80)'
+        pkt_1 = 'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.0.0")/Raw("x" * 80)'
+        pkt_14335 = 'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.55.254")/Raw("x" * 80)'
+        pkt_14336 = 'Ether(dst="00:11:22:33:44:77")/IP(src="192.168.0.20",dst="192.168.56.0")/Raw("x" * 80)'
+
+        self.session_secondary.send_expect("ifconfig %s up" % self.pf0_intf, "# ", 15)
+        self.session_secondary.send_expect("ifconfig %s up" % self.pf1_intf, "# ", 15)
+        time.sleep(1)
+
+        # check packet match rule 0 and rule 14335 can be redirected to expected queue
+        out_0 = self.send_pkts_getouput(pkts=pkt_0, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkt_1, pf_id=1)
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=True)
+
+        out_14335 = self.send_pkts_getouput(pkts=pkt_14335, pf_id=1)
+        rfc.check_iavf_fdir_mark(out_14335, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=True)
+        # check packet match rule 14336 can't be redirected to expected queue.
+        out_14336 = self.send_pkts_getouput(pkts=pkt_14336, pf_id=1)
+        rfc.check_iavf_fdir_mark(out_14336, pkt_num=1, check_param={"port_id": 1, "queue": 5}, stats=False)
+
+        # flush the rule on vf0, then a new rule can be created on vf1 successfully.
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+        self.create_fdir_rule(rule_14335_vf1, check_stats=True)
+        out_14336 = self.send_pkts_getouput(pkts=pkt_14336, pf_id=1)
+        rfc.check_iavf_fdir_mark(out_14336, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=True)
+
+        self.dut.send_expect("flow flush 1", "testpmd> ", timeout=300)
+
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+
+        out_0 = self.send_pkts_getouput(pkts=pkt_0, pf_id=0)
+        out_1 = self.send_pkts_getouput(pkts=pkt_1, pf_id=1)
+        out_14335 = self.send_pkts_getouput(pkts=pkt_14335, pf_id=1)
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 5}, stats=False)
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=False)
+        rfc.check_iavf_fdir_mark(out_14335, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=False)
+
+        self.dut.close_session(self.session_secondary)
+
+    def test_maxnum_15360rules_1pf_2vf(self):
+        """
+        on a 2*100G NIC, each PF can create at least 1024 rules, and all the VFs share a 14336-entry rule table
+        """
+        self.dut.kill_all()
+        self.session_secondary = self.dut.new_session()
+
+        # generate 1025 kernel fdir rules for pf1
+        src_file = 'iavf_fdir_15360_kernel_rules'
+        flows=open(self.src_file_dir + src_file,mode='w')
+        count=0
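+        # 4 * 256 + 1 = 1025 kernel fdir rules in total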
+        for i in range(4):
+            for j in range(256):
+                flows.write('ethtool -N %s flow-type tcp4 src-ip 192.168.%d.%d dst-ip 192.168.100.2 src-port 32 dst-port 33 action 8 \n' % (self.pf1_intf, i, j))
+                count=count+1
+        flows.write('ethtool -N %s flow-type tcp4 src-ip 192.168.100.0 dst-ip 192.168.100.2 src-port 32 dst-port 33 action 8 \n' % self.pf1_intf)
+        count=count+1
+        flows.close()
+        self.verify(count == 1025, "failed to create 1025 fdir rules on pf.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir)
+
+        # apply the 1025 rules to pf1
+        fkr = open(self.src_file_dir + src_file, "r")
+        kernel_rules = fkr.read()
+        fkr.close()
+        self.dut.send_expect(kernel_rules, "# ", 300)
+        # write the kernel rules result to file
+        fkw = open("1025_kernel_rules_result.txt", "w")
+        fkw.write(self.dut.send_expect("ethtool -n %s" % self.pf1_intf, "# ", 300))
+        fkw.close()
+
+        #create 1 rule on vf00, and 14334 rules on vf01
+        src_file_vf = 'iavf_fdir_15360_vf_rules'
+        flows = open(self.src_file_dir + src_file_vf, mode='w')
+        flows.write('flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.0 / end actions queue index 5 / mark / end \n')
+        count=1
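+        # 1 rule on vf00 plus 55 * 256 + 254 = 14334 rules on vf01, 14335 rules in total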
+        for i in range(55):
+            for j in range(256):
+                flows.write('flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.%d.%d / end actions queue index 5 / mark / end \n'%(i,j))
+                count=count+1
+        for j in range(254):
+            flows.write('flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.55.%d / end actions queue index 5 / mark / end \n' % j)
+            count=count+1
+        flows.close()
+        self.verify(count == 14335, "failed to create 14335 fdir rules on vfs.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file_vf, self.dut_file_dir)
+
+        # start testpmd with creating rules in commandline
+        eal_param = "-c f -n 6 -w %s -w %s" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16 --cmdline-file=%s" % self.dut_file_dir + src_file_vf)
+        fdw = open("15360_rules_vf_result.txt", "w")
+        fdw.write(self.dut.send_expect(command, "testpmd> ", 360))
+        fdw.close()
+
+        self.config_testpmd()
+        # check there is 1 rule created on vf00
+        self.check_fdir_rule(port_id=0, rule_list=['0'])
+
+        # can't create more than 14335 rules on the 2 VFs: the PF's 1025th rule already takes one entry of the shared pool; rule indexes start from 0
+        rule_14334_vf1 = "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.56.0 / end actions queue index 5 / mark / end"
+        pkt_14334 = 'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.20",dst="192.168.56.0")/Raw("x" * 80)'
+        self.create_fdir_rule(rule_14334_vf1, check_stats=False)
+
+        #check there are 14334 rules created on vf01
+        out = self.check_rule_number(port_id=1, num=14334)
+        self.verify("14334" not in out, "more than 15360 rules are created on 2vf")
+
+        # delete a rule on pf1 to free one shared fdir entry
+        self.session_secondary.send_expect("ethtool -N %s delete 14847" % self.pf1_intf, "# ")
+        time.sleep(3)
+
+        # then can create one more rule on vf01
+        self.create_fdir_rule(rule_14334_vf1, check_stats=True)
+        out_14334 = self.send_pkts_getouput(pkts=pkt_14334, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_14334, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=True)
+
+        self.dut.send_expect("flow flush 0", "testpmd> ", timeout=200)
+        self.dut.send_expect("flow flush 1", "testpmd> ", timeout=200)
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+        out_14334 = self.send_pkts_getouput(pkts=pkt_14334, pf_id=0)
+        rfc.check_iavf_fdir_mark(out_14334, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 5}, stats=False)
+
+        self.dut.send_expect("quit", "# ")
+        self.dut.close_session(self.session_secondary)
+        self.re_load_ice_driver()
+
+    def test_maxnum_128_profiles(self):
+        """
+        There are 128 profiles in total.
+        Each PF applies for 8 profiles when the kernel driver initializes: 4 for non-tunnel packets and 4 for tunnel packets.
+        Profile 0 and profile 1 are the default profiles for specific packets.
+        The case is designed for a 2*100G card, so only 128 - 2*8 - 2 = 110 profiles can be used by the VFs.
+        """
+        self.destroy_env()
+        self.setup_npf_nvf_env(pf_num=1,vf_num=16)
+        self.dut.send_expect('ip link set %s vf 10 mac 00:11:22:33:44:55' % self.pf0_intf, '#')
+        command = "./%s/app/testpmd -c f -n 6 -- -i %s" % (self.dut.target, "--rxq=16 --txq=16")
+        self.dut.send_expect(command, "testpmd> ", 360)
+        self.config_testpmd()
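+        # each of the 10 flow patterns below is expected to consume one profile per VF,
+        # so 11 VFs * 10 patterns = 110 profiles, which uses up the quota left for the VFs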
+        for port_id in range(11):
+            rules = [
+                "flow create %d ingress pattern eth / ipv4 proto is 255 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / udp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / sctp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv6 proto is 0 / end actions mark / rss / end" % port_id,
+                "flow create %d ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 / udp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 src is 2001::2 / sctp src is 22 dst is 23 / end actions queue index 1 / mark / end" % port_id,
+                "flow create %d ingress pattern eth type is 0x8863 / end actions queue index 1 / mark id 1 / end" % port_id,
+                "flow create %d ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 2 / end" % port_id]
+            self.create_fdir_rule(rules, check_stats=True)
+
+        rule = "flow create 11 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / udp src is 22 dst is 23 / end actions queue index 1 / mark / end"
+        self.create_fdir_rule(rule, check_stats=False)
+        self.check_fdir_rule(port_id=11, stats=False)
+        pkt = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/UDP(sport=22, dport=23)/ Raw("x" * 80)'
+        out = self.send_pkts_getouput(pkts=pkt)
+        rfc.check_iavf_fdir_mark(out, pkt_num=1, check_param={"port_id": 10, "mark_id": 0, "queue": 1}, stats=True)
+
+        self.dut.send_expect("flow flush 10", "testpmd> ")
+        self.check_fdir_rule(port_id=10, stats=False)
+        out = self.send_pkts_getouput(pkts=pkt)
+        rfc.check_iavf_fdir_mark(out, pkt_num=1, check_param={"port_id": 10, "mark_id": 0, "queue": 1}, stats=False)
+
+        self.create_fdir_rule(rule, check_stats=False)
+
+        self.destroy_env()
+        self.setup_2pf_4vf_env()
+
+    def test_stress_port_stop_start(self):
+        """
+        Rules can take effect after port stop/start
+        """
+        rule = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / end actions queue index 1 / mark / end"
+        pkt = 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21") / Raw("x" * 80)'
+        self.create_fdir_rule(rule, check_stats=True)
+        out = self.send_pkts_getouput(pkts=pkt)
+        rfc.check_iavf_fdir_mark(out, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop 0", "testpmd> ")
+        self.dut.send_expect("port start 0", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+        self.check_fdir_rule(port_id=0, rule_list=['0'])
+        out = self.send_pkts_getouput(pkts=pkt)
+        rfc.check_iavf_fdir_mark(out, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+
+    def test_stress_add_delete_rules_1vf(self):
+        """
+        add/delete rules 14336 times on 1 vf
+        """
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 proto is 255 / end actions queue index 1 / mark / end",
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions rss queues 2 3 end / mark id 1 / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=255)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        self.dut.kill_all()
+        src_file = 'add_delete_rules_1vf'
+        flows=open(self.src_file_dir + src_file,mode='w')
+        count=0
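+        # each iteration creates the two rules and then flushes port 0, repeated 14336 times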
+        for i in range(14336):
+            flows.write('%s \n' % rules[0])
+            flows.write('%s \n' % rules[1])
+            flows.write('flow flush 0\n')
+            count=count+1
+        flows.close()
+        self.verify(count == 14336, "failed to add/delete 14336 times of fdir rules on vf.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir)
+
+        eal_param = "-c f -n 6 -w %s -w %s" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16 --cmdline-file=%s" % self.dut_file_dir + src_file)
+        self.dut.send_expect(command, "testpmd> ", 900)
+        self.config_testpmd()
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.create_fdir_rule(rules, check_stats=True)
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": [2, 3]}, stats=True)
+
+    def test_stress_add_delete_rules_2vf(self):
+        """
+        add/delete rules 14336 times on 2 vfs
+        """
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.56.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 5 / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.56.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 5 / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.56.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.56.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        self.dut.kill_all()
+        src_file = 'add_delete_rules_2vfs'
+        flows=open(self.src_file_dir + src_file,mode='w')
+        count=0
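+        # each iteration creates one rule per VF and then flushes both ports, repeated 14336 times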
+        for i in range(14336):
+            flows.write('%s \n' % rules[0])
+            flows.write('%s \n' % rules[1])
+            flows.write('flow flush 0\n')
+            flows.write('flow flush 1\n')
+            count=count+1
+        flows.close()
+        self.verify(count == 14336, "failed to add/delete 14336 times of fdir rules on 2 vfs.")
+        self.dut.session.copy_file_to(self.src_file_dir + src_file, self.dut_file_dir)
+
+        eal_param = "-c f -n 6 -w %s -w %s" % (self.sriov_vfs_pf0[0].pci,self.sriov_vfs_pf0[1].pci)
+        command = "./%s/app/testpmd %s -- -i %s" % (self.dut.target, eal_param, "--rxq=16 --txq=16 --cmdline-file=%s" % self.dut_file_dir + src_file)
+        self.dut.send_expect(command, "testpmd> ", 900)
+        self.config_testpmd()
+        self.check_fdir_rule(port_id=0, stats=False)
+        self.check_fdir_rule(port_id=1, stats=False)
+
+        self.create_fdir_rule(rules, check_stats=True)
+        out_0 = self.send_pkts_getouput(pkts=pkts[0], pf_id=0)
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "queue": 5}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1], pf_id=0)
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 1, "queue": 5}, stats=True)
+
+    def test_stress_delete_rules(self):
+        """
+        deleting the 1st/2nd/last rule doesn't affect the other rules
+        """
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 23 / end actions queue index 1 / mark id 1 / end",
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 24 / end actions queue index 2 / mark id 2 / end",
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 192.168.0.21 / tcp src is 22 dst is 25 / end actions queue index 3 / mark id 3 / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=24)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21")/TCP(sport=22,dport=25)/Raw("x" * 80)']
+
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out_2 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out_2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+
+        self.dut.send_expect("flow destroy 0 rule 0", "testpmd> ")
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=False)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out_2 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out_2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+        self.dut.send_expect("flow destroy 0 rule 1", "testpmd> ")
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=False)
+        out_2 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out_2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        self.check_fdir_rule(port_id=0, rule_list=rule_li)
+        self.dut.send_expect("flow destroy 0 rule 2", "testpmd> ")
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out_2 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out_2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False)
+        self.dut.send_expect("flow flush 0", "testpmd> ")
+
+        out_0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out_0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=False)
+        out_1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out_1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=False)
+        out_2 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out_2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False)
+
+    def test_stress_vf_port_reset_add_new_rule(self):
+        """
+        after a VF reset, the original rule no longer takes effect,
+        then a newly added rule can take effect.
+        after relaunching testpmd and re-creating the same rules, they take effect again.
+        """
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        # reset vf
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop 0", "testpmd> ")
+        self.dut.send_expect("port reset 0", "testpmd> ")
+        self.dut.send_expect("port start 0", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+        # check the rule of port0 is still listed, but doesn't take effect.
+        self.check_fdir_rule(port_id=0, rule_list=['0'])
+        self.check_fdir_rule(port_id=1, rule_list=['0'])
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        # create the rule again
+        self.create_fdir_rule(rules[0], check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        # relaunch testpmd, and create the rules, check matched packets.
+        self.dut.send_expect("quit", "# ")
+        self.launch_testpmd()
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+
+    def test_stress_vf_port_reset_delete_rule(self):
+        """
+        after a VF reset, the original rule no longer takes effect,
+        then delete the stale rule without causing a core dump.
+        after relaunching testpmd and re-creating the same rules, they take effect again.
+        """
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        rule_li = self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        # reset vf
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop 0", "testpmd> ")
+        self.dut.send_expect("port reset 0", "testpmd> ")
+        self.dut.send_expect("port start 0", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+        # check the rule of port0 is still listed, but doesn't take effect.
+        self.check_fdir_rule(port_id=0, rule_list=['0'])
+        self.check_fdir_rule(port_id=1, rule_list=['0'])
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        # delete the rules
+        self.dut.send_expect("flow destroy 0 rule 0", "Invalid flow destroy")
+        self.destroy_fdir_rule(rule_id='0', port_id=1)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=False)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=False)
+        # relaunch testpmd, and create the rules, check matched packets.
+        self.dut.send_expect("quit", "# ")
+        self.launch_testpmd()
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+
+    def test_stress_pf_reset_vf_add_new_rule(self):
+        """
+        the PF triggers a VF reset, after which the original rule no longer takes effect,
+        then a newly added rule can take effect.
+        after relaunching testpmd and re-creating the same rules, they take effect again.
+        """
+        self.session_secondary = self.dut.new_session()
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end"]
+        new_rule = "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.1 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 6 / mark id 1 / end"
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:56")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:56")/IP(src="192.168.0.0",dst="192.1.0.1", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+
+        self.session_secondary.send_expect("ip link set %s vf 0 mac 00:11:22:33:44:56" % self.pf0_intf, "# ")
+        out = self.dut.session.get_session_before(timeout=2)
+        self.verify("Port 0: reset event" in out, "failed to reset vf0")
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop 0", "testpmd> ")
+        self.dut.send_expect("port reset 0", "testpmd> ")
+        self.dut.send_expect("port start 0", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+        out0 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+
+        # create a new rule, the packet matching the rule can be redirected to queue 6 with mark ID 1.
+        self.create_fdir_rule(new_rule, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[3])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 6}, stats=True)
+        # relaunch testpmd, and create the rules, check matched packets.
+        self.dut.send_expect("quit", "# ")
+        self.launch_testpmd()
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        self.dut.send_expect("quit", "# ")
+        self.session_secondary.send_expect("ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "# ")
+        self.dut.close_session(self.session_secondary)
+
+    def test_stress_pf_reset_vf_delete_rule(self):
+        """
+        the PF triggers a VF reset, after which the original rule no longer takes effect,
+        then delete the stale rule without causing a core dump.
+        after relaunching testpmd and re-creating the same rules, they take effect again.
+        """
+        self.session_secondary = self.dut.new_session()
+        rules = [
+            "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end",
+            "flow create 1 ingress pattern eth / ipv4 src is 192.168.0.0 dst is 192.1.0.0 tos is 4 / tcp src is 22 dst is 23 / end actions queue index 1 / mark / end"]
+        pkts = [
+            'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:66")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)',
+            'Ether(dst="00:11:22:33:44:56")/IP(src="192.168.0.0",dst="192.1.0.0", tos=4)/TCP(sport=22,dport=23)/Raw("x" * 80)']
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[0])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+
+        self.session_secondary.send_expect("ip link set %s vf 0 mac 00:11:22:33:44:56" % self.pf0_intf, "# ")
+        out = self.dut.session.get_session_before(timeout=2)
+        self.verify("Port 0: reset event" in out, "failed to reset vf0")
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop 0", "testpmd> ")
+        self.dut.send_expect("port reset 0", "testpmd> ")
+        self.dut.send_expect("port start 0", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+        out0 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "passthru": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        # delete the rules
+        self.dut.send_expect("flow destroy 0 rule 0", "Invalid flow destroy")
+        self.destroy_fdir_rule(rule_id='0', port_id=1)
+        out0 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=False)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=False)
+
+        # relaunch testpmd, and create the rules, check matched packets.
+        self.dut.send_expect("quit", "# ")
+        self.launch_testpmd()
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts[2])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 0, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts[1])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 1, "mark_id": 0, "queue": 1}, stats=True)
+        self.dut.send_expect("quit", "# ")
+        self.session_secondary.send_expect("ip link set %s vf 0 mac 00:11:22:33:44:55" % self.pf0_intf, "# ")
+        self.dut.close_session(self.session_secondary)
+
+    def checksum_enablehw(self, port, hw):
+        """
+        set checksum parameters
+        """
+        self.dut.send_expect("set fwd csum", "testpmd>")
+        self.dut.send_expect("port stop all", "testpmd>")
+        self.dut.send_expect("csum set ip %s %d" % (hw, port), "testpmd>")
+        self.dut.send_expect("csum set udp %s %d" %(hw, port), "testpmd>")
+        self.dut.send_expect("port start all", "testpmd>")
+        self.dut.send_expect("start", "testpmd>")
+
+    def get_chksum_values(self, packets_expected):
+        """
+        Get the correct checksum values of the expected packets, computed by scapy.
+        """
+        checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)")
+
+        chksum = dict()
+
+        self.tester.send_expect("scapy", ">>> ")
+        self.tester.send_expect("import sys", ">>> ")
+        self.tester.send_expect("sys.path.append('./dep')", ">>> ")
+        self.tester.send_expect("from pfcp import PFCP",  ">>> ")
+
+        for packet_type in list(packets_expected.keys()):
+            self.tester.send_expect("p = %s" % packets_expected[packet_type], ">>>")
+            out = self.tester.send_command("p.show2()", timeout=1)
+            chksums = checksum_pattern.findall(out)
+            chksum[packet_type] = chksums
+
+        self.tester.send_expect("exit()", "#")
+        return chksum
+
+    def checksum_validate(self, packets_sent, packets_expected):
+        """
+        Send the packets, sniff the forwarded packets and validate their checksums.
+        """
+        tx_interface = self.tester_iface0
+        rx_interface = self.tester_iface0
+
+        sniff_src = "00:11:22:33:44:55"
+        result = dict()
+        pkt = Packet()
+        chksum = self.get_chksum_values(packets_expected)
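+        # sniff only the packets forwarded back by testpmd: their source MAC is the
+        # VF MAC, while the injected packets use a different source MAC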
+        self.inst = self.tester.tcpdump_sniff_packets(intf=rx_interface, count=len(packets_sent),
+                filters=[{'layer': 'ether', 'config': {'src': sniff_src}}])
+        for packet_type in list(packets_sent.keys()):
+            pkt.append_pkt(packets_sent[packet_type])
+        pkt.send_pkt(crb=self.tester, tx_port=tx_interface, count=1)
+
+        p = self.tester.load_tcpdump_sniff_packets(self.inst)
+        nr_packets = len(p)
+        print(p)
+        packets_received = [p[i].sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%") for i in range(nr_packets)]
+        print(len(packets_sent), len(packets_received))
+        self.verify(len(packets_sent) == len(packets_received), "Unexpected Packets Drop")
+        i = 0
+        for packet_received in packets_received:
+            ip_checksum, tcp_checksum, udp_checksum, sctp_checksum = packet_received.split(';')
+            if udp_checksum != '??':
+                packet_type = 'UDP'
+                l4_checksum = udp_checksum
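+            # the sniffed packets are assumed to keep the sending order:
+            # indexes 0 and 2 are PFCP_NODE packets, indexes 1 and 3 are PFCP_SESSION packets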
+            if i == 0 or i == 2:
+                packet_type = packet_type + '/PFCP_NODE'
+            else:
+                packet_type = packet_type + '/PFCP_SESSION'
+
+            if ip_checksum != '??':
+                packet_type = 'IP/' + packet_type
+                if chksum[packet_type] != [ip_checksum, l4_checksum]:
+                    result[packet_type] = packet_type + " checksum error"
+            else:
+                packet_type = 'IPv6/' + packet_type
+                if chksum[packet_type] != [l4_checksum]:
+                    result[packet_type] = packet_type + " checksum error"
+            i = i + 1
+        return (result, p)
+
+    def set_vlan(self, vlan, port, strip, rx_tx="rx"):
+        """
+        set rx_vlan and tx_vlan
+        """
+        self.dut.send_expect("vlan set filter on %d" % port, "testpmd> ", 20)
+        self.dut.send_expect("vlan set strip %s %d" % (strip, port), "testpmd> ", 20)
+        self.dut.send_expect("rx_vlan add %d %d" % (vlan, port), "testpmd> ", 20)
+        self.dut.send_expect("set verbose 1", "testpmd> ", 20)
+
+        if rx_tx == "tx":
+            self.dut.send_expect("port stop %d" % port, "testpmd> ", 20)
+            self.dut.send_expect("tx_vlan set %d %d" % (port, vlan), "testpmd> ", 20)
+            self.dut.send_expect("port start %d" % port, "testpmd> ", 20)
+            self.dut.send_expect("set fwd mac", "testpmd> ", 20)
+
+    def get_tcpdump_package(self, pkts):
+        """
+        return vlan id of tcpdump packets
+        """
+        vlans = []
+        for i in range(len(pkts)):
+            vlan = pkts.strip_element_vlan("vlan", p_index=i)
+            print("vlan is:", vlan)
+            vlans.append(vlan)
+        return vlans
+
+    def test_pfcp_vlan_strip_on_hw_checksum(self):
+        """
+        Set PFCP FDIR rules.
+        Enable HW checksum offload.
+        Enable vlan filter and receipt of VLAN packets with VLAN tag 51 on port 0.
+        Enable vlan strip.
+        Send packets with incorrect checksums:
+        they can be received and the checksum errors are reported,
+        and the forwarded packets have correct checksums.
+        """
+        rules = ["flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / mark id 1 / end",
+                 "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions queue index 2 / mark id 2 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions queue index 3 / mark id 3 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end"]
+
+        self.dut.send_expect("quit", "# ")
+        self.pmd_output.start_testpmd(cores="1S/4C/1T",
+                                      param="--rxq=16 --txq=16 --enable-rx-cksum --port-topology=loop",
+                                      eal_param="-w %s" % self.sriov_vfs_pf0[0].pci,
+                                      socket=self.ports_socket)
+        vlan = 51
+        mac = "00:11:22:33:44:55"
+        sndIP = '10.0.0.1'
+        sndIPv6 = '::1'
+        pkts_sent = {'IP/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s", chksum=0xf)/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIP),
+                     'IP/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s", chksum=0xf)/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIP),
+                     'IPv6/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIPv6),
+                     'IPv6/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIPv6)}
+
+        expIP = sndIP
+        expIPv6 = sndIPv6
+        pkts_ref = {'IP/UDP/PFCP_NODE': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, expIP),
+                    'IP/UDP/PFCP_SESSION': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, expIP),
+                    'IPv6/UDP/PFCP_NODE': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, expIPv6),
+                    'IPv6/UDP/PFCP_SESSION': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, expIPv6)}
+
+        self.checksum_enablehw(port=0, hw="hw")
+
+        self.set_vlan(vlan=vlan, port=0, strip="on")
+        out_info = self.dut.send_expect("show port info 0", "testpmd> ", 20)
+        self.verify("strip on" in out_info, "Wrong strip:" + out_info)
+
+        # send packets and check the checksum value
+        result = self.checksum_validate(pkts_sent, pkts_ref)
+        # validate vlan in the tcpdumped packets
+        out_dump = self.get_tcpdump_package(result[1])
+        self.verify(len(out_dump), "Forwarded vlan packet not received!!!")
+        self.verify(vlan not in out_dump, "Wrong vlan:" + str(out_dump))
+
+        # Validate checksum on the receive packet
+        out_testpmd = self.dut.send_expect("stop", "testpmd> ")
+        bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd)
+        bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd)
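+        # the two IPv4 packets were sent with a bad IP checksum and all four UDP headers with a bad L4 checksum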
+        self.verify(bad_ipcsum == 2, "Bad-ipcsum check error")
+        self.verify(bad_l4csum == 4, "Bad-l4csum check error")
+        self.dut.send_expect("start", "testpmd> ")
+
+        # check the fdir rules take effect
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=True)
+
+        # destroy the rules and check there is no rule listed.
+        self.dut.send_expect("flow flush 0", "testpmd> ", 20)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # check no rules existing
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=False)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=False)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=False)
+
+        # send packets and check the checksum value
+        result = self.checksum_validate(pkts_sent, pkts_ref)
+        # validate vlan in the tcpdumped packets
+        out_dump = self.get_tcpdump_package(result[1])
+        self.verify(len(out_dump), "Forwarded vlan packet not received!!!")
+        self.verify(vlan not in out_dump, "Wrong vlan:" + str(out_dump))
+
+        # Validate checksum on the receive packet
+        out_testpmd = self.dut.send_expect("stop", "testpmd> ")
+        bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd)
+        bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd)
+        self.verify(bad_ipcsum == 2, "Bad-ipcsum check error")
+        self.verify(bad_l4csum == 4, "Bad-l4csum check error")
+
+    def test_pfcp_vlan_strip_off_sw_checksum(self):
+        """
+        Create PFCP FDIR rules.
+        Enable SW checksum offload.
+        Enable vlan filter and receipt of packets with VLAN Tag Identifier 51 on port 0.
+        Disable vlan strip.
+        Send packets with incorrect checksums,
+        verify they are received and the checksum errors are reported,
+        and verify the forwarded packets carry correct checksums.
+        """
+        rules = ["flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / mark id 1 / end",
+                 "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions queue index 2 / mark id 2 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions queue index 3 / mark id 3 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end"]
+
+        self.dut.send_expect("quit", "# ")
+        self.pmd_output.start_testpmd(cores="1S/4C/1T",
+                                      param="--rxq=16 --txq=16 --enable-rx-cksum --port-topology=loop",
+                                      eal_param="-w %s" % self.sriov_vfs_pf0[0].pci,
+                                      socket=self.ports_socket)
+        vlan = 51
+        mac = "00:11:22:33:44:55"
+        sndIP = '10.0.0.1'
+        sndIPv6 = '::1'
+        pkts_sent = {'IP/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s", chksum=0xf)/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIP),
+                     'IP/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s", chksum=0xf)/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIP),
+                     'IPv6/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIPv6),
+                     'IPv6/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805, chksum=0xf)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIPv6)}
+
+        expIP = sndIP
+        expIPv6 = sndIPv6
+        pkts_ref = {'IP/UDP/PFCP_NODE': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, expIP),
+                    'IP/UDP/PFCP_SESSION': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, expIP),
+                    'IPv6/UDP/PFCP_NODE': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, expIPv6),
+                    'IPv6/UDP/PFCP_SESSION': 'Ether(src="%s", dst="52:00:00:00:00:00")/Dot1Q(vlan=51)/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, expIPv6)}
+
+        self.checksum_enablehw(port=0, hw="sw")
+
+        self.set_vlan(vlan=vlan, port=0, strip="off")
+        out_info = self.dut.send_expect("show port info 0", "testpmd> ", 20)
+        self.verify("strip off" in out_info, "Wrong strip:" + out_info)
+
+        result = self.checksum_validate(pkts_sent, pkts_ref)
+
+        out_dump = self.get_tcpdump_package(result[1])
+        self.verify(len(out_dump), "Forwarded vlan packet not received!!!")
+        self.verify(vlan in out_dump, "Wrong vlan:" + str(out_dump))
+
+        # Validate checksum on the receive packet
+        out_testpmd = self.dut.send_expect("stop", "testpmd> ")
+        bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd)
+        bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd)
+        self.verify(bad_ipcsum == 2, "Bad-ipcsum check error")
+        self.verify(bad_l4csum == 4, "Bad-l4csum check error")
+        self.dut.send_expect("start", "testpmd> ")
+
+        # check the fdir rules take effect
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=True)
+
+        # destroy the rules and check there is no rule listed.
+        self.dut.send_expect("flow flush 0", "testpmd> ", 20)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # check no rules existing
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=False)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=False)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=False)
+
+        result = self.checksum_validate(pkts_sent, pkts_ref)
+
+        out_dump = self.get_tcpdump_package(result[1])
+        self.verify(len(out_dump), "Forwarded vlan packet not received!!!")
+        self.verify(vlan in out_dump, "Wrong vlan:" + str(out_dump))
+
+        # Validate checksum on the receive packet
+        out_testpmd = self.dut.send_expect("stop", "testpmd> ")
+        bad_ipcsum = self.pmd_output.get_pmd_value("Bad-ipcsum:", out_testpmd)
+        bad_l4csum = self.pmd_output.get_pmd_value("Bad-l4csum:", out_testpmd)
+        self.verify(bad_ipcsum == 2, "Bad-ipcsum check error")
+        self.verify(bad_l4csum == 4, "Bad-l4csum check error")
+
+    def test_pfcp_vlan_insert_on(self):
+        """
+        Create PFCP FDIR rules.
+        Enable vlan filter and insert VLAN Tag Identifier 51 into packets transmitted from port 0.
+        """
+        rules = ["flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 0 / end actions queue index 1 / mark id 1 / end",
+                 "flow create 0 ingress pattern eth / ipv4 / udp / pfcp s_field is 1 / end actions queue index 2 / mark id 2 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 0 / end actions queue index 3 / mark id 3 / end",
+                 "flow create 0 ingress pattern eth / ipv6 / udp / pfcp s_field is 1 / end actions queue index 4 / mark id 4 / end"]
+
+        self.dut.send_expect("quit", "# ")
+        self.pmd_output.start_testpmd(cores="1S/4C/1T",
+                                      param="--rxq=16 --txq=16 --enable-rx-cksum --port-topology=loop",
+                                      eal_param="-w %s" % self.sriov_vfs_pf0[0].pci,
+                                      socket=self.ports_socket)
+        vlan = 51
+        mac = "00:11:22:33:44:55"
+        sndIP = '10.0.0.1'
+        sndIPv6 = '::1'
+        pkt = Packet()
+        pkts_sent = {'IP/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIP),
+                     'IP/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/IP(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIP),
+                     'IPv6/UDP/PFCP_NODE': 'Ether(dst="%s", src="52:00:00:00:00:00")/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=0)/("X"*46)' % (mac, sndIPv6),
+                     'IPv6/UDP/PFCP_SESSION': 'Ether(dst="%s", src="52:00:00:00:00:00")/IPv6(src="%s")/UDP(sport=22, dport=8805)/PFCP(Sfield=1)/("X"*46)' % (mac, sndIPv6)}
+
+        self.set_vlan(vlan=vlan, port=0, strip="off", rx_tx="tx")
+        self.dut.send_expect("start", "testpmd> ")
+
+        tx_interface = self.tester_iface0
+        rx_interface = self.tester_iface0
+
+        dmac = "00:11:22:33:44:55"
+        smac = self.pf1_mac
+        inst = self.tester.tcpdump_sniff_packets(rx_interface)
+
+        for packet_type in list(pkts_sent.keys()):
+            pkt.append_pkt(pkts_sent[packet_type])
+        pkt.send_pkt(crb=self.tester, tx_port=tx_interface, count=1)
+
+        p = self.tester.load_tcpdump_sniff_packets(inst)
+
+        out = self.get_tcpdump_package(p)
+        self.verify(vlan in out, "Vlan not found:" + str(out))
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("start", "testpmd> ")
+
+        # check the fdir rules take effect
+        self.create_fdir_rule(rules, check_stats=True)
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=True)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=True)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=True)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=True)
+
+        # destroy the rules and check there is no rule listed.
+        self.dut.send_expect("flow flush 0", "testpmd> ", 20)
+        self.check_fdir_rule(port_id=0, stats=False)
+
+        # check no rules existing
+        out0 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out0, pkt_num=1, check_param={"port_id": 0, "mark_id": 1, "queue": 1}, stats=False)
+        out1 = self.send_pkts_getouput(pkts=pkts_sent["IP/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out1, pkt_num=1, check_param={"port_id": 0, "mark_id": 2, "queue": 2}, stats=False)
+        out2 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_NODE"])
+        rfc.check_iavf_fdir_mark(out2, pkt_num=1, check_param={"port_id": 0, "mark_id": 3, "queue": 3}, stats=False)
+        out3 = self.send_pkts_getouput(pkts=pkts_sent["IPv6/UDP/PFCP_SESSION"])
+        rfc.check_iavf_fdir_mark(out3, pkt_num=1, check_param={"port_id": 0, "mark_id": 4, "queue": 4}, stats=False)
+
+        self.dut.send_expect("stop", "testpmd> ")
+        self.dut.send_expect("port stop all", "testpmd> ")
+        self.dut.send_expect("tx_vlan reset 0", "testpmd> ")
+        self.dut.send_expect("port start all", "testpmd> ")
+        self.dut.send_expect("stop", "testpmd> ", 30)
+
+    def tear_down(self):
+        # destroy all flow rule on port 0
+        self.dut.kill_all()
+
+    def tear_down_all(self):
+        self.destroy_env()
+        self.dut.kill_all()
diff --git a/tests/rte_flow_common.py b/tests/rte_flow_common.py
index be0e434a..dee47c6a 100644
--- a/tests/rte_flow_common.py
+++ b/tests/rte_flow_common.py
@@ -37,7 +37,7 @@ from utils import GREEN, RED
 # switch filter common functions
 def get_suite_config(test_case):
     """
-    get the suite config from conf/cvl_dcf_switch_filter.cfg.
+    get the suite config from conf/suite.cfg.
     """
     suite_config = {}
     if "ice_driver_file_location" in test_case.get_suite_cfg():
@@ -46,6 +46,12 @@ def get_suite_config(test_case):
     if "os_default_package_file_location" in test_case.get_suite_cfg():
         os_default_package_file_location = test_case.get_suite_cfg()["os_default_package_file_location"]
         suite_config["os_default_package_file_location"] = os_default_package_file_location
+    if "comms_package_file_location" in test_case.get_suite_cfg():
+        comms_package_file_location = test_case.get_suite_cfg()["comms_package_file_location"]
+        suite_config["comms_package_file_location"] = comms_package_file_location
+    if "package_file_location" in test_case.get_suite_cfg():
+        package_file_location = test_case.get_suite_cfg()["package_file_location"]
+        suite_config["package_file_location"] = package_file_location
     return suite_config
 
 def get_rx_packet_number(out,match_string):
@@ -388,6 +394,94 @@ def check_mark(out, pkt_num, check_param, stats=True):
             check_drop(out[1], pkt_num, check_param, stats)
         verify(not res, "should has no mark_id in %s" % res)
 
+# IAVF fdir common functions
+def check_iavf_fdir_queue(out, pkt_num, check_param, stats=True):
+    port_id = check_param["port_id"] if check_param.get("port_id") is not None else 0
+    queue = check_param["queue"]
+    p = re.compile(
+        r"Forward Stats for RX Port=\s?%s/Queue=(\s?\d+)\s.*\n.*RX-packets:(\s?\d+)\s+TX-packets" % port_id)
+    res = p.findall(out)
+    if res:
+        res_queue = [int(i[0]) for i in res]
+        pkt_li = [int(i[1]) for i in res]
+        res_num = sum(pkt_li)
+        verify(res_num == pkt_num, "fail: got wrong number of packets, expected packet number %s, got %s." % (pkt_num, res_num))
+        if stats:
+            if isinstance(queue, int):
+                verify(all(q == queue for q in res_queue), "fail: queue id not matched, expect queue %s, got %s" % (queue, res_queue))
+                print((GREEN("pass: queue id %s matched" % res_queue)))
+            elif isinstance(queue, list):
+                verify(all(q in queue for q in res_queue), "fail: queue id not matched, expect queue %s, got %s" % (queue, res_queue))
+                print((GREEN("pass: queue id %s matched" % res_queue)))
+            else:
+                raise Exception("wrong queue value, expect int or list")
+        else:
+            if isinstance(queue, int):
+                verify(not any(q == queue for q in res_queue), "fail: queue id should not match, expected queue %s, got %s" % (queue, res_queue))
+                print((GREEN("pass: queue id %s not matched" % res_queue)))
+            elif isinstance(queue, list):
+                verify_iavf_fdir_directed_by_rss(out, rxq=16, stats=True)
+                print((GREEN("pass: queue id %s not matched" % res_queue)))
+            else:
+                raise Exception("wrong action value, expect queue_index or queue_group")
+    else:
+        raise Exception("got wrong output, not match pattern %s" % p.pattern)
+
+def verify_iavf_fdir_directed_by_rss(out, rxq=16, stats=True):
+    p = re.compile("RSS hash=(0x\w+) - RSS queue=(0x\w+)")
+    pkt_info = p.findall(out)
+    if stats:
+        for i in pkt_info:
+            verify((int(i[0],16) % rxq == int(i[1],16)), "some packets are not directed by RSS")
+            print(GREEN("pass: queue id %s is redirected by RSS hash value %s" % (i[1], i[0])))
+    else:
+        for i in pkt_info:
+            verify((int(i[0],16) % rxq != int(i[1],16)), "some packets are not directed by RSS")
+
+def check_iavf_fdir_passthru(out, pkt_num, check_param, stats=True):
+    # check the actual queue is distributed by RSS
+    port_id = check_param["port_id"] if check_param.get("port_id") is not None else 0
+    p = re.compile('port\s*%s/queue\s?[0-9]+' % port_id)
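+    # counts testpmd verbose per-packet lines such as "port 0/queue 5: received 1 packets" (illustrative format)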
+    pkt_li = p.findall(out)
+    verify(pkt_num == len(pkt_li), "fail: got wrong number of packets, expected packet number %s, got %s." % (pkt_num, len(pkt_li)))
+    p = re.compile('RSS\shash=(\w+)\s-\sRSS\squeue=(\w+)')
+    pkt_hash = p.findall(out)
+    verify(pkt_num == len(pkt_hash), "fail: got wrong number of passthru packets, expect passthru packet number %s, got %s." % (pkt_num, len(pkt_hash)))
+    verify_iavf_fdir_directed_by_rss(out, rxq=16, stats=True)
+
+def check_iavf_fdir_mark(out, pkt_num, check_param, stats=True):
+    mark_scanner = "FDIR matched ID=(0x\w+)"
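+    # testpmd verbose output reports a matched flow rule as e.g. "FDIR matched ID=0x1" (illustrative format)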
+    res = re.findall(mark_scanner, out)
+    print(out)
+    if stats:
+        if check_param.get("drop") is not None:
+            check_drop(out, pkt_num, check_param, stats)
+            verify(not res, "should have no mark_id in %s" % res)
+        elif check_param.get("mark_id") is not None:
+            mark_list = [i for i in res]
+            print("mark list is: ", mark_list)
+            verify(len(res) == pkt_num, "got wrong number of packets with mark_id")
+            verify(all([int(i, 16) == check_param["mark_id"] for i in res]),
+                        "fail: some packet mark ids in %s do not match" % mark_list)
+            if check_param.get("queue") is not None:
+                check_iavf_fdir_queue(out, pkt_num, check_param, stats)
+            elif check_param.get("passthru") is not None:
+                check_iavf_fdir_passthru(out, pkt_num, check_param, stats)
+        else:
+            if check_param.get("queue") is not None:
+                check_iavf_fdir_queue(out, pkt_num, check_param, stats)
+            elif check_param.get("passthru") is not None:
+                check_iavf_fdir_passthru(out, pkt_num, check_param, stats)
+            verify(not res, "should have no mark_id in %s" % res)
+    else:
+        if check_param.get("queue") is not None:
+            check_iavf_fdir_queue(out, pkt_num, check_param, stats)
+        elif check_param.get("drop") is not None:
+            check_drop(out, pkt_num, check_param, stats)
+        elif check_param.get("passthru") is not None:
+            check_iavf_fdir_passthru(out, pkt_num, check_param, stats)
+        verify(not res, "should have no mark_id in %s" % res)
+
 # rss common functions
 def check_packets_of_each_queue(out):
     """
-- 
2.17.1