Provide support for ETH, VLAN, IPv4/IPv6, TCP/UDP, SCTP, VXLAN,<br />and mask matching, supporting multiple actions<br />including drop/count/mark/queue/rss, and vxlan decap/encap.
a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst<br />index 372cb5b44f..47dabde97e 100644<br />--- a/doc/guides/nics/zxdh.rst<br />+++ b/doc/guides/nics/zxdh.rst<br />@@ -41,6 +41,7 @@ Features of the ZXDH PMD are:<br /> - Hardware TSO for generic IP or UDP tunnel, including VXLAN<br /> - Extended statistics query<br /> - Ingress meter support<br />+- Flow API<br />  <br />  <br /> Driver compilation and testing<br />diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build<br />index a48a0d43c2..120cac5879 100644<br />--- a/drivers/net/zxdh/meson.build<br />+++ b/drivers/net/zxdh/meson.build<br />@@ -24,4 +24,5 @@ sources = files(<br />         'zxdh_rxtx.c',<br />         'zxdh_ethdev_ops.c',<br />         'zxdh_mtr.c',<br />+        'zxdh_flow.c',<br /> )<br />diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h<br />index c151101bbc..6d78ae0273 100644<br />--- a/drivers/net/zxdh/zxdh_common.h<br />+++ b/drivers/net/zxdh/zxdh_common.h<br />@@ -14,6 +14,7 @@<br /> #define ZXDH_VF_LOCK_REG               0x90<br /> #define ZXDH_VF_LOCK_ENABLE_MASK       0x1<br /> #define ZXDH_ACQUIRE_CHANNEL_NUM_MAX   10<br />+#define VF_IDX(pcie_id)     ((pcie_id) & 0xff)<br />  <br /> struct zxdh_res_para {<br />     uint64_t virt_addr;<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c<br />index 80053678cb..3b9cb6fa63 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev.c<br />@@ -1228,6 +1228,11 @@ zxdh_dev_close(struct rte_eth_dev *dev)<br />         return -1;<br />     }<br />  <br />+    if (zxdh_shared_data != NULL) {<br />+        zxdh_mtr_release(dev);<br />+        zxdh_flow_release(dev);<br />+    }<br />+<br />     zxdh_intr_release(dev);<br />     zxdh_np_uninit(dev);<br />     zxdh_pci_reset(hw);<br />@@ -1428,6 +1433,7 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {<br />     .get_module_eeprom         = zxdh_dev_get_module_eeprom,<br />     
.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,<br />     .mtr_ops_get             = zxdh_meter_ops_get,<br />+    .flow_ops_get             = zxdh_flow_ops_get,<br /> };<br />  <br /> static int32_t<br />@@ -1504,6 +1510,8 @@ zxdh_dtb_dump_res_init(struct zxdh_hw *hw, ZXDH_DEV_INIT_CTRL_T *dpp_ctrl)<br />         {"sdt_mc_table1",       5 * 1024 * 1024, ZXDH_SDT_MC_TABLE1, NULL},<br />         {"sdt_mc_table2",       5 * 1024 * 1024, ZXDH_SDT_MC_TABLE2, NULL},<br />         {"sdt_mc_table3",       5 * 1024 * 1024, ZXDH_SDT_MC_TABLE3, NULL},<br />+        {"sdt_acl_index_mng",  4 * 1024 * 1024, 30, NULL},<br />+        {"sdt_fd_table",    4 * 1024 * 1024, ZXDH_SDT_FD_TABLE, NULL},<br />     };<br />  <br />     struct zxdh_dev_shared_data *dev_sd = hw->dev_sd;<br />@@ -1723,6 +1731,7 @@ zxdh_free_sh_res(void)<br />         rte_spinlock_lock(&zxdh_shared_data_lock);<br />         if (zxdh_shared_data != NULL && zxdh_shared_data->init_done && <br />             (--zxdh_shared_data->dev_refcnt == 0)) {<br />+            rte_mempool_free(zxdh_shared_data->flow_mp);<br />             rte_mempool_free(zxdh_shared_data->mtr_mp);<br />             rte_mempool_free(zxdh_shared_data->mtr_profile_mp);<br />             rte_mempool_free(zxdh_shared_data->mtr_policy_mp);<br />@@ -1734,6 +1743,7 @@ zxdh_free_sh_res(void)<br /> static int<br /> zxdh_init_sh_res(struct zxdh_shared_data *sd)<br /> {<br />+    const char *MZ_ZXDH_FLOW_MP        = "zxdh_flow_mempool";<br />     const char *MZ_ZXDH_MTR_MP         = "zxdh_mtr_mempool";<br />     const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";<br />     const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";<br />@@ -1743,6 +1753,13 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)<br />     struct rte_mempool *mtr_policy_mp = NULL;<br />  <br />     if (rte_eal_process_type() == RTE_PROC_PRIMARY) {<br />+        flow_mp = rte_mempool_create(MZ_ZXDH_FLOW_MP, ZXDH_MAX_FLOW_NUM,<br />+            
sizeof(struct zxdh_flow), 64, 0,<br />+            NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);<br />+        if (flow_mp == NULL) {<br />+            PMD_DRV_LOG(ERR, "Cannot allocate zxdh flow mempool");<br />+            goto error;<br />+        }<br />         mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, ZXDH_MAX_MTR_NUM,<br />             sizeof(struct zxdh_mtr_object), 64, 0,<br />             NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);<br />@@ -1765,6 +1782,7 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)<br />             PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");<br />             goto error;<br />         }<br />+        sd->flow_mp = flow_mp;<br />         sd->mtr_mp = mtr_mp;<br />         sd->mtr_profile_mp = mtr_profile_mp;<br />         sd->mtr_policy_mp = mtr_policy_mp;<br />@@ -1814,6 +1832,7 @@ zxdh_init_once(struct rte_eth_dev *eth_dev)<br />         ret = zxdh_init_sh_res(sd);<br />         if (ret != 0)<br />             goto out;<br />+        zxdh_flow_global_init();<br />         rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);<br />         memset(&g_mtr_res, 0, sizeof(g_mtr_res));<br />         sd->init_done = true;<br />@@ -1837,10 +1856,17 @@ zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)<br />         ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);<br />         if (ret)<br />             PMD_DRV_LOG(ERR, "sdt_no %d delete failed. code:%d ", sdt_no, ret);<br />+<br />         sdt_no = ZXDH_SDT_MC_TABLE0 + hw->hash_search_index;<br />         ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);<br />         if (ret)<br />             PMD_DRV_LOG(ERR, "sdt_no %d delete failed. 
code:%d ", sdt_no, ret);<br />+<br />+        ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,<br />+                    ZXDH_SDT_FD_TABLE, hw->vport.vport,<br />+                    ZXDH_FLOW_STATS_INGRESS_BASE, 1);<br />+        if (ret)<br />+            PMD_DRV_LOG(ERR, "flow offline delete failed. code:%d", ret);<br />     }<br />     return ret;<br /> }<br />@@ -2064,6 +2090,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)<br />     if (ret)<br />         goto err_zxdh_init;<br />  <br />+    zxdh_flow_init(eth_dev);<br />     zxdh_queue_res_get(eth_dev);<br />     zxdh_msg_cb_reg(hw);<br />     if (zxdh_priv_res_init(hw) != 0)<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h<br />index 169af209a2..8e465d66b6 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.h<br />+++ b/drivers/net/zxdh/zxdh_ethdev.h<br />@@ -11,6 +11,7 @@<br /> #include <eal_interrupts.h> <br />  <br /> #include "zxdh_mtr.h" <br />+#include "zxdh_flow.h" <br />  <br /> /* ZXDH PCI vendor/device ID. 
*/<br /> #define ZXDH_PCI_VENDOR_ID        0x1cf2<br />@@ -54,6 +55,7 @@<br /> #define ZXDH_SLOT_MAX             256<br /> #define ZXDH_MAX_VF               256<br /> #define ZXDH_HASHIDX_MAX          6<br />+#define ZXDH_RSS_HASH_KEY_LEN     40U<br />  <br /> union zxdh_virport_num {<br />     uint16_t vport;<br />@@ -129,7 +131,10 @@ struct zxdh_hw {<br />     uint8_t is_pf         : 1,<br />             rsv : 1,<br />             i_mtr_en      : 1,<br />-            e_mtr_en      : 1;<br />+            e_mtr_en      : 1,<br />+            i_flow_en     : 1,<br />+            e_flow_en     : 1,<br />+            vxlan_flow_en : 1;<br />     uint8_t msg_chan_init;<br />     uint8_t phyport;<br />     uint8_t panel_id;<br />@@ -149,7 +154,10 @@ struct zxdh_hw {<br />     uint16_t queue_pool_count;<br />     uint16_t queue_pool_start;<br />     uint8_t dl_net_hdr_len;<br />-    uint8_t rsv1[3];<br />+    uint16_t vxlan_fd_num;<br />+    uint8_t rsv1[1];<br />+<br />+    struct dh_flow_list dh_flow_list;<br /> };<br />  <br /> struct zxdh_dtb_shared_data {<br />@@ -174,6 +182,7 @@ struct zxdh_shared_data {<br />     int32_t np_init_done;<br />     uint32_t dev_refcnt;<br />     struct zxdh_dtb_shared_data *dtb_data;<br />+    struct rte_mempool *flow_mp;<br />     struct rte_mempool *mtr_mp;<br />     struct rte_mempool *mtr_profile_mp;<br />     struct rte_mempool *mtr_policy_mp;<br />diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c<br />index 268f78900c..10a174938e 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev_ops.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c<br />@@ -444,7 +444,7 @@ zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)<br />     } else {<br />         struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg;<br />         mac_filter->filter_flag = ZXDH_MAC_UNFILTER;<br />-        mac_filter->mac = *addr;<br />+        memcpy(&mac_filter->mac, addr, sizeof(struct 
rte_ether_addr));<br />         zxdh_msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);<br />         ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);<br />         if (ret) {<br />@@ -460,7 +460,7 @@ zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)<br />  <br />         mac_filter->filter_flag = ZXDH_MAC_UNFILTER;<br />         mac_filter->mac_flag = true;<br />-        mac_filter->mac = *old_addr;<br />+        memcpy(&mac_filter->mac, old_addr, sizeof(struct rte_ether_addr));<br />         zxdh_msg_head_build(hw, ZXDH_MAC_DEL, &msg_info);<br />         ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);<br />         if (ret) {<br />@@ -532,7 +532,7 @@ zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,<br />         struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg;<br />  <br />         mac_filter->filter_flag = ZXDH_MAC_FILTER;<br />-        mac_filter->mac = *mac_addr;<br />+        memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr));<br />         zxdh_msg_head_build(hw, ZXDH_MAC_ADD, &msg_info);<br />         if (rte_is_unicast_ether_addr(mac_addr)) {<br />             if (hw->uc_num < ZXDH_MAX_UC_MAC_ADDRS) {<br />@@ -614,7 +614,7 @@ void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)<br />         struct zxdh_mac_filter *mac_filter = &msg_info.data.mac_filter_msg;<br />  <br />         mac_filter->filter_flag = ZXDH_MAC_FILTER;<br />-        mac_filter->mac = *mac_addr;<br />+        memcpy(&mac_filter->mac, mac_addr, sizeof(struct rte_ether_addr));<br />         zxdh_msg_head_build(hw, ZXDH_MAC_DEL, &msg_info);<br />         if (rte_is_unicast_ether_addr(mac_addr)) {<br />             if (hw->uc_num <= ZXDH_MAX_UC_MAC_ADDRS) {<br />@@ -1056,7 +1056,7 @@ zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,<br />     return ret;<br /> }<br />  <br />-static uint16_t<br />+uint16_t<br /> 
zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid)<br /> {<br />     struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;<br />diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h<br />index 97a1eb4532..a83b808934 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev_ops.h<br />+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h<br />@@ -141,5 +141,6 @@ int zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw<br /> int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *modinfo);<br /> int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);<br /> int zxdh_meter_ops_get(struct rte_eth_dev *dev, void *arg);<br />+uint16_t zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid);<br />  <br /> #endif /* ZXDH_ETHDEV_OPS_H */<br />diff --git a/drivers/net/zxdh/zxdh_flow.c b/drivers/net/zxdh/zxdh_flow.c<br />new file mode 100644<br />index 0000000000..82cf01ad1e<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_flow.c<br />@@ -0,0 +1,2003 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#include <sys/queue.h> <br />+#include <stdio.h> <br />+#include <errno.h> <br />+#include <stdint.h> <br />+#include <string.h> <br />+#include <unistd.h> <br />+#include <stdarg.h> <br />+<br />+#include <rte_debug.h> <br />+#include <rte_ether.h> <br />+#include <ethdev_driver.h> <br />+#include <rte_log.h> <br />+#include <rte_malloc.h> <br />+#include <rte_tailq.h> <br />+#include <rte_flow.h> <br />+#include <rte_bitmap.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+#include "zxdh_logs.h" <br />+#include "zxdh_flow.h" <br />+#include "zxdh_tables.h" <br />+#include "zxdh_ethdev_ops.h" <br />+#include "zxdh_np.h" <br />+#include "zxdh_msg.h" <br />+<br />+#define ZXDH_IPV6_FRAG_HEADER     44<br />+#define ZXDH_TENANT_ARRAY_NUM     3<br />+#define ZXDH_VLAN_TCI_MASK       0xFFFF<br 
/>+#define ZXDH_VLAN_PRI_MASK       0xE000<br />+#define ZXDH_VLAN_CFI_MASK       0x1000<br />+#define ZXDH_VLAN_VID_MASK       0x0FFF<br />+#define MAX_STRING_LEN           8192<br />+#define FLOW_INGRESS             0<br />+#define FLOW_EGRESS              1<br />+#define MAX_ENCAP1_NUM           (256)<br />+#define INVALID_HANDLEIDX        0xffff<br />+#define ACTION_VXLAN_ENCAP_ITEMS_NUM (6)<br />+static struct dh_engine_list flow_engine_list = TAILQ_HEAD_INITIALIZER(flow_engine_list);<br />+static struct count_res flow_count_ref[MAX_FLOW_COUNT_NUM];<br />+static rte_spinlock_t fd_hw_res_lock = RTE_SPINLOCK_INITIALIZER;<br />+static uint8_t fd_hwres_bitmap[ZXDH_MAX_FLOW_NUM] = {0};<br />+<br />+#define MKDUMPSTR(buf, buf_size, cur_len, ...) \<br />+do { \<br />+    if ((cur_len) >= (buf_size)) \<br />+        break; \<br />+    (cur_len) += snprintf((buf) + (cur_len), (buf_size) - (cur_len), __VA_ARGS__); \<br />+} while (0)<br />+<br />+static inline void<br />+print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr,<br />+         char print_buf[], uint32_t buf_size, uint32_t *cur_len)<br />+{<br />+    char buf[RTE_ETHER_ADDR_FMT_SIZE];<br />+<br />+    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf);<br />+}<br />+<br />+static inline void<br />+zxdh_fd_flow_free_dtbentry(ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    rte_free(dtb_entry->p_entry_data);<br />+    dtb_entry->p_entry_data = NULL;<br />+    dtb_entry->sdt_no = 0;<br />+}<br />+<br />+static void<br />+data_bitwise(void *data, int bytecnt)<br />+{<br />+    int i;<br />+    uint32_t *temp = (uint32_t *)data;<br />+    int remain = bytecnt % 4;<br />+    for (i = 0; i < (bytecnt >> 2); i++)    {<br />+        *(temp) = ~*(temp);<br />+        temp++;<br />+    }<br />+<br />+    if (remain) {<br />+        for (i = 0; i < remain; i++) {<br />+            uint8_t *tmp = (uint8_t *)temp;<br 
/>+            *(uint8_t *)tmp = ~*(uint8_t *)tmp;<br />+            tmp++;<br />+        }<br />+    }<br />+}<br />+<br />+static void<br />+zxdh_adjust_flow_op_rsp_memory_layout(void *old_data,<br />+        size_t old_size, void *new_data)<br />+{<br />+    rte_memcpy(new_data, old_data, sizeof(struct zxdh_flow));<br />+    memset((char *)new_data + sizeof(struct zxdh_flow), 0, 4);<br />+    rte_memcpy((char *)new_data + sizeof(struct zxdh_flow) + 4,<br />+        (char *)old_data + sizeof(struct zxdh_flow),<br />+        old_size - sizeof(struct zxdh_flow));<br />+}<br />+<br />+void zxdh_flow_global_init(void)<br />+{<br />+    int i;<br />+    for (i = 0; i < MAX_FLOW_COUNT_NUM; i++) {<br />+        rte_spinlock_init(&flow_count_ref[i].count_lock);<br />+        flow_count_ref[i].count_ref = 0;<br />+    }<br />+}<br />+<br />+static void<br />+__entry_dump(char *print_buf, uint32_t buf_size,<br />+        uint32_t *cur_len, struct fd_flow_key *key)<br />+{<br />+    print_ether_addr("\nL2\t  dst=", &key->mac_dst, print_buf, buf_size, cur_len);<br />+    print_ether_addr(" - src=", &key->mac_src, print_buf, buf_size, cur_len);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -eth type=0x%04x", key->ether_type);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        " -vlan_pri=0x%02x -vlan_vlanid=0x%04x  -vlan_tci=0x%04x ",<br />+        key->cvlan_pri, key->cvlan_vlanid, key->vlan_tci);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        " -vni=0x%02x 0x%02x 0x%02x\n", key->vni[0], key->vni[1], key->vni[2]);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        "L3\t  dstip=0x%08x 0x%08x 0x%08x 0x%08x("IPv6_BYTES_FMT")\n",<br />+        *(uint32_t *)key->dst_ip, *((uint32_t *)key->dst_ip + 1),<br />+        *((uint32_t *)key->dst_ip + 2),<br />+        *((uint32_t *)key->dst_ip + 3),<br />+        IPv6_BYTES(key->dst_ip));<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        "\t  srcip=0x%08x 0x%08x 0x%08x 
0x%08x("IPv6_BYTES_FMT")\n",<br />+        *((uint32_t *)key->src_ip), *((uint32_t *)key->src_ip + 1),<br />+        *((uint32_t *)key->src_ip + 2),<br />+        *((uint32_t *)key->src_ip + 3),<br />+        IPv6_BYTES(key->src_ip));<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        "  \t  tos=0x%02x -nw-proto=0x%02x -frag-flag %u\n",<br />+        key->tos, key->nw_proto, key->frag_flag);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len,<br />+        "L4\t  dstport=0x%04x -srcport=0x%04x", key->tp_dst, key->tp_src);<br />+}<br />+<br />+static void<br />+__result_dump(char *print_buf, uint32_t buf_size,<br />+        uint32_t *cur_len, struct fd_flow_result *res)<br />+{<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -hit_flag = 0x%04x", res->hit_flag);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -action_idx = 0x%02x", res->action_idx);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -qid = 0x%04x", res->qid);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -mark_id = 0x%08x", res->mark_fd_id);<br />+    MKDUMPSTR(print_buf, buf_size, *cur_len, " -count_id = 0x%02x", res->countid);<br />+}<br />+<br />+static void offlow_key_dump(struct fd_flow_key *key, struct fd_flow_key *key_mask, FILE *file)<br />+{<br />+    char print_buf[MAX_STRING_LEN];<br />+    uint32_t buf_size = MAX_STRING_LEN;<br />+    uint32_t cur_len = 0;<br />+<br />+    MKDUMPSTR(print_buf, buf_size, cur_len, "offload key:\n\t");<br />+    __entry_dump(print_buf, buf_size, &cur_len, key);<br />+<br />+    MKDUMPSTR(print_buf, buf_size, cur_len, "\noffload key_mask:\n\t");<br />+    __entry_dump(print_buf, buf_size, &cur_len, key_mask);<br />+<br />+    PMD_DRV_LOG(INFO, "%s", print_buf);<br />+    MKDUMPSTR(print_buf, buf_size, cur_len, "\n");<br />+    if (file)<br />+        fputs(print_buf, file);<br />+}<br />+<br />+static void offlow_result_dump(struct fd_flow_result *res, FILE *file)<br />+{<br />+    char print_buf[MAX_STRING_LEN];<br />+    
uint32_t buf_size = MAX_STRING_LEN;<br />+    uint32_t cur_len = 0;<br />+<br />+    MKDUMPSTR(print_buf, buf_size, cur_len, "offload result:\n");<br />+    __result_dump(print_buf, buf_size, &cur_len, res);<br />+    PMD_DRV_LOG(INFO, "%s", print_buf);<br />+    PMD_DRV_LOG(INFO, "memdump : ===result ===");<br />+    MKDUMPSTR(print_buf, buf_size, cur_len, "\n");<br />+    if (file)<br />+        fputs(print_buf, file);<br />+}<br />+<br />+static int<br />+set_flow_enable(struct rte_eth_dev *dev, uint8_t dir,<br />+        bool enable, struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *priv = dev->data->dev_private;<br />+    struct zxdh_port_attr_table port_attr = {0};<br />+    int ret = 0;<br />+<br />+    if (priv->is_pf) {<br />+        ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "get port_attr failed");<br />+            return -1;<br />+        }<br />+        port_attr.fd_enable = enable;<br />+<br />+        ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "write port_attr failed");<br />+            return -1;<br />+        }<br />+    } else {<br />+        struct zxdh_msg_info msg_info = {0};<br />+        struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;<br />+<br />+        attr_msg->mode  = ZXDH_PORT_FD_EN_OFF_FLAG;<br />+        attr_msg->value = enable;<br />+        zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);<br />+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);<br />+    }<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "port %d flow enable failed", priv->port_id);<br />+        return -rte_flow_error_set(error, EEXIST,<br />+                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,<br />+                    "Meter  enable failed.");<br />+    }<br />+    if (dir == FLOW_INGRESS)<br />+        
priv->i_flow_en = !!enable;<br />+    else<br />+        priv->e_flow_en = !!enable;<br />+<br />+    return ret;<br />+}<br />+<br />+static int<br />+set_vxlan_enable(struct rte_eth_dev *dev, bool enable, struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *priv = dev->data->dev_private;<br />+    struct zxdh_port_attr_table port_attr = {0};<br />+    int ret = 0;<br />+<br />+    if (priv->vxlan_flow_en == !!enable)<br />+        return 0;<br />+    if (priv->is_pf) {<br />+        ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "get port_attr failed");<br />+            return -1;<br />+        }<br />+        port_attr.fd_enable = enable;<br />+<br />+        ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "write port_attr failed");<br />+            return -1;<br />+        }<br />+    } else {<br />+        struct zxdh_msg_info msg_info = {0};<br />+        struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;<br />+<br />+        attr_msg->mode  = ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF;<br />+        attr_msg->value = enable;<br />+<br />+        zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);<br />+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);<br />+    }<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "port %d vxlan flow enable failed", priv->port_id);<br />+        return -rte_flow_error_set(error, EEXIST,<br />+                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,<br />+                    "vxlan offload enable failed.");<br />+    }<br />+    priv->vxlan_flow_en = !!enable;<br />+    return ret;<br />+}<br />+<br />+void zxdh_register_flow_engine(struct dh_flow_engine *engine)<br />+{<br />+    TAILQ_INSERT_TAIL(&flow_engine_list, engine, node);<br />+}<br />+<br />+static void zxdh_flow_free(struct zxdh_flow *dh_flow)<br />+{<br 
/>+    if (dh_flow)<br />+        rte_mempool_put(zxdh_shared_data->flow_mp, dh_flow);<br />+}<br />+<br />+static struct dh_flow_engine *zxdh_get_flow_engine(struct rte_eth_dev *dev __rte_unused)<br />+{<br />+    struct dh_flow_engine *engine = NULL;<br />+    void *temp;<br />+<br />+    RTE_TAILQ_FOREACH_SAFE(engine, &flow_engine_list, node, temp) {<br />+        if (engine->type  == FLOW_TYPE_FD_TCAM)<br />+            break;<br />+    }<br />+    return engine;<br />+}<br />+<br />+static int<br />+zxdh_flow_validate(struct rte_eth_dev *dev,<br />+             const struct rte_flow_attr *attr,<br />+             const struct rte_flow_item  *pattern,<br />+             const struct rte_flow_action *actions,<br />+             struct rte_flow_error *error)<br />+{<br />+    struct dh_flow_engine *flow_engine = NULL;<br />+<br />+    if (!pattern) {<br />+        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,<br />+                   NULL, "NULL pattern.");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (!actions) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,<br />+                   NULL, "NULL action.");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (!attr) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_ATTR,<br />+                   NULL, "NULL attribute.");<br />+        return -rte_errno;<br />+    }<br />+    flow_engine = zxdh_get_flow_engine(dev);<br />+    if (flow_engine == NULL || flow_engine->parse_pattern_action == NULL) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,<br />+                   NULL, "cannot find valid flow engine.");<br />+        return -rte_errno;<br />+    }<br />+    if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, NULL) != 0)<br />+        return -rte_errno;<br />+    return 0;<br 
/>+}<br />+<br />+static struct zxdh_flow *flow_exist_check(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    struct rte_flow *entry;<br />+    struct zxdh_flow *entry_flow;<br />+<br />+    TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {<br />+        entry_flow = (struct zxdh_flow *)entry->driver_flow;<br />+        if ((memcmp(&entry_flow->flowentry.fd_flow.key, &dh_flow->flowentry.fd_flow.key,<br />+                 sizeof(struct fd_flow_key)) == 0)  && <br />+            (memcmp(&entry_flow->flowentry.fd_flow.key_mask,<br />+                &dh_flow->flowentry.fd_flow.key_mask,<br />+                 sizeof(struct fd_flow_key)) == 0)) {<br />+            return entry_flow;<br />+        }<br />+    }<br />+    return NULL;<br />+}<br />+<br />+static struct rte_flow *<br />+zxdh_flow_create(struct rte_eth_dev *dev,<br />+         const struct rte_flow_attr *attr,<br />+         const struct rte_flow_item pattern[],<br />+         const struct rte_flow_action actions[],<br />+         struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    struct rte_flow *flow = NULL;<br />+    struct zxdh_flow *dh_flow = NULL;<br />+    int ret = 0;<br />+    struct dh_flow_engine *flow_engine = NULL;<br />+<br />+    flow_engine = zxdh_get_flow_engine(dev);<br />+<br />+    if (flow_engine == NULL ||<br />+        flow_engine->parse_pattern_action == NULL ||<br />+        flow_engine->apply == NULL) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,<br />+                   NULL, "cannot find valid flow engine.");<br />+        return NULL;<br />+    }<br />+<br />+    flow = rte_zmalloc("rte_flow", sizeof(struct rte_flow), 0);<br />+    if (!flow) {<br />+        rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "flow malloc failed");<br />+        return NULL;<br />+    }<br 
/>+    ret = rte_mempool_get(zxdh_shared_data->flow_mp, (void **)&dh_flow);<br />+    if (ret) {<br />+        rte_flow_error_set(error, ENOMEM,<br />+                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                    "Failed to allocate memory from flowmp");<br />+        goto free_flow;<br />+    }<br />+    memset(dh_flow, 0, sizeof(struct zxdh_flow));<br />+    if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, dh_flow) != 0) {<br />+        PMD_DRV_LOG(ERR, "parse_pattern_action failed zxdh_created failed");<br />+        goto free_flow;<br />+    }<br />+<br />+    if (flow_exist_check(dev, dh_flow) != NULL) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                    "flow repeat .no add again");<br />+        goto free_flow;<br />+    }<br />+<br />+    ret = flow_engine->apply(dev, dh_flow, error, hw->vport.vport, hw->pcie_id);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "apply failed flow created failed");<br />+        goto free_flow;<br />+    }<br />+    flow->driver_flow = dh_flow;<br />+    flow->port_id = dev->data->port_id;<br />+    flow->type = ZXDH_FLOW_GROUP_TCAM;<br />+    TAILQ_INSERT_TAIL(&hw->dh_flow_list, flow, next);<br />+<br />+    if (hw->i_flow_en == 0) {<br />+        ret = set_flow_enable(dev, FLOW_INGRESS, 1, error);<br />+        if (ret < 0) {<br />+            PMD_DRV_LOG(ERR, "set flow enable failed");<br />+            goto free_flow;<br />+        }<br />+    }<br />+    return flow;<br />+free_flow:<br />+    zxdh_flow_free(dh_flow);<br />+    rte_free(flow);<br />+    return NULL;<br />+}<br />+<br />+static int<br />+zxdh_flow_destroy(struct rte_eth_dev *dev,<br />+          struct rte_flow *flow,<br />+          struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *priv = dev->data->dev_private;<br />+    struct zxdh_flow *dh_flow = NULL;<br />+    int ret = 0;<br />+    struct dh_flow_engine 
*flow_engine = NULL;<br />+<br />+    flow_engine = zxdh_get_flow_engine(dev);<br />+    if (flow_engine == NULL ||<br />+        flow_engine->destroy == NULL) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,<br />+                 NULL, "cannot find valid flow engine.");<br />+        return -rte_errno;<br />+    }<br />+    if (flow->driver_flow)<br />+        dh_flow = (struct zxdh_flow *)flow->driver_flow;<br />+<br />+    if (dh_flow == NULL) {<br />+        PMD_DRV_LOG(ERR, "invalid flow");<br />+        rte_flow_error_set(error, EINVAL,<br />+                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,<br />+                 NULL, "invalid flow");<br />+        return -1;<br />+    }<br />+    ret = flow_engine->destroy(dev, dh_flow, error, priv->vport.vport, priv->pcie_id);<br />+    if (ret) {<br />+        rte_flow_error_set(error, -ret,<br />+                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                   "Failed to destroy flow.");<br />+        return -rte_errno;<br />+    }<br />+    TAILQ_REMOVE(&priv->dh_flow_list, flow, next);<br />+    zxdh_flow_free(dh_flow);<br />+    rte_free(flow);<br />+<br />+    if (TAILQ_EMPTY(&priv->dh_flow_list)) {<br />+        ret = set_flow_enable(dev, FLOW_INGRESS, 0, error);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "clear flow enable failed");<br />+            return -rte_errno;<br />+        }<br />+    }<br />+    return ret;<br />+}<br />+<br />+<br />+static int<br />+zxdh_flow_query(struct rte_eth_dev *dev,<br />+        struct rte_flow *flow,<br />+        const struct rte_flow_action *actions,<br />+        void *data, struct rte_flow_error *error)<br />+{<br />+    struct zxdh_flow *dh_flow;<br />+    int ret = 0;<br />+    struct dh_flow_engine *flow_engine = NULL;<br />+<br />+    flow_engine = zxdh_get_flow_engine(dev);<br />+<br />+    if (flow_engine == NULL ||<br />+        flow_engine->query_count == NULL) {<br />+        
rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,<br />+                   NULL, "cannot find valid flow engine.");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (flow->driver_flow)<br />+        dh_flow = (struct zxdh_flow *)flow->driver_flow;<br />+    dh_flow = (struct zxdh_flow *)flow->driver_flow;<br />+    if (dh_flow == NULL) {<br />+        PMD_DRV_LOG(ERR, "flow is not exist");<br />+        return -1;<br />+    }<br />+<br />+    for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {<br />+        switch (actions->type) {<br />+        case RTE_FLOW_ACTION_TYPE_VOID:<br />+            break;<br />+        case RTE_FLOW_ACTION_TYPE_COUNT:<br />+            ret = flow_engine->query_count(dev, dh_flow,<br />+                         (struct rte_flow_query_count *)data, error);<br />+            break;<br />+        default:<br />+            ret = rte_flow_error_set(error, ENOTSUP,<br />+                    RTE_FLOW_ERROR_TYPE_ACTION,<br />+                    actions,<br />+                    "action not supported");<br />+            goto out;<br />+        }<br />+    }<br />+out:<br />+    if (ret)<br />+        PMD_DRV_LOG(ERR, "flow query failed");<br />+    return ret;<br />+}<br />+<br />+static int zxdh_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)<br />+{<br />+    struct rte_flow *flow;<br />+    struct zxdh_flow *dh_flow = NULL;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;<br />+    struct dh_flow_engine *flow_engine = NULL;<br />+    struct zxdh_msg_info msg_info = {0};<br />+    uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};<br />+    int ret = 0;<br />+<br />+    flow_engine = zxdh_get_flow_engine(dev);<br />+    if (flow_engine == NULL) {<br />+        PMD_DRV_LOG(ERR, "get flow engine failed");<br />+        return -1;<br />+    }<br />+    ret = 
set_flow_enable(dev, FLOW_INGRESS, 0, error);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "clear flow enable failed");<br />+        return ret;<br />+    }<br />+<br />+    ret = set_vxlan_enable(dev, 0, error);<br />+    if (ret)<br />+        PMD_DRV_LOG(ERR, "clear vxlan enable failed");<br />+    hw->vxlan_fd_num = 0;<br />+<br />+    if (hw->is_pf) {<br />+        ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,<br />+                    ZXDH_SDT_FD_TABLE, hw->vport.vport,<br />+                    ZXDH_FLOW_STATS_INGRESS_BASE, 1);<br />+        if (ret)<br />+            PMD_DRV_LOG(ERR, "%s flush failed. code:%d", dev->data->name, ret);<br />+    } else {<br />+        zxdh_msg_head_build(hw, ZXDH_FLOW_HW_FLUSH, &msg_info);<br />+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),<br />+            (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "port %d flow op %d flush failed ret %d",<br />+                hw->port_id, ZXDH_FLOW_HW_FLUSH, ret);<br />+            return -1;<br />+        }<br />+    }<br />+<br />+    /* Remove all flows */<br />+    while ((flow = TAILQ_FIRST(&hw->dh_flow_list))) {<br />+        TAILQ_REMOVE(&hw->dh_flow_list, flow, next);<br />+        if (flow->driver_flow)<br />+            dh_flow = (struct zxdh_flow *)flow->driver_flow;<br />+        if (dh_flow == NULL) {<br />+            PMD_DRV_LOG(ERR, "Invalid flow Failed to destroy flow.");<br />+            ret = rte_flow_error_set(error, ENOTSUP,<br />+                    RTE_FLOW_ERROR_TYPE_HANDLE,<br />+                    NULL,<br />+                    "Invalid flow ,flush failed");<br />+            return ret;<br />+        }<br />+<br />+        zxdh_flow_free(dh_flow);<br />+        rte_free(flow);<br />+    }<br />+    return ret;<br />+}<br />+<br />+static void<br />+handle_res_dump(struct rte_eth_dev *dev)<br />+{<br />+    struct 
zxdh_hw *priv =  dev->data->dev_private;<br />+    uint16_t hwres_base = priv->vport.pfid << 10;<br />+    uint16_t hwres_cnt = ZXDH_MAX_FLOW_NUM >> 1;<br />+    uint16_t i;<br />+<br />+    PMD_DRV_LOG(DEBUG, "hwres_base %d", hwres_base);<br />+    rte_spinlock_lock(&fd_hw_res_lock);<br />+    for (i = 0; i < hwres_cnt; i++) {<br />+        if (fd_hwres_bitmap[hwres_base + i] == 1)<br />+            PMD_DRV_LOG(DEBUG, "used idx %d", i + hwres_base);<br />+    }<br />+    rte_spinlock_unlock(&fd_hw_res_lock);<br />+}<br />+<br />+static int<br />+zxdh_flow_dev_dump(struct rte_eth_dev *dev,<br />+            struct rte_flow *flow,<br />+            FILE *file,<br />+            struct rte_flow_error *error __rte_unused)<br />+{<br />+    struct zxdh_hw *hw =  dev->data->dev_private;<br />+    struct rte_flow *entry;<br />+    struct zxdh_flow *entry_flow;<br />+    uint32_t dtb_qid = 0;<br />+    uint32_t entry_num = 0;<br />+    uint16_t ret = 0;<br />+    ZXDH_DTB_ACL_ENTRY_INFO_T *fd_entry = NULL;<br />+    uint8_t *key = NULL;<br />+    uint8_t *key_mask = NULL;<br />+    uint8_t *result = NULL;<br />+<br />+    if (flow) {<br />+        entry_flow = flow_exist_check(dev, (struct zxdh_flow *)flow->driver_flow);<br />+        if (entry_flow) {<br />+            PMD_DRV_LOG(DEBUG, "handle idx %d:", entry_flow->flowentry.hw_idx);<br />+            offlow_key_dump(&entry_flow->flowentry.fd_flow.key,<br />+                &entry_flow->flowentry.fd_flow.key_mask, file);<br />+            offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);<br />+        }<br />+    } else {<br />+        if (hw->is_pf) {<br />+            dtb_qid = hw->dev_sd->dtb_sd.queueid;<br />+            fd_entry = rte_malloc(NULL,<br />+                sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T) * ZXDH_MAX_FLOW_NUM, 1);<br />+            key = rte_malloc(NULL, sizeof(struct fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);<br />+            key_mask = rte_malloc(NULL,<br />+                sizeof(struct 
fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);<br />+            result = rte_malloc(NULL,<br />+                sizeof(struct fd_flow_result) * ZXDH_MAX_FLOW_NUM, 1);<br />+            if (!fd_entry || !key || !key_mask || !result) {<br />+                PMD_DRV_LOG(ERR, "fd_entry malloc failed!");<br />+                goto end;<br />+            }<br />+<br />+            for (int i = 0; i < ZXDH_MAX_FLOW_NUM; i++) {<br />+                fd_entry[i].key_data = key + i * sizeof(struct fd_flow_key);<br />+                fd_entry[i].key_mask = key_mask + i * sizeof(struct fd_flow_key);<br />+                fd_entry[i].p_as_rslt = result + i * sizeof(struct fd_flow_result);<br />+            }<br />+            ret = zxdh_np_dtb_acl_table_dump_by_vport(hw->dev_id, dtb_qid,<br />+                        ZXDH_SDT_FD_TABLE, hw->vport.vport, &entry_num,<br />+                        (uint8_t *)fd_entry);<br />+            if (ret) {<br />+                PMD_DRV_LOG(ERR, "dpp_dtb_acl_table_dump_by_vport failed!");<br />+                goto end;<br />+            }<br />+            for (uint32_t i = 0; i < entry_num; i++) {<br />+                offlow_key_dump((struct fd_flow_key *)fd_entry[i].key_data,<br />+                    (struct fd_flow_key *)fd_entry[i].key_mask, file);<br />+                offlow_result_dump((struct fd_flow_result *)fd_entry[i].p_as_rslt,<br />+                        file);<br />+            }<br />+            rte_free(result);<br />+            rte_free(key_mask);<br />+            rte_free(key);<br />+            rte_free(fd_entry);<br />+        } else {<br />+            entry = rte_malloc(NULL, sizeof(struct rte_flow), 0);<br />+            entry_flow = rte_malloc(NULL, sizeof(struct zxdh_flow), 0);<br />+            TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {<br />+                entry_flow = (struct zxdh_flow *)entry->driver_flow;<br />+                offlow_key_dump(&entry_flow->flowentry.fd_flow.key,<br />+                       
 &entry_flow->flowentry.fd_flow.key_mask, file);<br />+                offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);<br />+            }<br />+            rte_free(entry_flow);<br />+            rte_free(entry);<br />+        }<br />+    }<br />+    handle_res_dump(dev);<br />+<br />+    return 0;<br />+end:<br />+    rte_free(result);<br />+    rte_free(key_mask);<br />+    rte_free(key);<br />+    rte_free(fd_entry);<br />+    return -1;<br />+}<br />+<br />+static int32_t<br />+get_available_handle(struct zxdh_hw *hw, uint16_t vport)<br />+{<br />+    int ret = 0;<br />+    uint32_t handle_idx = 0;<br />+<br />+    ret = zxdh_np_dtb_acl_index_request(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, &handle_idx);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "Failed to allocate memory for hw!");<br />+        return INVALID_HANDLEIDX;<br />+    }<br />+    return handle_idx;<br />+}<br />+<br />+static int free_handle(struct zxdh_hw *hw, uint16_t handle_idx, uint16_t vport)<br />+{<br />+    int ret = zxdh_np_dtb_acl_index_release(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, handle_idx);<br />+<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "Failed to free handle_idx %d for hw!", handle_idx);<br />+        return -1;<br />+    }<br />+    return 0;<br />+}<br />+<br />+static uint16_t<br />+zxdh_encap0_to_dtbentry(struct zxdh_hw *hw __rte_unused,<br />+            struct zxdh_flow *dh_flow,<br />+            ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;<br />+    dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);<br />+<br />+    if (dtb_eram_entry == NULL)<br />+        return INVALID_HANDLEIDX;<br />+<br />+    dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2;<br />+    dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0;<br />+<br />+    dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;<br />+    dtb_entry->p_entry_data = dtb_eram_entry;<br />+    
return 0;<br />+}<br />+<br />+static uint16_t<br />+zxdh_encap0_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,<br />+            struct zxdh_flow *dh_flow,<br />+            ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;<br />+    dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);<br />+<br />+    if (dtb_eram_entry == NULL)<br />+        return INVALID_HANDLEIDX;<br />+<br />+    dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2 + 1;<br />+    dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0.dip;<br />+    dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;<br />+    dtb_entry->p_entry_data = dtb_eram_entry;<br />+    return 0;<br />+}<br />+<br />+static uint16_t zxdh_encap1_to_dtbentry(struct zxdh_hw *hw __rte_unused,<br />+                             struct zxdh_flow *dh_flow,<br />+                             ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;<br />+    dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);<br />+<br />+    if (dtb_eram_entry == NULL)<br />+        return INVALID_HANDLEIDX;<br />+<br />+    if (dh_flow->encap0.ethtype == 0)<br />+        dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4;<br />+    else<br />+        dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 1;<br />+<br />+    dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1;<br />+<br />+    dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;<br />+    dtb_entry->p_entry_data = dtb_eram_entry;<br />+    return 0;<br />+}<br />+<br />+static uint16_t<br />+zxdh_encap1_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,<br />+            struct zxdh_flow *dh_flow,<br />+            ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;<br />+    dtb_eram_entry = rte_zmalloc(NULL, 
sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);<br />+<br />+    if (dtb_eram_entry == NULL)<br />+        return INVALID_HANDLEIDX;<br />+    if (dh_flow->encap0.ethtype == 0)<br />+        dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 2;<br />+    else<br />+        dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 3;<br />+    dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1.sip;<br />+    dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;<br />+    dtb_entry->p_entry_data = dtb_eram_entry;<br />+    return 0;<br />+}<br />+<br />+static int zxdh_hw_encap_insert(struct rte_eth_dev *dev,<br />+                    struct zxdh_flow *dh_flow,<br />+                    struct rte_flow_error *error)<br />+{<br />+    uint32_t ret;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;<br />+    ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};<br />+<br />+    zxdh_encap0_to_dtbentry(hw, dh_flow, &dtb_entry);<br />+    ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                            "write to hw failed");<br />+        return -1;<br />+    }<br />+<br />+    zxdh_encap0_ip_to_dtbentry(hw, dh_flow, &dtb_entry);<br />+    ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                "write to hw failed");<br />+        return -1;<br />+    }<br />+<br />+    zxdh_encap1_to_dtbentry(hw, dh_flow, &dtb_entry);<br />+    ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br 
/>+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                    "write to hw failed");<br />+        return -1;<br />+    }<br />+<br />+    zxdh_encap1_ip_to_dtbentry(hw, dh_flow, &dtb_entry);<br />+    ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                    "write to hw failed");<br />+        return -1;<br />+    }<br />+    return 0;<br />+}<br />+<br />+static uint16_t<br />+zxdh_fd_flow_to_dtbentry(struct zxdh_hw *hw __rte_unused,<br />+        struct zxdh_flow_info *fdflow,<br />+        ZXDH_DTB_USER_ENTRY_T *dtb_entry)<br />+{<br />+    ZXDH_DTB_ACL_ENTRY_INFO_T *dtb_acl_entry;<br />+    uint16_t handle_idx = 0;<br />+    dtb_acl_entry = rte_zmalloc("fdflow_dtbentry", sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);<br />+<br />+    if (dtb_acl_entry == NULL)<br />+        return INVALID_HANDLEIDX;<br />+<br />+    dtb_acl_entry->key_data = (uint8_t *)&fdflow->fd_flow.key;<br />+    dtb_acl_entry->key_mask = (uint8_t *)&fdflow->fd_flow.key_mask;<br />+    dtb_acl_entry->p_as_rslt = (uint8_t *)&fdflow->fd_flow.result;<br />+<br />+    handle_idx = fdflow->hw_idx;<br />+<br />+    if (handle_idx >= ZXDH_MAX_FLOW_NUM) {<br />+        rte_free(dtb_acl_entry);<br />+        return INVALID_HANDLEIDX;<br />+    }<br />+    dtb_acl_entry->handle = handle_idx;<br />+    dtb_entry->sdt_no = ZXDH_SDT_FD_TABLE;<br />+    dtb_entry->p_entry_data = dtb_acl_entry;<br />+    return handle_idx;<br />+}<br />+<br />+static int zxdh_hw_flow_insert(struct rte_eth_dev *dev,<br />+                                struct zxdh_flow *dh_flow,<br />+                                struct rte_flow_error *error,<br />+                                uint16_t vport)<br />+{<br />+    struct 
zxdh_hw *hw = dev->data->dev_private;<br />+    uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;<br />+    ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};<br />+    uint32_t ret;<br />+    uint16_t handle_idx;<br />+<br />+    struct zxdh_flow_info *flow = &dh_flow->flowentry;<br />+    handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);<br />+    if (handle_idx == INVALID_HANDLEIDX) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "Failed to allocate memory for hw");<br />+        return -1;<br />+    }<br />+    ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+    if (ret) {<br />+        ret = free_handle(hw, handle_idx, vport);<br />+        if (ret) {<br />+            rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "release handle_idx to hw failed");<br />+            return -1;<br />+        }<br />+        rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "write to hw failed");<br />+        return -1;<br />+    }<br />+    dh_flow->flowentry.hw_idx = handle_idx;<br />+    return 0;<br />+}<br />+<br />+static int<br />+hw_count_query(struct zxdh_hw *hw, uint32_t countid, bool clear,<br />+        struct flow_stats *fstats, struct rte_flow_error *error)<br />+{<br />+    uint32_t stats_id = 0;<br />+    int ret = 0;<br />+    stats_id = countid;<br />+    if (stats_id >= ZXDH_MAX_FLOW_NUM) {<br />+        PMD_DRV_LOG(DEBUG, "query count id %d invalid", stats_id);<br />+        ret = rte_flow_error_set(error, ENODEV,<br />+                 RTE_FLOW_ERROR_TYPE_HANDLE,<br />+                 NULL,<br />+                 "query count id invalid");<br />+        return -rte_errno;<br />+    }<br />+    PMD_DRV_LOG(DEBUG, "query 
count id %d,clear %d ", stats_id, clear);<br />+    if (!clear)<br />+        ret = zxdh_np_dtb_stats_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, 1,<br />+                    stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,<br />+                    (uint32_t *)fstats);<br />+    else<br />+        ret = zxdh_np_stat_ppu_cnt_get_ex(hw->dev_id, 1,<br />+                    stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,<br />+                    1, (uint32_t *)fstats);<br />+    if (ret)<br />+        rte_flow_error_set(error, EINVAL,<br />+                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,<br />+                     "fail to get flow stats");<br />+    return ret;<br />+}<br />+<br />+static int<br />+count_deref(struct zxdh_hw *hw, uint32_t countid,<br />+        struct rte_flow_error *error)<br />+{<br />+    int ret = 0;<br />+    struct count_res *count_res = &flow_count_ref[countid];<br />+    struct flow_stats fstats = {0};<br />+<br />+    rte_spinlock_lock(&count_res->count_lock);<br />+<br />+    if (count_res->count_ref >= 1) {<br />+        count_res->count_ref--;<br />+    } else {<br />+        rte_spinlock_unlock(&count_res->count_lock);<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                     RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                     NULL,<br />+                     "count deref underflow");<br />+    }<br />+    if (count_res->count_ref == 0)<br />+        ret = hw_count_query(hw, countid, 1, &fstats, error);<br />+<br />+    rte_spinlock_unlock(&count_res->count_lock);<br />+    return ret;<br />+}<br />+<br />+static int<br />+count_ref(struct zxdh_hw *hw, uint32_t countid, struct rte_flow_error *error)<br />+{<br />+    int ret = 0;<br />+    struct count_res *count_res = &flow_count_ref[countid];<br />+    struct flow_stats fstats = {0};<br />+<br />+    rte_spinlock_lock(&count_res->count_lock);<br />+    if (count_res->count_ref < 255) {<br />+        count_res->count_ref++;<br />+    } else {<br />+        
rte_spinlock_unlock(&count_res->count_lock);<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                     RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                     NULL,<br />+                     "count ref overflow");<br />+    }<br />+<br />+    if (count_res->count_ref == 1)<br />+        ret = hw_count_query(hw, countid, 1, &fstats, error);<br />+<br />+    rte_spinlock_unlock(&count_res->count_lock);<br />+    return ret;<br />+}<br />+<br />+int<br />+pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,<br />+        struct rte_flow_error *error, uint16_t vport, uint16_t pcieid)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint8_t vf_index = 0;<br />+    uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;<br />+    uint32_t countid  = MAX_FLOW_COUNT_NUM;<br />+    uint32_t handle_idx = 0;<br />+    union zxdh_virport_num port = {0};<br />+<br />+    port.vport = vport;<br />+    handle_idx = get_available_handle(hw, vport);<br />+    if (handle_idx >= ZXDH_MAX_FLOW_NUM) {<br />+        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to allocate memory for hw");<br />+        return -1;<br />+    }<br />+    dh_flow->flowentry.hw_idx = handle_idx;<br />+    if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0) {<br />+        countid = handle_idx;<br />+        dh_flow->flowentry.fd_flow.result.countid = countid;<br />+    }<br />+<br />+    if ((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) {<br />+        dh_flow->flowentry.fd_flow.result.encap0_index = handle_idx;<br />+        if (!port.vf_flag) {<br />+            dh_flow->flowentry.fd_flow.result.encap1_index =<br />+                hw->hash_search_index * MAX_ENCAP1_NUM;<br />+        } else {<br />+            vf_index = VF_IDX(pcieid);<br />+            if (vf_index < (ZXDH_MAX_VF - 1)) {<br />+                dh_flow->flowentry.fd_flow.result.encap1_index =<br />+   
                 hw->hash_search_index * MAX_ENCAP1_NUM + vf_index + 1;<br />+            } else {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                        "encap1 vf_index is too big");<br />+                return -1;<br />+            }<br />+        }<br />+        PMD_DRV_LOG(DEBUG, "encap_index (%d)(%d)",<br />+                dh_flow->flowentry.fd_flow.result.encap0_index,<br />+                dh_flow->flowentry.fd_flow.result.encap1_index);<br />+        if (zxdh_hw_encap_insert(dev, dh_flow, error) != 0)<br />+            return -1;<br />+    }<br />+    ret = zxdh_hw_flow_insert(dev, dh_flow, error, vport);<br />+    if (!ret && countid < MAX_FLOW_COUNT_NUM)<br />+        ret = count_ref(hw, countid, error);<br />+<br />+    if (!ret) {<br />+        if (!port.vf_flag) {<br />+            if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||<br />+                ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {<br />+                hw->vxlan_fd_num++;<br />+                if (hw->vxlan_fd_num == 1)<br />+                    set_vxlan_enable(dev, 1, error);<br />+            }<br />+        }<br />+    }<br />+<br />+    return ret;<br />+}<br />+<br />+static int<br />+zxdh_hw_flow_del(struct rte_eth_dev *dev,<br />+                            struct zxdh_flow *dh_flow,<br />+                            struct rte_flow_error *error,<br />+                            uint16_t vport)<br />+{<br />+    struct zxdh_flow_info *flow = &dh_flow->flowentry;<br />+    ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;<br />+    uint32_t ret;<br />+    uint16_t handle_idx;<br />+<br />+    handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);<br />+    if (handle_idx >= ZXDH_MAX_FLOW_NUM) {<br />+        rte_flow_error_set(error, EINVAL,<br />+            
             RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "Failed to allocate memory for hw");<br />+        return -1;<br />+    }<br />+    ret = zxdh_np_dtb_table_entry_delete(hw->dev_id, dtb_qid, 1, &dtb_entry);<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "delete to hw failed");<br />+        return -1;<br />+    }<br />+    ret = free_handle(hw, handle_idx, vport);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                        "release handle_idx to hw failed");<br />+        return -1;<br />+    }<br />+    PMD_DRV_LOG(DEBUG, "release handle_idx to hw succ! %d", handle_idx);<br />+    return ret;<br />+}<br />+<br />+int<br />+pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,<br />+        struct rte_flow_error *error, uint16_t vport,<br />+        uint16_t pcieid __rte_unused)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    union zxdh_virport_num port = {0};<br />+    int ret = 0;<br />+<br />+    port.vport = vport;<br />+    ret = zxdh_hw_flow_del(dev, dh_flow, error, vport);<br />+    PMD_DRV_LOG(DEBUG, "destroy handle id %d", dh_flow->flowentry.hw_idx);<br />+    if (!ret) {<br />+        uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;<br />+        uint32_t countid;<br />+        countid = dh_flow->flowentry.hw_idx;<br />+        if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0)<br />+            ret = count_deref(hw, countid, error);<br />+        if (!port.vf_flag) {<br />+            if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||<br />+                ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {<br />+                hw->vxlan_fd_num--;<br />+                if 
(hw->vxlan_fd_num == 0)<br />+                    set_vxlan_enable(dev, 0, error);<br />+            }<br />+        }<br />+    }<br />+    return ret;<br />+}<br />+<br />+static int<br />+zxdh_hw_flow_query(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,<br />+        struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int ret = 0;<br />+    struct zxdh_flow_info *flow = &dh_flow->flowentry;<br />+    ZXDH_DTB_USER_ENTRY_T dtb_entry;<br />+    uint16_t handle_idx;<br />+<br />+    handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);<br />+    if (handle_idx >= ZXDH_MAX_FLOW_NUM) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "Failed to build hw entry for query");<br />+        ret = -1;<br />+        goto free_res;<br />+    }<br />+    ret = zxdh_np_dtb_table_entry_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, &dtb_entry, 0);<br />+    if (ret != 0) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "Failed query  entry from hw ");<br />+        goto free_res;<br />+    }<br />+<br />+free_res:<br />+    zxdh_fd_flow_free_dtbentry(&dtb_entry);<br />+<br />+    return ret;<br />+}<br />+<br />+int<br />+pf_fd_hw_query_count(struct rte_eth_dev *dev,<br />+            struct zxdh_flow *flow,<br />+            struct rte_flow_query_count *count,<br />+            struct rte_flow_error *error)<br />+{<br />+    struct zxdh_hw *hw =  dev->data->dev_private;<br />+    struct flow_stats  fstats = {0};<br />+    int ret = 0;<br />+    uint32_t countid;<br />+<br />+    memset(&flow->flowentry.fd_flow.result, 0, sizeof(struct fd_flow_result));<br />+    ret = zxdh_hw_flow_query(dev, flow, error);<br />+    if (ret) {<br />+        ret = rte_flow_error_set(error, ENODEV,<br />+                 
RTE_FLOW_ERROR_TYPE_HANDLE,<br />+                 NULL,<br />+                 "query failed");<br />+        return -rte_errno;<br />+    }<br />+    countid = flow->flowentry.hw_idx;<br />+    if (countid >= ZXDH_MAX_FLOW_NUM) {<br />+        ret = rte_flow_error_set(error, ENODEV,<br />+                 RTE_FLOW_ERROR_TYPE_HANDLE,<br />+                 NULL,<br />+                 "query count id invalid");<br />+        return -rte_errno;<br />+    }<br />+    ret = hw_count_query(hw, countid, 0, &fstats, error);<br />+    if (ret) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,<br />+                     "fail to get flow stats");<br />+            return ret;<br />+    }<br />+    count->bytes = (uint64_t)(rte_le_to_cpu_32(fstats.hit_bytes_hi)) << 32 |<br />+                    rte_le_to_cpu_32(fstats.hit_bytes_lo);<br />+    count->hits = (uint64_t)(rte_le_to_cpu_32(fstats.hit_pkts_hi)) << 32 |<br />+                    rte_le_to_cpu_32(fstats.hit_pkts_lo);<br />+    return ret;<br />+}<br />+<br />+static int<br />+fd_flow_parse_attr(struct rte_eth_dev *dev __rte_unused,<br />+        const struct rte_flow_attr *attr,<br />+        struct rte_flow_error *error,<br />+        struct zxdh_flow *dh_flow)<br />+{<br />+    /* Not supported */<br />+    if (attr->priority) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,<br />+                   attr, "Not support priority.");<br />+        return -rte_errno;<br />+    }<br />+<br />+    /* Not supported */<br />+    if (attr->group >= MAX_GROUP) {<br />+        rte_flow_error_set(error, EINVAL,<br />+                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,<br />+                   attr, "Not support group.");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (dh_flow) {<br />+        dh_flow->group = attr->group;<br />+        dh_flow->direct = (attr->ingress == 1) ? 
0 : 1;<br />+        dh_flow->pri = attr->priority;<br />+    }<br />+<br />+    return 0;<br />+}<br />+<br />+static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *items,<br />+             struct rte_flow_error *error, struct zxdh_flow *dh_flow)<br />+{<br />+    struct zxdh_hw *priv = dev->data->dev_private;<br />+    struct zxdh_flow_info *flow = NULL;<br />+    const struct rte_flow_item *item;<br />+    const struct rte_flow_item_eth *eth_spec, *eth_mask;<br />+    const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;<br />+    const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;<br />+    const struct rte_flow_item_ipv6 *ipv6_spec = NULL, *ipv6_mask = NULL;<br />+    const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;<br />+    const struct rte_flow_item_udp *udp_spec, *udp_mask;<br />+    const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;<br />+    const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;<br />+    struct fd_flow_key *key, *key_mask;<br />+<br />+    if (dh_flow) {<br />+        flow = &dh_flow->flowentry;<br />+    } else {<br />+        flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);<br />+        if (flow == NULL) {<br />+            rte_flow_error_set(error, EINVAL,<br />+                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                         "Failed to allocate memory ");<br />+            return -rte_errno;<br />+        }<br />+    }<br />+<br />+    key = &flow->fd_flow.key;<br />+    key_mask = &flow->fd_flow.key_mask;<br />+    key->vfid = rte_cpu_to_be_16(priv->vfid);<br />+    key_mask->vfid  = 0xffff;<br />+    for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {<br />+        item = items;<br />+        if (items->last) {<br />+            rte_flow_error_set(error, EINVAL,<br />+                     RTE_FLOW_ERROR_TYPE_ITEM,<br />+                     items,<br />+                     "Not support range");<br />+            return -rte_errno;<br />+        
}<br />+<br />+        switch (item->type) {<br />+        case RTE_FLOW_ITEM_TYPE_ETH:<br />+            eth_spec = item->spec;<br />+            eth_mask = item->mask;<br />+            if (eth_spec && eth_mask) {<br />+                key->mac_dst = eth_spec->dst;<br />+                key->mac_src  = eth_spec->src;<br />+                key_mask->mac_dst  = eth_mask->dst;<br />+                key_mask->mac_src  = eth_mask->src;<br />+<br />+                if (eth_mask->type == 0xffff) {<br />+                    key->ether_type = eth_spec->type;<br />+                    key_mask->ether_type = eth_mask->type;<br />+                }<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_VLAN:<br />+            vlan_spec = item->spec;<br />+            vlan_mask = item->mask;<br />+            if (vlan_spec && vlan_mask) {<br />+                key->vlan_tci  = vlan_spec->tci;<br />+                key_mask->vlan_tci = vlan_mask->tci;<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_IPV4:<br />+            ipv4_spec = item->spec;<br />+            ipv4_mask = item->mask;<br />+<br />+            if (ipv4_spec && ipv4_mask) {<br />+                /* Check IPv4 mask and update input set */<br />+                if (ipv4_mask->hdr.version_ihl ||<br />+                    ipv4_mask->hdr.total_length ||<br />+                    ipv4_mask->hdr.packet_id ||<br />+                    ipv4_mask->hdr.hdr_checksum ||<br />+                    ipv4_mask->hdr.time_to_live) {<br />+                    rte_flow_error_set(error, EINVAL,<br />+                             RTE_FLOW_ERROR_TYPE_ITEM,<br />+                             item,<br />+                             "Invalid IPv4 mask.");<br />+                    return -rte_errno;<br />+                }<br />+                    /* Get the filter info */<br />+                key->nw_proto =<br />+                        ipv4_spec->hdr.next_proto_id;<br 
/>+                key->tos =<br />+                        ipv4_spec->hdr.type_of_service;<br />+                key_mask->nw_proto =<br />+                        ipv4_mask->hdr.next_proto_id;<br />+                key_mask->tos =<br />+                        ipv4_mask->hdr.type_of_service;<br />+                key->frag_flag = (ipv4_spec->hdr.fragment_offset != 0) ? 1 : 0;<br />+                key_mask->frag_flag = (ipv4_mask->hdr.fragment_offset != 0) ? 1 : 0;<br />+                rte_memcpy((uint32_t *)key->src_ip + 3,<br />+                             &ipv4_spec->hdr.src_addr, 4);<br />+                rte_memcpy((uint32_t *)key->dst_ip + 3,<br />+                             &ipv4_spec->hdr.dst_addr, 4);<br />+                rte_memcpy((uint32_t *)key_mask->src_ip + 3,<br />+                             &ipv4_mask->hdr.src_addr, 4);<br />+                rte_memcpy((uint32_t *)key_mask->dst_ip + 3,<br />+                             &ipv4_mask->hdr.dst_addr, 4);<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_IPV6:<br />+            ipv6_spec = item->spec;<br />+            ipv6_mask = item->mask;<br />+<br />+            if (ipv6_spec && ipv6_mask) {<br />+                /* Check IPv6 mask and update input set */<br />+                if (ipv6_mask->hdr.payload_len ||<br />+                     ipv6_mask->hdr.hop_limits == UINT8_MAX) {<br />+                    rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ITEM,<br />+                        item,<br />+                        "Invalid IPv6 mask");<br />+                    return -rte_errno;<br />+                }<br />+                key->tc =<br />+                    (uint8_t)((ipv6_spec->hdr.vtc_flow & <br />+                                RTE_IPV6_HDR_TC_MASK) >> <br />+                                RTE_IPV6_HDR_TC_SHIFT);<br />+                key_mask->tc =<br />+                    
(uint8_t)((ipv6_mask->hdr.vtc_flow & <br />+                                RTE_IPV6_HDR_TC_MASK) >> <br />+                                RTE_IPV6_HDR_TC_SHIFT);<br />+<br />+                key->nw_proto = ipv6_spec->hdr.proto;<br />+                key_mask->nw_proto = ipv6_mask->hdr.proto;<br />+<br />+                rte_memcpy(key->src_ip,<br />+                             &ipv6_spec->hdr.src_addr, 16);<br />+                rte_memcpy(key->dst_ip,<br />+                             &ipv6_spec->hdr.dst_addr, 16);<br />+                rte_memcpy(key_mask->src_ip,<br />+                             &ipv6_mask->hdr.src_addr, 16);<br />+                rte_memcpy(key_mask->dst_ip,<br />+                             &ipv6_mask->hdr.dst_addr, 16);<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_TCP:<br />+            tcp_spec = item->spec;<br />+            tcp_mask = item->mask;<br />+<br />+            if (tcp_spec && tcp_mask) {<br />+                /* Check TCP mask and update input set */<br />+                if (tcp_mask->hdr.sent_seq ||<br />+                    tcp_mask->hdr.recv_ack ||<br />+                    tcp_mask->hdr.data_off ||<br />+                    tcp_mask->hdr.tcp_flags ||<br />+                    tcp_mask->hdr.rx_win ||<br />+                    tcp_mask->hdr.cksum ||<br />+                    tcp_mask->hdr.tcp_urp ||<br />+                    (tcp_mask->hdr.src_port && <br />+                    tcp_mask->hdr.src_port != UINT16_MAX) ||<br />+                    (tcp_mask->hdr.dst_port && <br />+                    tcp_mask->hdr.dst_port != UINT16_MAX)) {<br />+                    rte_flow_error_set(error, EINVAL,<br />+                                 RTE_FLOW_ERROR_TYPE_ITEM,<br />+                                 item,<br />+                                 "Invalid TCP mask");<br />+                    return -rte_errno;<br />+                }<br />+<br />+                key->tp_src = 
tcp_spec->hdr.src_port;<br />+                key_mask->tp_src = tcp_mask->hdr.src_port;<br />+<br />+                key->tp_dst = tcp_spec->hdr.dst_port;<br />+                key_mask->tp_dst = tcp_mask->hdr.dst_port;<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_UDP:<br />+            udp_spec = item->spec;<br />+            udp_mask = item->mask;<br />+<br />+            if (udp_spec && udp_mask) {<br />+                /* Check UDP mask and update input set*/<br />+                if (udp_mask->hdr.dgram_len ||<br />+                    udp_mask->hdr.dgram_cksum ||<br />+                    (udp_mask->hdr.src_port && <br />+                    udp_mask->hdr.src_port != UINT16_MAX) ||<br />+                    (udp_mask->hdr.dst_port && <br />+                    udp_mask->hdr.dst_port != UINT16_MAX)) {<br />+                    rte_flow_error_set(error, EINVAL,<br />+                                     RTE_FLOW_ERROR_TYPE_ITEM,<br />+                                     item,<br />+                                     "Invalid UDP mask");<br />+                    return -rte_errno;<br />+                }<br />+<br />+                key->tp_src = udp_spec->hdr.src_port;<br />+                key_mask->tp_src = udp_mask->hdr.src_port;<br />+<br />+                key->tp_dst = udp_spec->hdr.dst_port;<br />+                key_mask->tp_dst = udp_mask->hdr.dst_port;<br />+            }<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_SCTP:<br />+            sctp_spec = item->spec;<br />+            sctp_mask = item->mask;<br />+<br />+            if (!(sctp_spec && sctp_mask))<br />+                break;<br />+<br />+            /* Check SCTP mask and update input set */<br />+            if (sctp_mask->hdr.cksum) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                           RTE_FLOW_ERROR_TYPE_ITEM,<br />+                           item,<br />+                           "Invalid 
sctp mask");<br />+                return -rte_errno;<br />+            }<br />+<br />+            /* Mask for SCTP src/dst ports not supported */<br />+            if (sctp_mask->hdr.src_port && <br />+                sctp_mask->hdr.src_port != UINT16_MAX)<br />+                return -rte_errno;<br />+            if (sctp_mask->hdr.dst_port && <br />+                sctp_mask->hdr.dst_port != UINT16_MAX)<br />+                return -rte_errno;<br />+<br />+            key->tp_src = sctp_spec->hdr.src_port;<br />+            key_mask->tp_src = sctp_mask->hdr.src_port;<br />+            key->tp_dst = sctp_spec->hdr.dst_port;<br />+            key_mask->tp_dst = sctp_mask->hdr.dst_port;<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_VXLAN:<br />+        {<br />+            vxlan_spec = item->spec;<br />+            vxlan_mask = item->mask;<br />+            static const struct rte_flow_item_vxlan flow_item_vxlan_mask = {<br />+                .vni = {0xff, 0xff, 0xff},<br />+            };<br />+            if (!(vxlan_spec && vxlan_mask))<br />+                break;<br />+            if (memcmp(vxlan_mask, &flow_item_vxlan_mask,<br />+                sizeof(struct rte_flow_item_vxlan))) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                                 RTE_FLOW_ERROR_TYPE_ITEM,<br />+                                 item,<br />+                                 "Invalid vxlan mask");<br />+                    return -rte_errno;<br />+            }<br />+            rte_memcpy(key->vni, vxlan_spec->vni, 3);<br />+            rte_memcpy(key_mask->vni, vxlan_mask->vni, 3);<br />+            break;<br />+        }<br />+        default:<br />+                return rte_flow_error_set(error, ENOTSUP,<br />+                                     RTE_FLOW_ERROR_TYPE_ITEM,<br />+                                     NULL, "item not supported");<br />+        }<br />+    }<br />+<br />+    data_bitwise(key_mask, 
sizeof(*key_mask));<br />+    return 0;<br />+}<br />+<br />+static inline int<br />+validate_action_rss(struct rte_eth_dev *dev,<br />+             const struct rte_flow_action *action,<br />+             struct rte_flow_error *error)<br />+{<br />+    const struct rte_flow_action_rss *rss = action->conf;<br />+<br />+    if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT && <br />+        rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      &rss->func,<br />+                    "RSS hash function not supported");<br />+    if (rss->level > 1)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      &rss->level,<br />+                      "tunnel RSS is not supported");<br />+    /* allow RSS key_len 0 in case of NULL (default) RSS key. */<br />+    if (rss->key_len == 0 && rss->key != NULL)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      &rss->key_len,<br />+                      "RSS hash key length 0");<br />+    if (rss->key_len > 0 && rss->key_len < ZXDH_RSS_HASH_KEY_LEN)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      &rss->key_len,<br />+                      "RSS hash key too small");<br />+    if (rss->key_len > ZXDH_RSS_HASH_KEY_LEN)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      &rss->key_len,<br />+                      "RSS hash key too large");<br />+    if (rss->queue_num > dev->data->nb_rx_queues)<br />+        return rte_flow_error_set(error, ENOTSUP,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      
&rss->queue_num,<br />+                      "number of queues too large");<br />+    if (!rss->queue_num)<br />+        return rte_flow_error_set(error, EINVAL,<br />+                      RTE_FLOW_ERROR_TYPE_ACTION_CONF,<br />+                      NULL, "No queues configured");<br />+    return 0;<br />+}<br />+<br />+static int<br />+fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,<br />+        const struct rte_flow_item *item,<br />+        struct zxdh_flow *dh_flow)<br />+{<br />+    const struct rte_flow_item *items;<br />+    const struct rte_flow_item_eth *item_eth;<br />+    const struct rte_flow_item_vlan *item_vlan;<br />+    const struct rte_flow_item_ipv4 *item_ipv4;<br />+    const struct rte_flow_item_ipv6 *item_ipv6;<br />+    const struct rte_flow_item_udp *item_udp;<br />+    const struct rte_flow_item_vxlan *item_vxlan;<br />+    uint32_t i = 0;<br />+    rte_be32_t addr;<br />+<br />+    for (i = 0; i < ACTION_VXLAN_ENCAP_ITEMS_NUM; i++) {<br />+        items = &item[i];<br />+        switch (items->type) {<br />+        case RTE_FLOW_ITEM_TYPE_ETH:<br />+            item_eth = items->spec;<br />+            rte_memcpy(&dh_flow->encap0.dst_mac1, item_eth->dst.addr_bytes, 2);<br />+            rte_memcpy(&dh_flow->encap1.src_mac1, item_eth->src.addr_bytes, 2);<br />+            rte_memcpy(&dh_flow->encap0.dst_mac2, &item_eth->dst.addr_bytes[2], 4);<br />+            rte_memcpy(&dh_flow->encap1.src_mac2, &item_eth->src.addr_bytes[2], 4);<br />+            dh_flow->encap0.dst_mac1 = rte_bswap16(dh_flow->encap0.dst_mac1);<br />+            dh_flow->encap1.src_mac1 = rte_bswap16(dh_flow->encap1.src_mac1);<br />+            dh_flow->encap0.dst_mac2 = rte_bswap32(dh_flow->encap0.dst_mac2);<br />+            dh_flow->encap1.src_mac2 = rte_bswap32(dh_flow->encap1.src_mac2);<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_VLAN:<br />+            item_vlan = items->spec;<br />+            dh_flow->encap1.vlan_tci = 
item_vlan->hdr.vlan_tci;<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_IPV4:<br />+            item_ipv4 = items->spec;<br />+            dh_flow->encap0.ethtype = 0;<br />+            dh_flow->encap0.tos = item_ipv4->hdr.type_of_service;<br />+            dh_flow->encap0.ttl = item_ipv4->hdr.time_to_live;<br />+            addr = rte_bswap32(item_ipv4->hdr.src_addr);<br />+            rte_memcpy((uint32_t *)dh_flow->encap1.sip.ip_addr + 3, &addr, 4);<br />+            addr = rte_bswap32(item_ipv4->hdr.dst_addr);<br />+            rte_memcpy((uint32_t *)dh_flow->encap0.dip.ip_addr + 3, &addr, 4);<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_IPV6:<br />+            item_ipv6 = items->spec;<br />+            dh_flow->encap0.ethtype = 1;<br />+            dh_flow->encap0.tos =<br />+                    (item_ipv6->hdr.vtc_flow & RTE_IPV6_HDR_TC_MASK) >> <br />+                        RTE_IPV6_HDR_TC_SHIFT;<br />+            dh_flow->encap0.ttl = item_ipv6->hdr.hop_limits;<br />+            rte_memcpy(dh_flow->encap1.sip.ip_addr, &item_ipv6->hdr.src_addr, 16);<br />+            dh_flow->encap1.sip.ip_addr[0] =<br />+                rte_bswap32(dh_flow->encap1.sip.ip_addr[0]);<br />+            dh_flow->encap1.sip.ip_addr[1] =<br />+                rte_bswap32(dh_flow->encap1.sip.ip_addr[1]);<br />+            dh_flow->encap1.sip.ip_addr[2] =<br />+                rte_bswap32(dh_flow->encap1.sip.ip_addr[2]);<br />+            dh_flow->encap1.sip.ip_addr[3] =<br />+                rte_bswap32(dh_flow->encap1.sip.ip_addr[3]);<br />+            rte_memcpy(dh_flow->encap0.dip.ip_addr, &item_ipv6->hdr.dst_addr, 16);<br />+            dh_flow->encap0.dip.ip_addr[0] =<br />+                    rte_bswap32(dh_flow->encap0.dip.ip_addr[0]);<br />+            dh_flow->encap0.dip.ip_addr[1] =<br />+                    rte_bswap32(dh_flow->encap0.dip.ip_addr[1]);<br />+            dh_flow->encap0.dip.ip_addr[2] =<br />+                    
rte_bswap32(dh_flow->encap0.dip.ip_addr[2]);<br />+            dh_flow->encap0.dip.ip_addr[3] =<br />+                    rte_bswap32(dh_flow->encap0.dip.ip_addr[3]);<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_UDP:<br />+            item_udp = items->spec;<br />+            dh_flow->encap0.tp_dst = item_udp->hdr.dst_port;<br />+            dh_flow->encap0.tp_dst = rte_bswap16(dh_flow->encap0.tp_dst);<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_VXLAN:<br />+            item_vxlan = items->spec;<br />+            dh_flow->encap0.vni = item_vxlan->vni[0] * 65536 +<br />+                    item_vxlan->vni[1] * 256 + item_vxlan->vni[2];<br />+            break;<br />+        case RTE_FLOW_ITEM_TYPE_END:<br />+        default:<br />+            break;<br />+        }<br />+    }<br />+    dh_flow->encap0.hit_flag = 1;<br />+    dh_flow->encap1.hit_flag = 1;<br />+<br />+    return 0;<br />+}<br />+<br />+static int<br />+fd_flow_parse_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions,<br />+             struct rte_flow_error *error, struct zxdh_flow *dh_flow)<br />+{<br />+    struct zxdh_flow_info *flow = NULL;<br />+    struct fd_flow_result *result = NULL;<br />+    const struct rte_flow_item *enc_item = NULL;<br />+    uint8_t action_bitmap = 0;<br />+    uint32_t dest_num = 0;<br />+    uint32_t mark_num = 0;<br />+    uint32_t counter_num = 0;<br />+    int ret;<br />+<br />+    rte_errno = 0;<br />+    if (dh_flow) {<br />+        flow = &dh_flow->flowentry;<br />+    } else {<br />+        flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);<br />+        if (flow == NULL) {<br />+            rte_flow_error_set(error, EINVAL,<br />+                     RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                     "Failed to allocate memory ");<br />+            return -rte_errno;<br />+        }<br />+    }<br />+    result = &flow->fd_flow.result;<br />+    action_bitmap = result->action_idx;<br />+<br />+   
 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {<br />+        switch (actions->type) {<br />+        case RTE_FLOW_ACTION_TYPE_RSS:<br />+        {<br />+            dest_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_RSS_BIT)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi rss action no support.");<br />+                goto free_flow;<br />+            }<br />+            ret = validate_action_rss(dev, actions, error);<br />+            if (ret)<br />+                goto free_flow;<br />+            action_bitmap |= (1 << FD_ACTION_RSS_BIT);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_MARK:<br />+        {<br />+            mark_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_MARK_BIT)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi mark action no support.");<br />+                goto free_flow;<br />+            }<br />+            const struct rte_flow_action_mark *act_mark = actions->conf;<br />+            result->mark_fd_id = rte_cpu_to_le_32(act_mark->id);<br />+            action_bitmap |= (1 << FD_ACTION_MARK_BIT);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_COUNT:<br />+        {<br />+            counter_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_COUNT_BIT)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi count action no support.");<br />+                goto free_flow;<br />+            }<br />+            const struct rte_flow_action_count *act_count = actions->conf;<br />+            if (act_count->id > MAX_FLOW_COUNT_NUM) {<br />+                rte_flow_error_set(error, 
EINVAL,<br />+                            RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                            "count action id no support.");<br />+                goto free_flow;<br />+            };<br />+            result->countid = act_count->id;<br />+            action_bitmap |= (1 << FD_ACTION_COUNT_BIT);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_QUEUE:<br />+        {<br />+            dest_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_QUEUE_BIT)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi queue action no support.");<br />+                goto free_flow;<br />+            }<br />+            const struct rte_flow_action_queue *act_q;<br />+            act_q = actions->conf;<br />+            if (act_q->index >= dev->data->nb_rx_queues) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "Invalid queue ID");<br />+                goto free_flow;<br />+            }<br />+            ret = zxdh_hw_qid_to_logic_qid(dev, act_q->index << 1);<br />+            if (ret < 0) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "Invalid phy queue ID .");<br />+                goto free_flow;<br />+            }<br />+            result->qid = rte_cpu_to_le_16(ret);<br />+            action_bitmap |= (1 << FD_ACTION_QUEUE_BIT);<br />+<br />+            PMD_DRV_LOG(DEBUG, "QID RET 0x%x", result->qid);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_DROP:<br />+        {<br />+            dest_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_DROP_BIT)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        
RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi drop action no support.");<br />+                goto free_flow;<br />+            }<br />+            action_bitmap |= (1 << FD_ACTION_DROP_BIT);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:<br />+        {<br />+            dest_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_VXLAN_DECAP)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi drop action no support.");<br />+                goto free_flow;<br />+            }<br />+            action_bitmap |= (1 << FD_ACTION_VXLAN_DECAP);<br />+            break;<br />+        }<br />+        case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:<br />+            enc_item = ((const struct rte_flow_action_vxlan_encap *)<br />+                   actions->conf)->definition;<br />+            if (dh_flow != NULL)<br />+                fd_flow_parse_vxlan_encap(dev, enc_item, dh_flow);<br />+            dest_num++;<br />+            if (action_bitmap & (1 << FD_ACTION_VXLAN_ENCAP)) {<br />+                rte_flow_error_set(error, EINVAL,<br />+                        RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                        "multi drop action no support.");<br />+                goto free_flow;<br />+            }<br />+            action_bitmap |= (1 << FD_ACTION_VXLAN_ENCAP);<br />+            break;<br />+        default:<br />+            rte_flow_error_set(error, EINVAL,<br />+                RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+                "Invalid action.");<br />+            goto free_flow;<br />+        }<br />+    }<br />+<br />+    if (dest_num >= 2) {<br />+        rte_flow_error_set(error, EINVAL,<br />+               RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+               "Unsupported action combination");<br />+        return -rte_errno;<br />+    }<br 
/>+<br />+    if (mark_num >= 2) {<br />+        rte_flow_error_set(error, EINVAL,<br />+               RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+               "Too many mark actions");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (counter_num >= 2) {<br />+        rte_flow_error_set(error, EINVAL,<br />+               RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+               "Too many count actions");<br />+        return -rte_errno;<br />+    }<br />+<br />+    if (dest_num + mark_num + counter_num == 0) {<br />+        rte_flow_error_set(error, EINVAL,<br />+               RTE_FLOW_ERROR_TYPE_ACTION, actions,<br />+               "Empty action");<br />+        return -rte_errno;<br />+    }<br />+<br />+    result->action_idx = action_bitmap;<br />+    return 0;<br />+<br />+free_flow:<br />+    if (!dh_flow)<br />+        rte_free(flow);<br />+    return -rte_errno;<br />+}<br />+<br />+static int<br />+fd_parse_pattern_action(struct rte_eth_dev *dev,<br />+            const struct rte_flow_attr *attr,<br />+            const struct rte_flow_item pattern[],<br />+            const struct rte_flow_action *actions,<br />+            struct rte_flow_error *error, struct zxdh_flow *dh_flow)<br />+{<br />+    int ret = 0;<br />+    ret = fd_flow_parse_attr(dev, attr, error, dh_flow);<br />+    if (ret < 0)<br />+        return -rte_errno;<br />+    ret = fd_flow_parse_pattern(dev, pattern, error, dh_flow);<br />+    if (ret < 0)<br />+        return -rte_errno;<br />+<br />+    ret = fd_flow_parse_action(dev, actions, error, dh_flow);<br />+    if (ret < 0)<br />+        return -rte_errno;<br />+    return 0;<br />+}<br />+<br />+struct dh_flow_engine pf_fd_engine = {<br />+    .apply = pf_fd_hw_apply,<br />+    .destroy = pf_fd_hw_destroy,<br />+    .query_count = pf_fd_hw_query_count,<br />+    .parse_pattern_action = fd_parse_pattern_action,<br />+    .type = FLOW_TYPE_FD_TCAM,<br />+};<br />+<br />+<br />+static int<br 
/>+vf_flow_msg_process(enum zxdh_msg_type msg_type, struct rte_eth_dev *dev,<br />+        struct zxdh_flow *dh_flow, struct rte_flow_error *error,<br />+        struct rte_flow_query_count *count)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    struct zxdh_msg_info msg_info = {0};<br />+    struct zxdh_flow_op_msg *flow_msg = &msg_info.data.flow_msg;<br />+<br />+    uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};<br />+    void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);<br />+    void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body_addr, flow_rsp);<br />+    uint8_t flow_op_rsp[sizeof(struct zxdh_flow_op_rsp)] = {0};<br />+    uint16_t len = sizeof(struct zxdh_flow_op_rsp) - 4;<br />+    struct zxdh_flow_op_rsp *flow_rsp = (struct zxdh_flow_op_rsp *)flow_op_rsp;<br />+<br />+    dh_flow->hash_search_index = hw->hash_search_index;<br />+    rte_memcpy(&flow_msg->dh_flow, dh_flow, sizeof(struct zxdh_flow));<br />+<br />+    zxdh_msg_head_build(hw, msg_type, &msg_info);<br />+    ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),<br />+            (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));<br />+    zxdh_adjust_flow_op_rsp_memory_layout(flow_rsp_addr, len, flow_op_rsp);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "port %d flow op %d failed ret %d", hw->port_id, msg_type, ret);<br />+        if (ret == -2) {<br />+            PMD_DRV_LOG(ERR, "port %d  flow %d failed: cause %s",<br />+                 hw->port_id, msg_type, flow_rsp->error.reason);<br />+            rte_flow_error_set(error, EBUSY,<br />+                     RTE_FLOW_ERROR_TYPE_HANDLE, NULL,<br />+                     flow_rsp->error.reason);<br />+        } else {<br />+            rte_flow_error_set(error, EBUSY,<br />+                     RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,<br />+                     "msg channel error");<br 
/>+        }<br />+        return ret;<br />+    }<br />+<br />+    if (msg_type == ZXDH_FLOW_HW_ADD)<br />+        dh_flow->flowentry.hw_idx = flow_rsp->dh_flow.flowentry.hw_idx;<br />+    if (count)<br />+        rte_memcpy((void *)count, &flow_rsp->count, sizeof(flow_rsp->count));<br />+<br />+    return ret;<br />+}<br />+<br />+static int<br />+vf_fd_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,<br />+        struct rte_flow_error *error, uint16_t vport __rte_unused,<br />+        uint16_t pcieid __rte_unused)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    ret =  vf_flow_msg_process(ZXDH_FLOW_HW_ADD, dev, dh_flow, error, NULL);<br />+    if (!ret) {<br />+        uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;<br />+        if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||<br />+                ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {<br />+            hw->vxlan_fd_num++;<br />+            if (hw->vxlan_fd_num == 1) {<br />+                set_vxlan_enable(dev, 1, error);<br />+                PMD_DRV_LOG(DEBUG, "vf set_vxlan_enable");<br />+            }<br />+        }<br />+    }<br />+    return ret;<br />+}<br />+<br />+static int<br />+vf_fd_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,<br />+        struct rte_flow_error *error, uint16_t vport __rte_unused,<br />+        uint16_t pcieid __rte_unused)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    ret = vf_flow_msg_process(ZXDH_FLOW_HW_DEL, dev, dh_flow, error, NULL);<br />+    if (!ret) {<br />+        uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;<br />+        if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||<br />+                ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {<br />+            hw->vxlan_fd_num--;<br />+            if (hw->vxlan_fd_num == 0) {<br />+                
set_vxlan_enable(dev, 0, error);<br />+                PMD_DRV_LOG(DEBUG, "vf set_vxlan_disable");<br />+            }<br />+        }<br />+    }<br />+    return ret;<br />+}<br />+<br />+static int<br />+vf_fd_query_count(struct rte_eth_dev *dev,<br />+        struct zxdh_flow *dh_flow,<br />+        struct rte_flow_query_count *count,<br />+        struct rte_flow_error *error)<br />+{<br />+    int ret = 0;<br />+    ret = vf_flow_msg_process(ZXDH_FLOW_HW_GET, dev, dh_flow, error, count);<br />+    return ret;<br />+}<br />+<br />+<br />+static struct dh_flow_engine vf_fd_engine = {<br />+    .apply = vf_fd_apply,<br />+    .destroy = vf_fd_destroy,<br />+    .parse_pattern_action = fd_parse_pattern_action,<br />+    .query_count = vf_fd_query_count,<br />+    .type = FLOW_TYPE_FD_TCAM,<br />+};<br />+<br />+void zxdh_flow_init(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *priv =  dev->data->dev_private;<br />+    if (priv->is_pf)<br />+        zxdh_register_flow_engine(&pf_fd_engine);<br />+    else<br />+        zxdh_register_flow_engine(&vf_fd_engine);<br />+    TAILQ_INIT(&priv->dh_flow_list);<br />+}<br />+<br />+const struct rte_flow_ops zxdh_flow_ops = {<br />+    .validate = zxdh_flow_validate,<br />+    .create = zxdh_flow_create,<br />+    .destroy = zxdh_flow_destroy,<br />+    .flush = zxdh_flow_flush,<br />+    .query = zxdh_flow_query,<br />+    .dev_dump = zxdh_flow_dev_dump,<br />+};<br />+<br />+int<br />+zxdh_flow_ops_get(struct rte_eth_dev *dev __rte_unused,<br />+        const struct rte_flow_ops **ops)<br />+{<br />+    *ops = &zxdh_flow_ops;<br />+<br />+    return 0;<br />+}<br />+<br />+void<br />+zxdh_flow_release(struct rte_eth_dev *dev)<br />+{<br />+    struct rte_flow_error error = {0};<br />+    const struct rte_flow_ops *flow_ops = NULL;<br />+<br />+    if (dev->dev_ops && dev->dev_ops->flow_ops_get)<br />+        dev->dev_ops->flow_ops_get(dev, &flow_ops);<br />+    if (flow_ops && flow_ops->flush)<br />+        
flow_ops->flush(dev, &error);
}
diff --git a/drivers/net/zxdh/zxdh_flow.h b/drivers/net/zxdh/zxdh_flow.h
new file mode 100644
index 0000000000..cbcf71b3e1
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.h
@@ -0,0 +1,237 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2024 ZTE Corporation
 */

#ifndef ZXDH_FLOW_H
#define ZXDH_FLOW_H

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_arp.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_icmp.h>
#include <rte_ip.h>
#include <rte_sctp.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_byteorder.h>
#include <rte_flow_driver.h>

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_GROUP                  1
#define ZXDH_MAX_FLOW_NUM          2048
#define MAX_FLOW_COUNT_NUM         ZXDH_MAX_FLOW_NUM
#define ZXDH_FLOW_GROUP_TCAM       1

/* Printf helpers for dotted-quad IPv4 (host-order uint32) logging. */
#ifndef IPv4_BYTES
#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
#define IPv4_BYTES(addr) \
        (uint8_t)(((addr) >> 24) & 0xFF),\
        (uint8_t)(((addr) >> 16) & 0xFF),\
        (uint8_t)(((addr) >> 8) & 0xFF),\
        (uint8_t)((addr) & 0xFF)
#endif

/* Printf helpers for a 16-byte IPv6 address given as a byte array. */
#ifndef IPv6_BYTES
#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:" \
                        "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_BYTES(addr) \
    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], \
    addr[8], addr[9], addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]
#endif

/* Bit positions OR'ed into fd_flow_result.action_idx (7 usable bits). */
enum {
    FD_ACTION_VXLAN_ENCAP = 0,
    FD_ACTION_VXLAN_DECAP = 1,
    FD_ACTION_RSS_BIT = 2,
    FD_ACTION_COUNT_BIT = 3,
    FD_ACTION_DROP_BIT = 4,
    FD_ACTION_MARK_BIT = 5,
    FD_ACTION_QUEUE_BIT = 6,
};

/*
 * TCAM lookup key.  Used both as the match value and (in key_mask) as the
 * per-field mask.  Layout mirrors the hardware entry - do not reorder.
 */
struct fd_flow_key {
    struct rte_ether_addr mac_dst; /**< Destination MAC. */
    struct rte_ether_addr mac_src; /**< Source MAC. */
    rte_be16_t ether_type; /**< EtherType  */
    union {
        struct {
            rte_be16_t cvlan_pri:4; /**< vlanid 0xfff  is  valid */
            rte_be16_t cvlan_vlanid:12; /**< vlanid 0xfff  is  valid */
        };
        rte_be16_t  vlan_tci;
    };

    uint8_t  src_ip[16];  /** ip src; IPv4 occupies the last 4 bytes */
    uint8_t  dst_ip[16];  /** ip dst; IPv4 occupies the last 4 bytes */
    uint8_t  rsv0;        /* reserved */
    union {
        uint8_t  tos;     /* IPv4 type of service */
        uint8_t  tc;      /* IPv6 traffic class */
    };
    uint8_t  nw_proto;    /* L4 protocol (IPv4 proto / IPv6 next header) */
    uint8_t  frag_flag;   /* 1: fragmented packet, 0: not fragmented */
    rte_be16_t  tp_src;   /* L4 source port */
    rte_be16_t  tp_dst;   /* L4 destination port */

    uint8_t rsv1;         /* reserved */
    uint8_t vni[3];       /* VXLAN network identifier */

    rte_be16_t vfid;      /* owning virtual function id */
    uint8_t rsv2[18];     /* reserved / pad to hardware entry size */
};

/* TCAM hit result: destination queue, action bitmap and per-action args. */
struct fd_flow_result {
    rte_le16_t qid;       /* destination physical queue id */
    uint8_t rsv0;

    uint8_t action_idx:7; /* OR of FD_ACTION_* bits */
    uint8_t hit_flag:1;

    rte_le32_t mark_fd_id;          /* id reported for the MARK action */
    rte_le32_t countid:20;          /* flow counter index */
    rte_le32_t encap1_index:12;     /* index of the encap1 record */

    rte_le16_t encap0_index:12;     /* index of the encap0 record */
    rte_le16_t rsv1:4;
    uint8_t rss_hash_factor;
    uint8_t rss_hash_alg;
};

/* One complete TCAM entry: key, mask and result. */
struct fd_flow_entry {
    struct fd_flow_key key;
    struct fd_flow_key key_mask;
    struct fd_flow_result result;
};

/* 64-bit hit packet/byte counters split into hi/lo 32-bit halves. */
struct flow_stats {
    uint32_t hit_pkts_hi;
    uint32_t hit_pkts_lo;
    uint32_t hit_bytes_hi;
    uint32_t hit_bytes_lo;
};


enum dh_flow_type {
     FLOW_TYPE_FLOW = 0,
     FLOW_TYPE_FD_TCAM,
     FLOW_TYPE_FD_SW,
};

/* Driver-side flow record; currently only the fd_flow variant exists. */
struct zxdh_flow_info {
    enum dh_flow_type flowtype;
    uint16_t hw_idx;  /* hardware entry index assigned on apply */
    uint16_t rsv;
    union {
        struct fd_flow_entry fd_flow;
    };
};

/* 128-bit address holder shared by the encap records (IPv4 in last word). */
struct tunnel_encap_ip {
    rte_be32_t ip_addr[4];
};

/* First half of the VXLAN encap context (outer dst MAC/IP, VNI, TTL). */
struct tunnel_encap0 {
    uint8_t tos;
    uint8_t rsv2[2];
    uint8_t rsv1: 6;
    uint8_t ethtype: 1;   /* outer header: 0 = IPv4, 1 = IPv6 */
    uint8_t hit_flag: 1;
    uint16_t dst_mac1;    /* outer dst MAC, first 2 bytes (swapped) */
    uint16_t tp_dst;      /* outer UDP destination port */
    uint32_t dst_mac2;    /* outer dst MAC, last 4 bytes (swapped) */
    uint32_t ttl:8;
    uint32_t vni:24;
    struct tunnel_encap_ip dip;
};

/* Second half of the VXLAN encap context (outer src MAC/IP, VLAN). */
struct tunnel_encap1 {
    uint32_t rsv1: 31;
    uint32_t hit_flag: 1;
    uint16_t src_mac1;    /* outer src MAC, first 2 bytes (swapped) */
    uint16_t vlan_tci;
    uint32_t src_mac2;    /* outer src MAC, last 4 bytes (swapped) */
    uint32_t rsv;
    struct tunnel_encap_ip sip;
};

/* Full flow description as sent to the PF / programmed into hardware. */
struct zxdh_flow {
    uint8_t direct; /* 0 in 1 out */
    uint8_t group;  /* rule group id */
    uint8_t pri; /* priority */
    uint8_t hash_search_index; /* hash table selector used by the PF */
    struct zxdh_flow_info  flowentry;
    struct tunnel_encap0  encap0;
    struct tunnel_encap1  encap1;
};
TAILQ_HEAD(dh_flow_list, rte_flow);

/* Public flow handle returned to applications via rte_flow APIs. */
struct rte_flow {
    TAILQ_ENTRY(rte_flow) next;
    void *driver_flow;  /* points at the engine-specific zxdh_flow */
    uint32_t type;
    uint16_t port_id;
};

/* Per-counter reference bookkeeping protected by its own spinlock. */
struct count_res {
    rte_spinlock_t count_lock;
    uint8_t count_ref;
    uint8_t rev[3];
};

/* Struct to store engine created. */
struct dh_flow_engine {
    TAILQ_ENTRY(dh_flow_engine) node;
    enum dh_flow_type type;
    /* Program a parsed flow into hardware (PF) or relay it (VF). */
    int (*apply)
        (struct rte_eth_dev *dev,
         struct zxdh_flow *dh_flow,
         struct rte_flow_error *error,
         uint16_t vport, uint16_t pcieid);

    /* Translate attr/pattern/actions into a zxdh_flow; NULL dh_flow
     * means validate only.
     */
    int (*parse_pattern_action)
        (struct rte_eth_dev *dev,
         const struct rte_flow_attr *attr,
         const struct rte_flow_item pattern[],
         const struct rte_flow_action *actions,
         struct rte_flow_error *error,
         struct zxdh_flow *dh_flow);

    /* Remove a previously applied flow from hardware. */
    int (*destroy)
        (struct rte_eth_dev *dev,
         struct zxdh_flow *dh_flow,
         struct rte_flow_error *error,
         uint16_t vport, uint16_t pcieid);

    /* Read the COUNT action counters for a flow. */
    int (*query_count)
        (struct rte_eth_dev *dev,
         struct zxdh_flow *dh_flow,
         struct rte_flow_query_count *count,
         struct rte_flow_error *error);
};
TAILQ_HEAD(dh_engine_list, dh_flow_engine);

void zxdh_register_flow_engine(struct dh_flow_engine *engine);

extern const struct rte_flow_ops zxdh_flow_ops;

void zxdh_flow_global_init(void);
void zxdh_flow_init(struct rte_eth_dev *dev);
int pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
                 struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
int pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
                 struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
int pf_fd_hw_query_count(struct rte_eth_dev *dev,
                        struct zxdh_flow *flow,
                        struct rte_flow_query_count *count,
                        struct rte_flow_error *error);
int zxdh_flow_ops_get(struct rte_eth_dev *dev, const struct
rte_flow_ops **ops);<br />+void zxdh_flow_release(struct rte_eth_dev *dev);<br />+<br />+#endif /* ZXDH_FLOW_H */<br />diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c<br />index 02ecd93b12..7e73833bf4 100644<br />--- a/drivers/net/zxdh/zxdh_msg.c<br />+++ b/drivers/net/zxdh/zxdh_msg.c<br />@@ -19,6 +19,7 @@<br /> #include "zxdh_tables.h" <br /> #include "zxdh_np.h" <br /> #include "zxdh_common.h" <br />+#include "zxdh_flow.h" <br />  <br /> #define ZXDH_REPS_INFO_FLAG_USABLE  0x00<br /> #define ZXDH_BAR_SEQID_NUM_MAX      256<br />@@ -1234,7 +1235,8 @@ zxdh_vf_promisc_uninit(struct zxdh_hw *hw, union zxdh_virport_num vport)<br /> }<br />  <br /> static int<br />-zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />         void *res_info, uint16_t *res_len)<br /> {<br />     struct zxdh_port_attr_table port_attr = {0};<br />@@ -1253,6 +1255,9 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br />     port_attr.hash_search_index = pf_hw->hash_search_index;<br />     port_attr.port_base_qid = vf_init_msg->base_qid;<br />     uint16_t vfid = zxdh_vport_to_vfid(port);<br />+    int vf_index = VF_IDX(pcieid);<br />+<br />+    pf_hw->vfinfo[vf_index].vport = vport;<br />  <br />     ret = zxdh_set_port_attr(pf_hw, vfid, &port_attr);<br />     if (ret) {<br />@@ -1265,6 +1270,12 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br />         PMD_DRV_LOG(ERR, "vf_promisc_table_init failed, code:%d", ret);<br />         goto proc_end;<br />     }<br />+<br />+    ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, pf_hw->dev_sd->dtb_sd.queueid,<br />+                ZXDH_SDT_FD_TABLE, vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);<br />+    if (ret)<br />+        PMD_DRV_LOG(ERR, "flow table delete failed. 
code:%d", ret);<br />+<br />     ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);<br />     *res_len = sizeof(uint8_t);<br />  <br />@@ -1276,30 +1287,30 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport)<br />+zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport, uint16_t pcieid)<br /> {<br />-    uint16_t vf_id = vport.vfid;<br />+    uint16_t vf_index = VF_IDX(pcieid);<br />     int i;<br />     int ret = 0;<br />  <br />     for (i = 0; (i != ZXDH_MAX_MAC_ADDRS); ++i) {<br />-        if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {<br />+        if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {<br />             ret = zxdh_del_mac_table(hw, vport.vport,<br />-                    &hw->vfinfo[vf_id].vf_mac[i],<br />+                    &hw->vfinfo[vf_index].vf_mac[i],<br />                     hw->hash_search_index, 0, 0);<br />             if (ret) {<br />                 PMD_DRV_LOG(ERR, "vf_del_mac_failed. 
code:%d", ret);<br />                 return ret;<br />             }<br />-            memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));<br />+            memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));<br />         }<br />     }<br />     return ret;<br /> }<br />  <br /> static int<br />-zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,<br />-        uint16_t vport, void *cfg_data __rte_unused,<br />+zxdh_vf_port_uninit(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data __rte_unused,<br />         void *res_info, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "uninit";<br />@@ -1317,7 +1328,7 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,<br />         goto proc_end;<br />     }<br />  <br />-    ret = zxdh_mac_clear(pf_hw, vport_num);<br />+    ret = zxdh_mac_clear(pf_hw, vport_num, pcieid);<br />     if (ret) {<br />         PMD_DRV_LOG(ERR, "zxdh_mac_clear failed, code:%d", ret);<br />         goto proc_end;<br />@@ -1342,7 +1353,8 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,<br /> }<br />  <br /> static int<br />-zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />         void *reply_body, uint16_t *reply_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "add mac";<br />@@ -1350,13 +1362,13 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />     struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;<br />     struct rte_ether_addr *addr = &mac_filter->mac;<br />     int i = 0, ret = 0;<br />-    uint16_t vf_id = port.vfid;<br />+    uint16_t vf_index = VF_IDX(pcieid);<br />     port.vport = vport;<br />     void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body, reply_data);<br />     void *mac_reply_msg_addr = ZXDH_ADDR_OF(msg_reply_body, 
reply_body, mac_reply_msg);<br />  <br />     for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++)<br />-        if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], addr))<br />+        if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], addr))<br />             goto success;<br />  <br />     ret = zxdh_add_mac_table(hw, vport, addr, hw->hash_search_index, 0, 0);<br />@@ -1372,8 +1384,8 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />         goto failure;<br />     }<br />     for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {<br />-        if (rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {<br />-            memcpy(&hw->vfinfo[vf_id].vf_mac[i], addr, 6);<br />+        if (rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {<br />+            memcpy(&hw->vfinfo[vf_index].vf_mac[i], addr, 6);<br />             break;<br />         }<br />     }<br />@@ -1393,14 +1405,15 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />-    void *res_info, uint16_t *res_len)<br />+zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     int ret, i = 0;<br />     struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;<br />     union zxdh_virport_num  port = (union zxdh_virport_num)vport;<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "del mac";<br />-    uint16_t  vf_id = port.vfid;<br />+    uint16_t vf_index = VF_IDX(pcieid);<br />     void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, reply_data);<br />  <br />     PMD_DRV_LOG(DEBUG, "[PF GET MSG FROM VF]--vf mac to del.");<br />@@ -1415,8 +1428,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />     }<br />  <br />     for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {<br />- 
       if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], &mac_filter->mac))<br />-            memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));<br />+        if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], &mac_filter->mac))<br />+            memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));<br />     }<br />  <br />     sprintf(str, "vport 0x%x del mac ret 0x%x\n", port.vport, ret);<br />@@ -1432,7 +1445,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />         void *reply, uint16_t *res_len)<br /> {<br />     struct zxdh_port_promisc_msg *promisc_msg = (struct zxdh_port_promisc_msg *)cfg_data;<br />@@ -1463,7 +1477,8 @@ zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />         void *res_info, uint16_t *res_len, uint8_t enable)<br /> {<br />     struct zxdh_vlan_filter *vlan_filter = cfg_data;<br />@@ -1488,21 +1503,24 @@ zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_<br /> }<br />  <br /> static int<br />-zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />         void *res_info, uint16_t *res_len)<br /> {<br />-    return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 1);<br />+    return zxdh_vf_vlan_filter_table_process(hw, vport, 
pcieid, cfg_data, res_info, res_len, 1);<br /> }<br />  <br /> static int<br />-zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />         void *res_info, uint16_t *res_len)<br /> {<br />-    return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 0);<br />+    return zxdh_vf_vlan_filter_table_process(hw, vport, pcieid, cfg_data, res_info, res_len, 0);<br /> }<br />  <br /> static int<br />-zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />         void *reply, uint16_t *res_len)<br /> {<br />     struct zxdh_vlan_filter_set *vlan_filter = cfg_data;<br />@@ -1526,7 +1544,8 @@ zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />         void *reply, uint16_t *res_len)<br /> {<br />     struct zxdh_vlan_offload *vlan_offload = cfg_data;<br />@@ -1553,8 +1572,9 @@ zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,<br />-            void *reply, uint16_t *res_len)<br />+zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,<br />+        void *reply, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";<br />     struct zxdh_port_attr_table vport_att = {0};<br />@@ -1582,8 +1602,9 @@ zxdh_vf_rss_hf_get(struct zxdh_hw 
*hw, uint16_t vport, void *cfg_data __rte_unus<br /> }<br />  <br /> static int<br />-zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />-            void *reply, uint16_t *res_len)<br />+zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *reply, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";<br />     struct zxdh_rss_hf *rss_hf = cfg_data;<br />@@ -1618,8 +1639,9 @@ zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />-            void *reply, uint16_t *res_len)<br />+zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *reply, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_enable";<br />     struct zxdh_rss_enable *rss_enable = cfg_data;<br />@@ -1654,7 +1676,8 @@ zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />+zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />         void *reply, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";<br />@@ -1676,7 +1699,8 @@ zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br /> }<br />  <br /> static int<br />-zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,<br />+zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,<br />         void *reply, uint16_t *res_len)<br /> {<br />     char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";<br />@@ -1699,8 
+1723,9 @@ zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_u<br /> }<br />  <br /> static int<br />-zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br />-    void *res_info, uint16_t *res_len)<br />+zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     RTE_ASSERT(!cfg_data || !pf_hw);<br />     if (res_info)<br />@@ -1762,8 +1787,8 @@ zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,<br />  <br /> static int<br /> zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,<br />-        void *cfg_data, void *res_info,<br />-        uint16_t *res_len)<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     struct zxdh_np_stats_updata_msg *np_stats_query =<br />              (struct zxdh_np_stats_updata_msg  *)cfg_data;<br />@@ -1944,10 +1969,9 @@ zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,<br /> }<br />  <br /> static int<br />-zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,<br />-    uint16_t vport, void *cfg_data,<br />-    void *res_info,<br />-    uint16_t *res_len)<br />+zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     struct zxdh_mtr_stats_query  *zxdh_mtr_stats_query =<br />             (struct zxdh_mtr_stats_query  *)cfg_data;<br />@@ -1977,11 +2001,9 @@ zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,<br /> }<br />  <br /> static int<br />-zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,<br />-    uint16_t vport,<br />-    void *cfg_data,<br />-    void *res_info,<br />-    uint16_t *res_len)<br />+zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, 
void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     if (!cfg_data || !res_len || !res_info) {<br />         PMD_DRV_LOG(ERR, " get profileid invalid inparams");<br />@@ -2017,11 +2039,9 @@ zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,<br /> }<br />  <br /> static int<br />-zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,<br />-    uint16_t vport,<br />-    void *cfg_data,<br />-    void *res_info,<br />-    uint16_t *res_len)<br />+zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     if (!cfg_data || !res_len || !res_info) {<br />         PMD_DRV_LOG(ERR, " del profileid  invalid inparams");<br />@@ -2059,11 +2079,9 @@ zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,<br /> }<br />  <br /> static int<br />-zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,<br />-    uint16_t vport,<br />-    void *cfg_data,<br />-    void *res_info,<br />-    uint16_t *res_len)<br />+zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     int ret = 0;<br />  <br />@@ -2098,11 +2116,9 @@ zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,<br /> }<br />  <br /> static int<br />-zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,<br />-    uint16_t vport,<br />-    void *cfg_data,<br />-    void *res_info,<br />-    uint16_t *res_len)<br />+zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br /> {<br />     int ret = 0;<br />  <br />@@ -2131,6 +2147,121 @@ zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,<br />     return 0;<br /> }<br />  <br />+<br />+static int<br />+zxdh_vf_flow_hw_add(struct zxdh_hw *pf_hw, uint16_t vport,<br 
/>+         uint16_t pcieid, void *cfg_data,<br />+         void *res_info, uint16_t *res_len)<br />+{<br />+    if (!cfg_data || !res_len || !res_info) {<br />+        PMD_DRV_LOG(ERR, "invalid inparams");<br />+        return -1;<br />+    }<br />+    struct rte_flow_error error = {0};<br />+    int ret = 0;<br />+    struct zxdh_flow_op_msg  *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;<br />+    struct zxdh_flow  *dh_flow;<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);<br />+    *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;<br />+<br />+    ret = pf_fd_hw_apply(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow add failed ret :%d",<br />+            pf_hw->vport.vport, vport, ret);<br />+        return -1;<br />+    }<br />+    void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);<br />+    dh_flow = flow_rsp_addr;<br />+    dh_flow->flowentry.hw_idx = flow_entry->dh_flow.flowentry.hw_idx;<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);<br />+    return 0;<br />+}<br />+<br />+static int<br />+zxdh_vf_flow_hw_del(struct zxdh_hw *pf_hw, uint16_t vport,<br />+            uint16_t pcieid, void *cfg_data,<br />+            void *res_info, uint16_t *res_len)<br />+{<br />+    if (!cfg_data || !res_len || !res_info) {<br />+        PMD_DRV_LOG(ERR, "invalid inparams");<br />+        return -1;<br />+    }<br />+    struct rte_flow_error error = {0};<br />+    int ret = 0;<br />+    struct zxdh_flow_op_msg  *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);<br />+    *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;<br />+<br />+    ret = pf_fd_hw_destroy(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow del failed ret :%d",<br />+            pf_hw->vport.vport, 
vport, ret);<br />+        return -1;<br />+    }<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);<br />+    return 0;<br />+}<br />+<br />+static int<br />+zxdh_vf_flow_hw_get(struct zxdh_hw *pf_hw, uint16_t vport,<br />+        uint16_t pcieid __rte_unused, void *cfg_data,<br />+        void *res_info, uint16_t *res_len)<br />+{<br />+    if (!cfg_data || !res_len || !res_info) {<br />+        PMD_DRV_LOG(ERR, "invalid inparams");<br />+        return -1;<br />+    }<br />+<br />+    void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);<br />+    void *count_addr = (uint8_t *)flow_rsp_addr + sizeof(struct zxdh_flow);<br />+    struct rte_flow_error error = {0};<br />+    int ret = 0;<br />+    struct zxdh_flow_op_msg  *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;<br />+    struct zxdh_flow  *dh_flow;<br />+<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);<br />+    *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;<br />+<br />+    PMD_DRV_LOG(INFO, "handle %d", flow_entry->dh_flow.flowentry.hw_idx);<br />+    ret = pf_fd_hw_query_count(pf_hw->eth_dev, &flow_entry->dh_flow, count_addr, &error);<br />+    if (ret) {<br />+        PMD_DRV_LOG(DEBUG, "pf 0x%x for vf 0x%x flow get failed ret :%d",<br />+            pf_hw->vport.vport, vport, ret);<br />+        return -1;<br />+    }<br />+    PMD_DRV_LOG(INFO, " res len :%d", *res_len);<br />+    dh_flow = flow_rsp_addr;<br />+    rte_memcpy(&dh_flow->flowentry, &flow_entry->dh_flow.flowentry, sizeof(dh_flow->flowentry));<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);<br />+    return 0;<br />+}<br />+<br />+static int<br />+zxdh_vf_flow_hw_flush(struct zxdh_hw *pf_hw, uint16_t vport,<br />+            uint16_t pcieid __rte_unused, void *cfg_data,<br />+            void *res_info, uint16_t *res_len)<br />+{<br />+    if (!cfg_data || !res_len || !res_info) {<br />+        PMD_DRV_LOG(ERR, "invalid inparams");<br />+        return -1;<br 
/>+    }<br />+    int ret = 0;<br />+    uint16_t queue_id = pf_hw->dev_sd->dtb_sd.queueid;<br />+<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);<br />+    *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;<br />+<br />+    ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, queue_id, ZXDH_SDT_FD_TABLE,<br />+                vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "flow flush failed. code:%d", ret);<br />+        return -1;<br />+    }<br />+<br />+    ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);<br />+    return 0;<br />+}<br />+<br /> static const zxdh_msg_process_callback zxdh_proc_cb[] = {<br />     [ZXDH_NULL] = NULL,<br />     [ZXDH_VF_PORT_INIT] = zxdh_vf_port_init,<br />@@ -2154,6 +2285,10 @@ static const zxdh_msg_process_callback zxdh_proc_cb[] = {<br />     [ZXDH_PLCR_CAR_PROFILE_ID_DELETE] =  zxdh_vf_mtr_hw_profile_del,<br />     [ZXDH_PLCR_CAR_QUEUE_CFG_SET] = zxdh_vf_mtr_hw_plcrflow_cfg,<br />     [ZXDH_PLCR_CAR_PROFILE_CFG_SET] = zxdh_vf_mtr_hw_profile_cfg,<br />+    [ZXDH_FLOW_HW_ADD] = zxdh_vf_flow_hw_add,<br />+    [ZXDH_FLOW_HW_DEL] = zxdh_vf_flow_hw_del,<br />+    [ZXDH_FLOW_HW_GET] = zxdh_vf_flow_hw_get,<br />+    [ZXDH_FLOW_HW_FLUSH] = zxdh_vf_flow_hw_flush,<br /> };<br />  <br /> static inline int<br />@@ -2168,7 +2303,7 @@ zxdh_config_process_callback(struct zxdh_hw *hw, struct zxdh_msg_info *msg_info,<br />         return -1;<br />     }<br />     if (zxdh_proc_cb[msghead->msg_type]) {<br />-        ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport,<br />+        ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport, msghead->pcieid,<br />                     (void *)&msg_info->data, res, res_len);<br />         if (!ret)<br />             ZXDH_SET(msg_reply_body, res, flag, ZXDH_REPS_SUCC);<br />diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h<br />index 7dad6f7335..c20bb98195 100644<br />--- a/drivers/net/zxdh/zxdh_msg.h<br 
/>+++ b/drivers/net/zxdh/zxdh_msg.h<br />@@ -240,6 +240,11 @@ enum zxdh_msg_type {<br />     ZXDH_PLCR_CAR_QUEUE_CFG_SET = 40,<br />     ZXDH_PORT_METER_STAT_GET = 42,<br />  <br />+    ZXDH_FLOW_HW_ADD = 46,<br />+    ZXDH_FLOW_HW_DEL = 47,<br />+    ZXDH_FLOW_HW_GET = 48,<br />+    ZXDH_FLOW_HW_FLUSH = 49,<br />+<br />     ZXDH_MSG_TYPE_END,<br /> };<br />  <br />@@ -418,6 +423,21 @@ struct zxdh_ifc_mtr_profile_info_bits {<br />     uint8_t profile_id[0x40];<br /> };<br />  <br />+struct err_reason {<br />+    uint8_t err_type;<br />+    uint8_t rsv[3];<br />+    char reason[512];<br />+};<br />+<br />+struct zxdh_flow_op_rsp {<br />+    struct zxdh_flow  dh_flow;<br />+    uint8_t rev[4];<br />+    union {<br />+        struct rte_flow_query_count count;<br />+        struct err_reason error;<br />+    };<br />+};<br />+<br /> struct zxdh_ifc_msg_reply_body_bits {<br />     uint8_t flag[0x8];<br />     union {<br />@@ -432,6 +452,7 @@ struct zxdh_ifc_msg_reply_body_bits {<br />         struct zxdh_ifc_agent_mac_module_eeprom_msg_bits module_eeprom_msg;<br />         struct zxdh_ifc_mtr_profile_info_bits  mtr_profile_info;<br />         struct zxdh_ifc_mtr_stats_bits hw_mtr_stats;<br />+        struct zxdh_flow_op_rsp  flow_rsp;<br />     };<br /> };<br />  <br />@@ -535,6 +556,10 @@ struct zxdh_plcr_profile_free {<br />     uint16_t profile_id;<br /> };<br />  <br />+struct zxdh_flow_op_msg {<br />+    struct zxdh_flow dh_flow;<br />+};<br />+<br /> struct zxdh_msg_info {<br />     union {<br />         uint8_t head_len[ZXDH_MSG_HEAD_LEN];<br />@@ -561,13 +586,15 @@ struct zxdh_msg_info {<br />         struct zxdh_plcr_profile_cfg zxdh_plcr_profile_cfg;<br />         struct zxdh_plcr_flow_cfg  zxdh_plcr_flow_cfg;<br />         struct zxdh_mtr_stats_query  zxdh_mtr_stats_query;<br />+        struct zxdh_flow_op_msg flow_msg;<br />     } data;<br /> };<br />  <br /> typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,<br />         void 
*reps_buffer, uint16_t *reps_len, void *dev);<br />-typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,<br />-    void *res_info, uint16_t *res_len);<br />+typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport,<br />+        uint16_t pcieid, void *cfg_data,<br />+        void *res_info, uint16_t *res_len);<br />  <br /> typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,<br />             void *reps_buffer, uint16_t *reps_len, void *dev);<br />diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h<br />index cb34e38be8..a227e09962 100644<br />--- a/drivers/net/zxdh/zxdh_tables.h<br />+++ b/drivers/net/zxdh/zxdh_tables.h<br />@@ -7,6 +7,8 @@<br />  <br /> #include <stdint.h> <br />  <br />+#include "zxdh_msg.h" <br />+<br /> /* eram */<br /> #define ZXDH_SDT_VPORT_ATT_TABLE          1<br /> #define ZXDH_SDT_PANEL_ATT_TABLE          2<br />@@ -16,6 +18,8 @@<br /> #define ZXDH_SDT_UNICAST_ATT_TABLE        10<br /> #define ZXDH_SDT_MULTICAST_ATT_TABLE      11<br /> #define ZXDH_SDT_PORT_VLAN_ATT_TABLE      16<br />+#define ZXDH_SDT_TUNNEL_ENCAP0_TABLE      28<br />+#define ZXDH_SDT_TUNNEL_ENCAP1_TABLE      29<br /> /* hash */<br /> #define ZXDH_SDT_L2_ENTRY_TABLE0          64<br /> #define ZXDH_SDT_L2_ENTRY_TABLE1          65<br />@@ -27,12 +31,14 @@<br /> #define ZXDH_SDT_MC_TABLE2                78<br /> #define ZXDH_SDT_MC_TABLE3                79<br />  <br />+#define ZXDH_SDT_FD_TABLE                 130<br />+<br /> #define ZXDH_PORT_VHCA_FLAG                       1<br /> #define ZXDH_PORT_RSS_HASH_FACTOR_FLAG            3<br /> #define ZXDH_PORT_HASH_ALG_FLAG                   4<br /> #define ZXDH_PORT_PHY_PORT_FLAG                   5<br /> #define ZXDH_PORT_LAG_ID_FLAG                     6<br />-<br />+#define ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF       7<br /> #define ZXDH_PORT_PF_VQM_VFID_FLAG                8<br />  <br /> #define ZXDH_PORT_MTU_FLAG      
                  10<br />@@ -169,7 +175,7 @@ struct zxdh_port_attr_table {<br />     uint8_t phy_port: 4;<br />  <br />     uint16_t lag_id : 3;<br />-    uint16_t rsv81 : 1;<br />+    uint16_t fd_vxlan_offload_en : 1;<br />     uint16_t pf_vfid : 11;<br />     uint16_t rsv82 : 1;<br />  <br />--  <br />2.27.0<br />