[dpdk-dev] [PATCH 44/58] net/bnxt: refactor ULP mapper and parser

Venkat Duvvuru venkatkumar.duvvuru at broadcom.com
Sun May 30 10:59:15 CEST 2021


From: Kishore Padmanabha <kishore.padmanabha at broadcom.com>

1. The internal and external exact match table resource types
are combined, since the resource handle already contains the encoded
type indicating whether it is an internal or external exact match entry.
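
   To illustrate the effect (a simplified sketch of the mapper changes
   below, not a literal copy of the driver flow), the merged
   BNXT_ULP_RESOURCE_FUNC_EM_TABLE is dispatched once and any
   internal-only handling keys off the table's resource_type instead of
   a separate resource function:

        case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
                rc = ulp_mapper_em_tbl_process(parms, tbl);
                break;

        /* inside the EM key build, only internal entries are transposed */
        if (tbl->resource_type == TF_MEM_INTERNAL)
                ulp_blob_perform_byte_reverse(&key);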

2. When a flow doesn't hit the offloaded rules, the default action is
to send it to the kernel (L2 driver interface). In order to do that,
TRUFLOW must know the kernel interface's (PF's) default vnic id.
This patch fetches the PF's default vnic id from the dpdk core and
stores it in the port database. It also stores the mac address for
future use. Renamed the compute field enums for the layer 4 ports.
Added support for a port database opcode that can fetch port details,
such as the mac address, which can then be populated in the L2 context
entry.
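
   A minimal sketch of how the new port-table field opcode can resolve
   the parent mac address (function and enum names are from this patch;
   the surrounding flow is simplified, not the literal mapper code):

        uint8_t *mac = NULL;
        uint32_t port_id = ULP_COMP_FLD_IDX_RD(parms,
                                               BNXT_ULP_CF_IDX_DEV_PORT_ID);

        /* BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC selects the parent mac */
        if (ulp_port_db_parent_mac_addr_get(parms->ulp_ctx, port_id, &mac))
                return -EINVAL;
        /* push the mac into the blob used for the L2 context entry */
        if (!ulp_blob_push(blob, mac, bitlen))
                return -EINVAL;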

3. Both the active and default bit sets need to be considered to check
whether a specific flow type is enabled or not.
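
   For example, mirroring the check in ulp_flow_db_active_flows_bit_is_set(),
   the flow type is derived from the combination of the two bits (a
   standalone sketch with hypothetical names, compilable outside the
   driver):

        #include <stdbool.h>
        #include <stdio.h>

        /* REGULAR sets only the active-regular bit, DEFAULT sets only
         * the active-default bit, and an RID entry sets both bits.
         */
        enum fdb_type { FDB_REGULAR, FDB_DEFAULT, FDB_RID };

        static bool fdb_type_is_active(enum fdb_type type, bool reg, bool dflt)
        {
                switch (type) {
                case FDB_REGULAR:
                        return reg && !dflt;
                case FDB_DEFAULT:
                        return !reg && dflt;
                case FDB_RID:
                        return reg && dflt;
                default:
                        return false;
                }
        }

        int main(void)
        {
                printf("%d\n", fdb_type_is_active(FDB_RID, true, true));     /* 1 */
                printf("%d\n", fdb_type_is_active(FDB_REGULAR, true, true)); /* 0 */
                return 0;
        }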

4. The ulp mapper fetches the dpdk port id from the compute field index
BNXT_ULP_CF_IDX_DEV_PORT_ID, which is eventually used to get the
interface's mac address. However, the compute field array was not
populated with the dpdk port id at the index BNXT_ULP_CF_IDX_DEV_PORT_ID.
This is fixed by populating the compute field array correctly.

5. Some dpdk applications may accumulate the flow counters while others
may not. When the application accumulates the counters, the PMD need
not do the accumulation itself, and vice versa, in order to report the
correct flow counters.
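
   For example, assuming the standard EAL devargs syntax, the new
   "accum-stats" devarg (added in this patch) could be passed as
   follows, e.g. with testpmd:

        dpdk-testpmd -a 0000:00:0d.0,accum-stats=1 -- -i

   With accum-stats=1 the PMD accumulates the hardware counters into its
   software table; with accum-stats=0 it reports the last hardware read
   as-is and leaves the accumulation to the application.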

6. A pointer to bp is added to the open session parameters to support
shared sessions.

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru at broadcom.com>
Signed-off-by: Kishore Padmanabha <kishore.padmanabha at broadcom.com>
Signed-off-by: Jay Ding <jay.ding at broadcom.com>
Reviewed-by: Lance Richardson <lance.richardson at broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde at broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom at broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur at broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher at broadcom.com>
---
 drivers/net/bnxt/bnxt.h                       |   6 +
 drivers/net/bnxt/bnxt_ethdev.c                |  86 +++++++
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c            |   5 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp.h            |   1 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c       |   2 +
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c          |  25 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c         |  95 +++++---
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h         |   5 +-
 drivers/net/bnxt/tf_ulp/ulp_mapper.c          |  52 +++-
 drivers/net/bnxt/tf_ulp/ulp_port_db.c         |  37 +++
 drivers/net/bnxt/tf_ulp/ulp_port_db.h         |  15 ++
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.c      | 114 +++++++--
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.h      |   8 +
 .../net/bnxt/tf_ulp/ulp_template_db_enum.h    |  59 +++--
 .../tf_ulp/ulp_template_db_stingray_class.c   |   4 +-
 .../tf_ulp/ulp_template_db_wh_plus_class.c    |  48 ++--
 drivers/net/bnxt/tf_ulp/ulp_template_struct.h |   3 +-
 drivers/net/bnxt/tf_ulp/ulp_tun.c             | 227 +++++++++++++-----
 drivers/net/bnxt/tf_ulp/ulp_tun.h             |  44 ++--
 19 files changed, 637 insertions(+), 199 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index d3ab57ab8d..246f51fddf 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -668,8 +668,11 @@ struct bnxt {
 	uint32_t			flags2;
 #define BNXT_FLAGS2_PTP_TIMESYNC_ENABLED	BIT(0)
 #define BNXT_FLAGS2_PTP_ALARM_SCHEDULED		BIT(1)
+#define	BNXT_FLAGS2_ACCUM_STATS_EN		BIT(2)
 #define BNXT_P5_PTP_TIMESYNC_ENABLED(bp)	\
 	((bp)->flags2 & BNXT_FLAGS2_PTP_TIMESYNC_ENABLED)
+#define	BNXT_ACCUM_STATS_EN(bp)			\
+	((bp)->flags2 & BNXT_FLAGS2_ACCUM_STATS_EN)
 
 	uint16_t		chip_num;
 #define CHIP_NUM_58818		0xd818
@@ -981,7 +984,10 @@ int32_t
 bnxt_ulp_create_vfr_default_rules(struct rte_eth_dev *vfr_ethdev);
 int32_t
 bnxt_ulp_delete_vfr_default_rules(struct bnxt_representor *vfr);
+void bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type,
+			uint8_t *mac, uint8_t *parent_mac);
 uint16_t bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
+uint16_t bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
 struct bnxt *bnxt_get_bp(uint16_t port);
 uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif,
 		       enum bnxt_ulp_intf_type type);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ebb326b0d1..1c0eeb76b7 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -87,6 +87,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
+#define	BNXT_DEVARG_ACCUM_STATS	"accum-stats"
 #define BNXT_DEVARG_FLOW_XSTAT	"flow-xstat"
 #define BNXT_DEVARG_MAX_NUM_KFLOWS  "max-num-kflows"
 #define BNXT_DEVARG_REPRESENTOR	"representor"
@@ -99,6 +100,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 
 static const char *const bnxt_dev_args[] = {
 	BNXT_DEVARG_REPRESENTOR,
+	BNXT_DEVARG_ACCUM_STATS,
 	BNXT_DEVARG_FLOW_XSTAT,
 	BNXT_DEVARG_MAX_NUM_KFLOWS,
 	BNXT_DEVARG_REP_BASED_PF,
@@ -110,6 +112,12 @@ static const char *const bnxt_dev_args[] = {
 	NULL
 };
 
+/*
+ * accum-stats == false to disable flow counter accumulation
+ * accum-stats == true to enable flow counter accumulation
+ */
+#define	BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)	((accum_stats) > 1)
+
 /*
  * flow_xstat == false to disable the feature
  * flow_xstat == true to enable the feature
@@ -4837,6 +4845,39 @@ bnxt_get_svif(uint16_t port_id, bool func_svif,
 	return func_svif ? bp->func_svif : bp->port_svif;
 }
 
+void
+bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type,
+		   uint8_t *mac, uint8_t *parent_mac)
+{
+	struct rte_eth_dev *eth_dev;
+	struct bnxt *bp;
+
+	if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF &&
+	    type != BNXT_ULP_INTF_TYPE_PF)
+		return;
+
+	eth_dev = &rte_eth_devices[port];
+	bp = eth_dev->data->dev_private;
+	memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF)
+		memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN);
+}
+
+uint16_t
+bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
+{
+	struct rte_eth_dev *eth_dev;
+	struct bnxt *bp;
+
+	if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF)
+		return 0;
+
+	eth_dev = &rte_eth_devices[port];
+	bp = eth_dev->data->dev_private;
+
+	return bp->parent->vnic;
+}
 uint16_t
 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
 {
@@ -5200,6 +5241,45 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
 	return 0;
 }
 
+static int
+bnxt_parse_devarg_accum_stats(__rte_unused const char *key,
+			      const char *value, void *opaque_arg)
+{
+	struct bnxt *bp = opaque_arg;
+	unsigned long accum_stats;
+	char *end = NULL;
+
+	if (!value || !opaque_arg) {
+		PMD_DRV_LOG(ERR,
+			    "Invalid parameter passed to accum-stats devargs.\n");
+		return -EINVAL;
+	}
+
+	accum_stats = strtoul(value, &end, 10);
+	if (end == NULL || *end != '\0' ||
+	    (accum_stats == ULONG_MAX && errno == ERANGE)) {
+		PMD_DRV_LOG(ERR,
+			    "Invalid parameter passed to accum-stats devargs.\n");
+		return -EINVAL;
+	}
+
+	if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) {
+		PMD_DRV_LOG(ERR,
+			    "Invalid value passed to accum-stats devargs.\n");
+		return -EINVAL;
+	}
+
+	if (accum_stats) {
+		bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN;
+		PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n");
+	} else {
+		bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN;
+		PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n");
+	}
+
+	return 0;
+}
+
 static int
 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
 			     const char *value, void *opaque_arg)
@@ -5516,6 +5596,12 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
 	if (ret)
 		goto err;
 
+	/*
+	 * Handler for "accum-stats" devarg.
+	 * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1"
+	 */
+	rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS,
+			   bnxt_parse_devarg_accum_stats, bp);
 	/*
 	 * Handler for "max_num_kflows" devarg.
 	 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32"
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 458c37b4e9..d68cc889c6 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -287,6 +287,7 @@ ulp_ctx_session_open(struct bnxt *bp,
 		return rc;
 	}
 
+	params.bp = bp;
 	rc = tf_open_session(&bp->tfp, &params);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Failed to open TF session - %s, rc = %d\n",
@@ -1071,6 +1072,10 @@ bnxt_ulp_port_init(struct bnxt *bp)
 	}
 	/* create the default rules */
 	bnxt_ulp_create_df_rules(bp);
+
+	if (BNXT_ACCUM_STATS_EN(bp))
+		bp->ulp_ctx->cfg_data->accum_stats = true;
+
 	BNXT_TF_DBG(DEBUG, "BNXT Port:%d ULP port init\n",
 		    bp->eth_dev->data->port_id);
 	return rc;
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index c2e71430ec..47c9c802e2 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -67,6 +67,7 @@ struct bnxt_ulp_data {
 #define	BNXT_ULP_TUN_ENTRY_INVALID	-1
 #define	BNXT_ULP_MAX_TUN_CACHE_ENTRIES	16
 	struct bnxt_tun_cache_entry	tun_tbl[BNXT_ULP_MAX_TUN_CACHE_ENTRIES];
+	bool				accum_stats;
 };
 
 struct bnxt_ulp_context {
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
index 0af2f6aaa6..59d75bc496 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
@@ -143,6 +143,8 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
 	/* copy the device port id and direction for further processing */
 	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_INCOMING_IF,
 			    dev->data->port_id);
+	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DEV_PORT_ID,
+			    dev->data->port_id);
 	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_SVIF_FLAG,
 			    BNXT_ULP_INVALID_SVIF_VAL);
 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 054a76b5ee..65029139e6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -317,8 +317,18 @@ static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
 	/* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */
 	sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx;
 	sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx];
-	sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
-	sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
+	/* Some dpdk applications may accumulate the flow counters while some
+	 * may not. In cases where the application is accumulating the counters
+	 * the PMD need not do the accumulation itself and viceversa to report
+	 * the correct flow counters.
+	 */
+	if (ctxt->cfg_data->accum_stats) {
+		sw_acc_tbl_entry->pkt_count += FLOW_CNTR_PKTS(stats, dparms);
+		sw_acc_tbl_entry->byte_count += FLOW_CNTR_BYTES(stats, dparms);
+	} else {
+		sw_acc_tbl_entry->pkt_count = FLOW_CNTR_PKTS(stats, dparms);
+		sw_acc_tbl_entry->byte_count = FLOW_CNTR_BYTES(stats, dparms);
+	}
 
 	/* Update the parent counters if it is child flow */
 	if (sw_acc_tbl_entry->parent_flow_id) {
@@ -628,11 +638,10 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt,
 		pthread_mutex_unlock(&ulp_fc_info->fc_lock);
 	} else if (params.resource_sub_type ==
 			BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC) {
-		/* Get the stats from the parent child table */
-		ulp_flow_db_parent_flow_count_get(ctxt,
-						  flow_id,
-						  &count->hits,
-						  &count->bytes);
+		/* Get stats from the parent child table */
+		ulp_flow_db_parent_flow_count_get(ctxt, flow_id,
+						  &count->hits, &count->bytes,
+						  count->reset);
 		count->hits_set = 1;
 		count->bytes_set = 1;
 	} else {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index 1326f79ff5..47c8c48456 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -48,17 +48,21 @@ ulp_flow_db_active_flows_bit_set(struct bnxt_ulp_flow_db *flow_db,
 	uint32_t a_idx = idx / ULP_INDEX_BITMAP_SIZE;
 
 	if (flag) {
-		if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+		if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type ==
+		    BNXT_ULP_FDB_TYPE_RID)
 			ULP_INDEX_BITMAP_SET(f_tbl->active_reg_flows[a_idx],
 					     idx);
-		else
+		if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type ==
+		    BNXT_ULP_FDB_TYPE_RID)
 			ULP_INDEX_BITMAP_SET(f_tbl->active_dflt_flows[a_idx],
 					     idx);
 	} else {
-		if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+		if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type ==
+		    BNXT_ULP_FDB_TYPE_RID)
 			ULP_INDEX_BITMAP_RESET(f_tbl->active_reg_flows[a_idx],
 					       idx);
-		else
+		if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type ==
+		    BNXT_ULP_FDB_TYPE_RID)
 			ULP_INDEX_BITMAP_RESET(f_tbl->active_dflt_flows[a_idx],
 					       idx);
 	}
@@ -81,13 +85,21 @@ ulp_flow_db_active_flows_bit_is_set(struct bnxt_ulp_flow_db *flow_db,
 {
 	struct bnxt_ulp_flow_tbl *f_tbl = &flow_db->flow_tbl;
 	uint32_t a_idx = idx / ULP_INDEX_BITMAP_SIZE;
-
-	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
-		return ULP_INDEX_BITMAP_GET(f_tbl->active_reg_flows[a_idx],
-					    idx);
-	else
-		return ULP_INDEX_BITMAP_GET(f_tbl->active_dflt_flows[a_idx],
-					    idx);
+	uint32_t reg, dflt;
+
+	reg = ULP_INDEX_BITMAP_GET(f_tbl->active_reg_flows[a_idx], idx);
+	dflt = ULP_INDEX_BITMAP_GET(f_tbl->active_dflt_flows[a_idx], idx);
+
+	switch (flow_type) {
+	case BNXT_ULP_FDB_TYPE_REGULAR:
+		return (reg && !dflt);
+	case BNXT_ULP_FDB_TYPE_DEFAULT:
+		return (!reg && dflt);
+	case BNXT_ULP_FDB_TYPE_RID:
+		return (reg && dflt);
+	default:
+		return 0;
+	}
 }
 
 static inline enum tf_dir
@@ -140,8 +152,7 @@ ulp_flow_db_res_params_to_info(struct ulp_fdb_resource_info *resource_info,
 	}
 
 	/* Store the handle as 64bit only for EM table entries */
-	if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE &&
-	    params->resource_func != BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+	if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
 		resource_info->resource_hndl = (uint32_t)params->resource_hndl;
 		resource_info->resource_type = params->resource_type;
 		resource_info->resource_sub_type = params->resource_sub_type;
@@ -170,8 +181,7 @@ ulp_flow_db_res_info_to_params(struct ulp_fdb_resource_info *resource_info,
 	params->direction = ulp_flow_db_resource_dir_get(resource_info);
 	params->resource_func = ulp_flow_db_resource_func_get(resource_info);
 
-	if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE ||
-	    params->resource_func == BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+	if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
 		params->resource_hndl = resource_info->resource_em_handle;
 	} else if (params->resource_func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) {
 		params->resource_hndl = resource_info->resource_hndl;
@@ -213,7 +223,7 @@ ulp_flow_db_alloc_resource(struct bnxt_ulp_flow_db *flow_db)
 		return -ENOMEM;
 	}
 	size = (flow_tbl->num_flows / sizeof(uint64_t)) + 1;
-	size =  ULP_BYTE_ROUND_OFF_8(size);
+	size = ULP_BYTE_ROUND_OFF_8(size);
 	flow_tbl->active_reg_flows = rte_zmalloc("active reg flows", size,
 						 ULP_BUFFER_ALIGN_64_BYTE);
 	if (!flow_tbl->active_reg_flows) {
@@ -617,7 +627,7 @@ ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -674,7 +684,7 @@ ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -688,7 +698,7 @@ ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt,
 
 	/* check if the flow is active or not */
 	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
-		BNXT_TF_DBG(ERR, "flow does not exist\n");
+		BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
 		return -EINVAL;
 	}
 
@@ -769,7 +779,7 @@ ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -783,7 +793,7 @@ ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt,
 
 	/* check if the flow is active or not */
 	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
-		BNXT_TF_DBG(ERR, "flow does not exist\n");
+		BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
 		return -EINVAL;
 	}
 
@@ -868,8 +878,9 @@ ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
 		     enum bnxt_ulp_fdb_type flow_type,
 		     uint32_t fid)
 {
-	struct bnxt_ulp_flow_db *flow_db;
+	struct bnxt_tun_cache_entry *tun_tbl;
 	struct bnxt_ulp_flow_tbl *flow_tbl;
+	struct bnxt_ulp_flow_db *flow_db;
 
 	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
 	if (!flow_db) {
@@ -877,7 +888,7 @@ ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -892,7 +903,7 @@ ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
 
 	/* check if the flow is active or not */
 	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
-		BNXT_TF_DBG(ERR, "flow does not exist\n");
+		BNXT_TF_DBG(ERR, "flow does not exist %x:%x\n", flow_type, fid);
 		return -EINVAL;
 	}
 	flow_tbl->head_index--;
@@ -900,6 +911,7 @@ ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
 		BNXT_TF_DBG(ERR, "FlowDB: Head Ptr is zero\n");
 		return -ENOENT;
 	}
+
 	flow_tbl->flow_tbl_stack[flow_tbl->head_index] = fid;
 
 	/* Clear the flows bitmap */
@@ -908,12 +920,18 @@ ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
 	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
 		ulp_flow_db_func_id_set(flow_db, fid, 0);
 
+	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(ulp_ctxt);
+	if (!tun_tbl)
+		return -EINVAL;
+
+	ulp_clear_tun_inner_entry(tun_tbl, fid);
+
 	/* all good, return success */
 	return 0;
 }
 
 /*
- * Get the flow database entry details
+ *Get the flow database entry details
  *
  * ulp_ctxt [in] Ptr to ulp_context
  * flow_type [in] - specify default or regular
@@ -940,7 +958,7 @@ ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -996,10 +1014,14 @@ ulp_flow_db_next_entry_get(struct bnxt_ulp_flow_db *flow_db,
 	uint64_t *active_flows;
 	struct bnxt_ulp_flow_tbl *flowtbl = &flow_db->flow_tbl;
 
-	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
+	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR) {
 		active_flows = flowtbl->active_reg_flows;
-	else
+	} else if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT) {
 		active_flows = flowtbl->active_dflt_flows;
+	} else {
+		BNXT_TF_DBG(ERR, "Invalid flow type %x\n", flow_type);
+			return -EINVAL;
+	}
 
 	do {
 		/* increment the flow id to find the next valid flow id */
@@ -1192,7 +1214,7 @@ ulp_flow_db_resource_params_get(struct bnxt_ulp_context *ulp_ctx,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -1224,9 +1246,7 @@ ulp_flow_db_resource_params_get(struct bnxt_ulp_context *ulp_ctx,
 				}
 
 			} else if (resource_func ==
-				   BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE ||
-				   resource_func ==
-				   BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE) {
+				   BNXT_ULP_RESOURCE_FUNC_EM_TABLE) {
 				ulp_flow_db_res_info_to_params(fid_res,
 							       params);
 				return 0;
@@ -1594,7 +1614,7 @@ ulp_flow_db_child_flow_reset(struct bnxt_ulp_context *ulp_ctxt,
 		return -EINVAL;
 	}
 
-	if (flow_type > BNXT_ULP_FDB_TYPE_DEFAULT) {
+	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
 		BNXT_TF_DBG(ERR, "Invalid flow type\n");
 		return -EINVAL;
 	}
@@ -1812,9 +1832,8 @@ ulp_flow_db_parent_flow_count_update(struct bnxt_ulp_context *ulp_ctxt,
  */
 int32_t
 ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
-				  uint32_t parent_fid,
-				  uint64_t *packet_count,
-				  uint64_t *byte_count)
+				  uint32_t parent_fid, uint64_t *packet_count,
+				  uint64_t *byte_count, uint8_t count_reset)
 {
 	struct bnxt_ulp_flow_db *flow_db;
 	struct ulp_fdb_parent_child_db *p_pdb;
@@ -1835,6 +1854,10 @@ ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
 					p_pdb->parent_flow_tbl[idx].pkt_count;
 				*byte_count =
 					p_pdb->parent_flow_tbl[idx].byte_count;
+				if (count_reset) {
+					p_pdb->parent_flow_tbl[idx].pkt_count = 0;
+					p_pdb->parent_flow_tbl[idx].byte_count = 0;
+				}
 			}
 			return 0;
 		}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index f7dfd67bed..62c914833b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2019 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -390,7 +390,8 @@ int32_t
 ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
 				  uint32_t parent_fid,
 				  uint64_t *packet_count,
-				  uint64_t *byte_count);
+				  uint64_t *byte_count,
+				  uint8_t count_reset);
 
 /*
  * reset the parent accumulation counters
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index bd28556a4b..27c7c871b1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -18,6 +18,7 @@
 #include "ulp_flow_db.h"
 #include "tf_util.h"
 #include "ulp_template_db_tbl.h"
+#include "ulp_port_db.h"
 
 static uint8_t mapper_fld_ones[16] = {
 	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -450,10 +451,6 @@ ulp_mapper_em_entry_free(struct bnxt_ulp_context *ulp,
 	int32_t rc;
 
 	fparms.dir		= res->direction;
-	if (res->resource_func == BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE)
-		fparms.mem = TF_MEM_EXTERNAL;
-	else
-		fparms.mem = TF_MEM_INTERNAL;
 	fparms.flow_handle	= res->resource_hndl;
 
 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id);
@@ -883,6 +880,30 @@ ulp_mapper_ident_extract(struct bnxt_ulp_mapper_parms *parms,
 	return rc;
 }
 
+static int32_t
+ulp_mapper_field_port_db_process(struct bnxt_ulp_mapper_parms *parms,
+				 struct bnxt_ulp_mapper_field_info *fld,
+				 uint32_t port_id,
+				 uint16_t val16,
+				 uint8_t **val)
+{
+	enum bnxt_ulp_port_table port_data = val16;
+
+	switch (port_data) {
+	case BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC:
+		if (ulp_port_db_parent_mac_addr_get(parms->ulp_ctx, port_id,
+						    val)) {
+			BNXT_TF_DBG(ERR, "Invalid port id %u\n", port_id);
+			return -EINVAL;
+		}
+		break;
+	default:
+		BNXT_TF_DBG(ERR, "Invalid port_data %s\n", fld->description);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int32_t
 ulp_mapper_field_process_inc_dec(struct bnxt_ulp_mapper_field_info *fld,
 				 struct ulp_blob *blob,
@@ -938,6 +959,7 @@ ulp_mapper_field_process(struct bnxt_ulp_mapper_parms *parms,
 	uint16_t const_val = 0;
 	uint32_t update_flag = 0;
 	uint64_t src1_val64;
+	uint32_t port_id;
 
 	/* process the field opcode */
 	if (fld->field_opc != BNXT_ULP_FIELD_OPC_COND_OP) {
@@ -1081,6 +1103,20 @@ ulp_mapper_field_process(struct bnxt_ulp_mapper_parms *parms,
 					    name);
 				return -EINVAL;
 			}
+		} else if (fld->field_opc == BNXT_ULP_FIELD_OPC_PORT_TABLE) {
+			port_id = ULP_COMP_FLD_IDX_RD(parms, idx);
+			if (ulp_mapper_field_port_db_process(parms, fld,
+							     port_id, const_val,
+							     &val)) {
+				BNXT_TF_DBG(ERR, "%s field port table failed\n",
+					    name);
+				return -EINVAL;
+			}
+			if (!ulp_blob_push(blob, val, bitlen)) {
+				BNXT_TF_DBG(ERR, "%s push to blob failed\n",
+					    name);
+				return -EINVAL;
+			}
 		} else {
 			src1_val64 = ULP_COMP_FLD_IDX_RD(parms, idx);
 			if (ulp_mapper_field_process_inc_dec(fld, blob,
@@ -1951,7 +1987,7 @@ ulp_mapper_em_tbl_process(struct bnxt_ulp_mapper_parms *parms,
 		return rc;
 	}
 	/* do the transpose for the internal EM keys */
-	if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE)
+	if (tbl->resource_type == TF_MEM_INTERNAL)
 		ulp_blob_perform_byte_reverse(&key);
 
 	rc = bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx,
@@ -3021,8 +3057,7 @@ ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, uint32_t tid)
 		case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
 			rc = ulp_mapper_tcam_tbl_process(parms, tbl);
 			break;
-		case BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE:
-		case BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE:
+		case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
 			rc = ulp_mapper_em_tbl_process(parms, tbl);
 			break;
 		case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
@@ -3101,8 +3136,7 @@ ulp_mapper_resource_free(struct bnxt_ulp_context *ulp,
 	case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE:
 		rc = ulp_mapper_tcam_entry_free(ulp, tfp, res);
 		break;
-	case BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE:
-	case BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE:
+	case BNXT_ULP_RESOURCE_FUNC_EM_TABLE:
 		rc = ulp_mapper_em_entry_free(ulp, tfp, res);
 		break;
 	case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE:
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 94075784d8..2ee79ea3fe 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -182,6 +182,13 @@ int32_t	ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt,
 		func->ifindex = ifindex;
 	}
 
+	/* When there is no match, the default action is to send the packet to
+	 * the kernel. And to send it to the kernel, we need the PF's vnic id.
+	 */
+	func->func_parent_vnic = bnxt_get_parent_vnic_id(port_id, intf->type);
+	bnxt_get_iface_mac(port_id, intf->type, func->func_mac,
+			   func->func_parent_mac);
+
 	port_data = &port_db->phy_port_list[func->phy_port_id];
 	if (!port_data->port_valid) {
 		port_data->port_svif =
@@ -579,3 +586,33 @@ ulp_port_db_port_func_id_get(struct bnxt_ulp_context *ulp_ctxt,
 	}
 	return 0;
 }
+
+/*
+ * Api to get the parent mac address for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * mac_addr [out] mac address
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
+				uint32_t port_id, uint8_t **mac_addr)
+{
+	struct bnxt_ulp_port_db *port_db;
+	uint16_t func_id;
+
+	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
+	if (ulp_port_db_port_func_id_get(ulp_ctxt, port_id, &func_id)) {
+		BNXT_TF_DBG(ERR, "Invalid port_id %x\n", port_id);
+		return -EINVAL;
+	}
+
+	if (!port_db->ulp_func_id_tbl[func_id].func_valid) {
+		BNXT_TF_DBG(ERR, "Invalid func_id %x\n", func_id);
+		return -ENOENT;
+	}
+	*mac_addr = port_db->ulp_func_id_tbl[func_id].func_parent_mac;
+	return 0;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 7b85987a0c..b10a7ea58c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -46,6 +46,9 @@ struct ulp_func_if_info {
 	uint16_t		func_spif;
 	uint16_t		func_parif;
 	uint16_t		func_vnic;
+	uint8_t			func_mac[RTE_ETHER_ADDR_LEN];
+	uint16_t		func_parent_vnic;
+	uint8_t			func_parent_mac[RTE_ETHER_ADDR_LEN];
 	uint16_t		phy_port_id;
 	uint16_t		ifindex;
 };
@@ -272,4 +275,16 @@ int32_t
 ulp_port_db_port_func_id_get(struct bnxt_ulp_context *ulp_ctxt,
 			     uint16_t port_id, uint16_t *func_id);
 
+/*
+ * Api to get the parent mac address for a given port id.
+ *
+ * ulp_ctxt [in] Ptr to ulp context
+ * port_id [in] device port id
+ * mac_addr [out] mac address
+ *
+ * Returns 0 on success or negative number on failure.
+ */
+int32_t
+ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
+				uint32_t port_id, uint8_t **mac_addr);
 #endif /* _ULP_PORT_DB_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 5a2249f349..1522328a5d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -16,6 +16,7 @@
 #include "ulp_flow_db.h"
 #include "ulp_mapper.h"
 #include "ulp_tun.h"
+#include "ulp_template_db_tbl.h"
 
 /* Local defines for the parsing functions */
 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
@@ -240,6 +241,11 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
 					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
 					    parif);
 		}
+		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
+					    1);
+		}
 	}
 }
 
@@ -623,7 +629,7 @@ ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
 	}
 }
 
-/* Internal Function to indentify broadcast or multicast packets */
+/* Internal Function to identify broadcast or multicast packets */
 static int32_t
 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
 {
@@ -740,6 +746,7 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
 		field = ulp_rte_parser_fld_copy(field,
 						&vlan_tag,
 						sizeof(vlan_tag));
+
 		field = ulp_rte_parser_fld_copy(field,
 						&vlan_spec->inner_type,
 						sizeof(vlan_spec->inner_type));
@@ -764,7 +771,7 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
 		vlan_tag = htons(vlan_tag);
 
 		/*
-		 * The priortiy field is ignored since OVS is seting it as
+		 * The priority field is ignored since OVS is setting it as
 		 * wild card match and it is not supported. This is a work
 		 * around and shall be addressed in the future.
 		 */
@@ -960,7 +967,7 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
 				       &ipv4_mask->hdr.version_ihl,
 				       sizeof(ipv4_mask->hdr.version_ihl));
 		/*
-		 * The tos field is ignored since OVS is seting it as wild card
+		 * The tos field is ignored since OVS is setting it as wild card
 		 * match and it is not supported. This is a work around and
 		 * shall be addressed in the future.
 		 */
@@ -1008,6 +1015,13 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
 	}
 
+	/* Some of the PMD applications may set the protocol field
+	 * in the IPv4 spec but don't set the mask. So, consider
+	 * the mask in the proto value calculation.
+	 */
+	if (ipv4_mask)
+		proto &= ipv4_mask->hdr.next_proto_id;
+
 	if (proto == IPPROTO_GRE)
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
 
@@ -1108,8 +1122,8 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
 				       &vtcf_mask,
 				       size);
 		/*
-		 * The TC and flow lable field are ignored since OVS is seting
-		 * it for match and it is not supported.
+		 * The TC and flow label field are ignored since OVS is
+		 * setting it for match and it is not supported.
 		 * This is a work around and
 		 * shall be addressed in the future.
 		 */
@@ -1149,6 +1163,13 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
 	}
 
+	/* Some of the PMD applications may set the protocol field
+	 * in the IPv6 spec but don't set the mask. So, consider
+	 * the mask in proto value calculation.
+	 */
+	if (ipv6_mask)
+		proto &= ipv6_mask->hdr.proto;
+
 	if (proto == IPPROTO_GRE)
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
 
@@ -1182,7 +1203,7 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
 	uint32_t idx = params->field_idx;
 	uint32_t size;
-	uint16_t dport = 0, sport = 0;
+	uint16_t dport = 0;
 	uint32_t cnt;
 
 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
@@ -1200,7 +1221,6 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
 						&udp_spec->hdr.src_port,
 						size);
-		sport = udp_spec->hdr.src_port;
 		size = sizeof(udp_spec->hdr.dst_port);
 		field = ulp_rte_parser_fld_copy(field,
 						&udp_spec->hdr.dst_port,
@@ -1238,14 +1258,26 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
+		if (udp_mask && udp_mask->hdr.src_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+					    1);
+		if (udp_mask && udp_mask->hdr.dst_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+					    1);
 
 	} else {
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
+		if (udp_mask && udp_mask->hdr.src_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+					    1);
+		if (udp_mask && udp_mask->hdr.dst_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+					    1);
 
 		/* Update the field protocol hdr bitmap */
 		ulp_rte_l4_proto_type_update(params, dport);
@@ -1264,7 +1296,6 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
 	struct ulp_rte_hdr_field *field;
 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
 	uint32_t idx = params->field_idx;
-	uint16_t dport = 0, sport = 0;
 	uint32_t size;
 	uint32_t cnt;
 
@@ -1279,12 +1310,10 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
 	 * header fields
 	 */
 	if (tcp_spec) {
-		sport = tcp_spec->hdr.src_port;
 		size = sizeof(tcp_spec->hdr.src_port);
 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
 						&tcp_spec->hdr.src_port,
 						size);
-		dport = tcp_spec->hdr.dst_port;
 		size = sizeof(tcp_spec->hdr.dst_port);
 		field = ulp_rte_parser_fld_copy(field,
 						&tcp_spec->hdr.dst_port,
@@ -1358,13 +1387,25 @@ ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
+		if (tcp_mask && tcp_mask->hdr.src_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+					    1);
+		if (tcp_mask && tcp_mask->hdr.dst_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+					    1);
 	} else {
 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
-		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
+		if (tcp_mask && tcp_mask->hdr.src_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+					    1);
+		if (tcp_mask && tcp_mask->hdr.dst_port)
+			ULP_COMP_FLD_IDX_WR(params,
+					    BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+					    1);
 	}
 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
 	return BNXT_TF_RC_SUCCESS;
@@ -2257,3 +2298,40 @@ ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
 	return BNXT_TF_RC_SUCCESS;
 }
+
+int32_t
+ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
+			   struct ulp_rte_parser_params *params)
+{
+	const struct rte_flow_action_sample *sample;
+	int ret;
+
+	sample = action_item->conf;
+
+	/* if SAMPLE bit is set it means this sample action is nested within the
+	 * actions of another sample action; this is not allowed
+	 */
+	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
+			     BNXT_ULP_ACT_BIT_SAMPLE))
+		return BNXT_TF_RC_ERROR;
+
+	/* a sample action is only allowed as a shared action */
+	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
+			      BNXT_ULP_ACT_BIT_SHARED))
+		return BNXT_TF_RC_ERROR;
+
+	/* only a ratio of 1 i.e. 100% is supported */
+	if (sample->ratio != 1)
+		return BNXT_TF_RC_ERROR;
+
+	if (!sample->actions)
+		return BNXT_TF_RC_ERROR;
+
+	/* parse the nested actions for a sample action */
+	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
+	if (ret == BNXT_TF_RC_SUCCESS)
+		/* Update the act_bitmap with sample */
+		ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SAMPLE);
+
+	return ret;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
index cb9ae02371..48a20e84b1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
@@ -245,4 +245,12 @@ int32_t
 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
 			 struct ulp_rte_parser_params *params);
 
+int32_t
+ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
+			   struct ulp_rte_parser_params *params);
+
+int32_t
+ulp_rte_shared_act_handler(const struct rte_flow_action *action_item,
+			   struct ulp_rte_parser_params *params);
+
 #endif /* _ULP_RTE_PARSER_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index 418f6389eb..0223296480 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -145,10 +145,10 @@ enum bnxt_ulp_cf_idx {
 	BNXT_ULP_CF_IDX_I_L3 = 14,
 	BNXT_ULP_CF_IDX_O_L4 = 15,
 	BNXT_ULP_CF_IDX_I_L4 = 16,
-	BNXT_ULP_CF_IDX_O_L4_SPORT = 17,
-	BNXT_ULP_CF_IDX_O_L4_DPORT = 18,
-	BNXT_ULP_CF_IDX_I_L4_SPORT = 19,
-	BNXT_ULP_CF_IDX_I_L4_DPORT = 20,
+	BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT = 17,
+	BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT = 18,
+	BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT = 19,
+	BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT = 20,
 	BNXT_ULP_CF_IDX_DEV_PORT_ID = 21,
 	BNXT_ULP_CF_IDX_DRV_FUNC_SVIF = 22,
 	BNXT_ULP_CF_IDX_DRV_FUNC_SPIF = 23,
@@ -171,17 +171,18 @@ enum bnxt_ulp_cf_idx {
 	BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 40,
 	BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 41,
 	BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 42,
-	BNXT_ULP_CF_IDX_VF_TO_VF = 43,
-	BNXT_ULP_CF_IDX_L3_HDR_CNT = 44,
-	BNXT_ULP_CF_IDX_L4_HDR_CNT = 45,
-	BNXT_ULP_CF_IDX_VFR_MODE = 46,
-	BNXT_ULP_CF_IDX_L3_TUN = 47,
-	BNXT_ULP_CF_IDX_L3_TUN_DECAP = 48,
-	BNXT_ULP_CF_IDX_FID = 49,
-	BNXT_ULP_CF_IDX_HDR_SIG_ID = 50,
-	BNXT_ULP_CF_IDX_FLOW_SIG_ID = 51,
-	BNXT_ULP_CF_IDX_WC_MATCH = 52,
-	BNXT_ULP_CF_IDX_LAST = 53
+	BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF = 43,
+	BNXT_ULP_CF_IDX_VF_TO_VF = 44,
+	BNXT_ULP_CF_IDX_L3_HDR_CNT = 45,
+	BNXT_ULP_CF_IDX_L4_HDR_CNT = 46,
+	BNXT_ULP_CF_IDX_VFR_MODE = 47,
+	BNXT_ULP_CF_IDX_L3_TUN = 48,
+	BNXT_ULP_CF_IDX_L3_TUN_DECAP = 49,
+	BNXT_ULP_CF_IDX_FID = 50,
+	BNXT_ULP_CF_IDX_HDR_SIG_ID = 51,
+	BNXT_ULP_CF_IDX_FLOW_SIG_ID = 52,
+	BNXT_ULP_CF_IDX_WC_MATCH = 53,
+	BNXT_ULP_CF_IDX_LAST = 54
 };
 
 enum bnxt_ulp_cond_list_opc {
@@ -266,7 +267,8 @@ enum bnxt_ulp_field_opc {
 	BNXT_ULP_FIELD_OPC_SRC1_MINUS_CONST = 2,
 	BNXT_ULP_FIELD_OPC_SRC1_PLUS_CONST_POST = 3,
 	BNXT_ULP_FIELD_OPC_SRC1_MINUS_CONST_POST = 4,
-	BNXT_ULP_FIELD_OPC_LAST = 5
+	BNXT_ULP_FIELD_OPC_PORT_TABLE = 5,
+	BNXT_ULP_FIELD_OPC_LAST = 6
 };
 
 enum bnxt_ulp_field_src {
@@ -352,6 +354,27 @@ enum bnxt_ulp_mem_type_opc {
 	BNXT_ULP_MEM_TYPE_OPC_LAST = 3
 };
 
+enum bnxt_ulp_port_table {
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC = 0,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_VNIC = 1,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_SVIF = 2,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_SPIF = 3,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_PARIF = 4,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_VNIC = 5,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_PHY_PORT = 6,
+	BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC = 7,
+	BNXT_ULP_PORT_TABLE_VF_FUNC_SVIF = 8,
+	BNXT_ULP_PORT_TABLE_VF_FUNC_SPIF = 9,
+	BNXT_ULP_PORT_TABLE_VF_FUNC_PARIF = 10,
+	BNXT_ULP_PORT_TABLE_VF_FUNC_VNIC = 11,
+	BNXT_ULP_PORT_TABLE_VF_FUNC_MAC = 12,
+	BNXT_ULP_PORT_TABLE_PHY_PORT_SVIF = 13,
+	BNXT_ULP_PORT_TABLE_PHY_PORT_SPIF = 14,
+	BNXT_ULP_PORT_TABLE_PHY_PORT_PARIF = 15,
+	BNXT_ULP_PORT_TABLE_PHY_PORT_VPORT = 16,
+	BNXT_ULP_PORT_TABLE_LAST = 17
+};
+
 enum bnxt_ulp_pri_opc {
 	BNXT_ULP_PRI_OPC_NOT_USED = 0,
 	BNXT_ULP_PRI_OPC_CONST = 1,
@@ -427,8 +450,8 @@ enum bnxt_ulp_match_type_bitmask {
 
 enum bnxt_ulp_resource_func {
 	BNXT_ULP_RESOURCE_FUNC_INVALID = 0x00,
-	BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE = 0x20,
-	BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE = 0x40,
+	BNXT_ULP_RESOURCE_FUNC_EM_TABLE = 0x20,
+	BNXT_ULP_RESOURCE_FUNC_RSVD1 = 0x40,
 	BNXT_ULP_RESOURCE_FUNC_RSVD2 = 0x60,
 	BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE = 0x80,
 	BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE = 0x81,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_stingray_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_stingray_class.c
index df09de929e..320a89a5d9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_stingray_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_stingray_class.c
@@ -205,7 +205,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_stingray_class_tbl_list[] = {
 	.encap_num_fields = 0
 	},
 	{ /* class_tid: 1, stingray, table: em.int_0 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_INTERNAL,
 	.direction = TF_DIR_RX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
@@ -228,7 +228,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_stingray_class_tbl_list[] = {
 	.encap_num_fields = 0
 	},
 	{ /* class_tid: 1, stingray, table: eem.ext_0 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_EXTERNAL,
 	.direction = TF_DIR_RX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_wh_plus_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_wh_plus_class.c
index 5324bd2531..973ba39f82 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_wh_plus_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_wh_plus_class.c
@@ -246,7 +246,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 5
 	},
 	{ /* class_tid: 1, wh_plus, table: em.ipv4 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_INTERNAL,
 	.direction = TF_DIR_RX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
@@ -269,7 +269,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 1, wh_plus, table: eem.ipv4 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_EXTERNAL,
 	.direction = TF_DIR_RX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
@@ -292,7 +292,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 1, wh_plus, table: em.ipv6 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_INTERNAL,
 	.direction = TF_DIR_RX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
@@ -315,7 +315,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 1, wh_plus, table: eem.ipv6 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_EXTERNAL,
 	.direction = TF_DIR_RX,
 	.execute_info = {
@@ -497,7 +497,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 5
 	},
 	{ /* class_tid: 2, wh_plus, table: em.ipv4 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_INTERNAL,
 	.direction = TF_DIR_TX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
@@ -520,7 +520,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 2, wh_plus, table: eem.ipv4 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_EXTERNAL,
 	.direction = TF_DIR_TX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_EXT,
@@ -543,7 +543,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 2, wh_plus, table: em.ipv6 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_INT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_INTERNAL,
 	.direction = TF_DIR_TX,
 	.mem_type_opcode = BNXT_ULP_MEM_TYPE_OPC_EXECUTE_IF_INT,
@@ -566,7 +566,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = {
 	.result_num_fields = 9
 	},
 	{ /* class_tid: 2, wh_plus, table: eem.ipv6 */
-	.resource_func = BNXT_ULP_RESOURCE_FUNC_EXT_EM_TABLE,
+	.resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE,
 	.resource_type = TF_MEM_EXTERNAL,
 	.direction = TF_DIR_TX,
 	.execute_info = {
@@ -9699,8 +9699,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.6",
@@ -9709,8 +9709,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.7",
@@ -9850,8 +9850,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.7",
@@ -9860,8 +9860,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.8",
@@ -10443,8 +10443,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.6",
@@ -10453,8 +10453,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.7",
@@ -10594,8 +10594,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_SPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_SPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.7",
@@ -10604,8 +10604,8 @@ struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = {
 	.field_cond_src = BNXT_ULP_FIELD_COND_SRC_TRUE,
 	.field_src1 = BNXT_ULP_FIELD_SRC_CF,
 	.field_opr1 = {
-		(BNXT_ULP_CF_IDX_O_L4_DPORT >> 8) & 0xff,
-		BNXT_ULP_CF_IDX_O_L4_DPORT & 0xff}
+		(BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT >> 8) & 0xff,
+		BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT & 0xff}
 	},
 	{
 	.description = "em_key_mask.8",
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
index 5150ed2b07..6e2c48e7b6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2019 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -63,6 +63,7 @@ struct ulp_rte_act_prop {
 
 /* Structure to be used for passing all the parser functions */
 struct ulp_rte_parser_params {
+	STAILQ_ENTRY(ulp_rte_parser_params)  next;
 	struct ulp_rte_hdr_bitmap	hdr_bitmap;
 	struct ulp_rte_hdr_bitmap	hdr_fp_bit;
 	struct ulp_rte_field_bitmap	fld_bitmap;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.c b/drivers/net/bnxt/tf_ulp/ulp_tun.c
index dd3d8703fb..a883e0ff08 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_tun.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.c
@@ -1,8 +1,10 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
+#include <sys/queue.h>
+
 #include <rte_malloc.h>
 
 #include "ulp_tun.h"
@@ -48,18 +50,18 @@ ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
 		goto err;
 
 	/* Store the tunnel dmac in the tunnel cache table and use it while
-	 * programming tunnel flow F2.
+	 * programming tunnel inner flow.
 	 */
 	memcpy(tun_entry->t_dmac,
 	       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
 	       RTE_ETHER_ADDR_LEN);
 
-	tun_entry->valid = true;
-	tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
+	tun_entry->tun_flow_info[params->port_id].state =
+				BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
 	tun_entry->outer_tun_flow_id = params->fid;
 
-	/* F1 and it's related F2s are correlated based on
-	 * Tunnel Destination IP Address.
+	/* Tunnel outer flow  and it's related inner flows are correlated
+	 * based on Tunnel Destination IP Address.
 	 */
 	if (tun_entry->t_dst_ip_valid)
 		goto done;
@@ -83,27 +85,32 @@ ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
 
 /* This function programs the inner tunnel flow in the hardware. */
 static void
-ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
+ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
+			   struct ulp_rte_parser_params *tun_o_params)
 {
 	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
-	struct ulp_rte_parser_params *params;
+	struct ulp_per_port_flow_info *flow_info;
+	struct ulp_rte_parser_params *inner_params;
 	int ret;
 
-	/* F2 doesn't have tunnel dmac, use the tunnel dmac that was
-	 * stored during F1 programming.
+	/* Tunnel inner flow doesn't have tunnel dmac, use the tunnel
+	 * dmac that was stored during F1 programming.
 	 */
-	params = &tun_entry->first_inner_tun_params;
-	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
-	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
-	params->parent_fid = tun_entry->outer_tun_flow_id;
-	params->fid = tun_entry->first_inner_tun_flow_id;
-
-	bnxt_ulp_init_mapper_params(&mparms, params,
-				    BNXT_ULP_FDB_TYPE_REGULAR);
-
-	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
-	if (ret)
-		PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
+	flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
+	STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
+		memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+		       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+		inner_params->parent_fid = tun_entry->outer_tun_flow_id;
+
+		bnxt_ulp_init_mapper_params(&mparms, inner_params,
+					    BNXT_ULP_FDB_TYPE_REGULAR);
+
+		ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
+		if (ret)
+			PMD_DRV_LOG(ERR,
+				    "Failed to create inner tun flow, FID:%u.",
+				    inner_params->fid);
+	}
 }
 
 /* This function either install outer tunnel flow & inner tunnel flow
@@ -114,30 +121,31 @@ ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
 			     struct bnxt_tun_cache_entry *tun_entry,
 			     uint16_t tun_idx)
 {
-	enum bnxt_ulp_tun_flow_state flow_state;
 	int ret;
 
-	flow_state = tun_entry->state;
 	ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
-	if (ret)
+	if (ret == BNXT_TF_RC_ERROR) {
+		PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
 		return ret;
+	}
 
-	/* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
-	 * F1, that means F2 is not deferred. Hence, no need to install F2.
+	/* Install any cached tunnel inner flows that came before tunnel
+	 * outer flow.
 	 */
-	if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
-		ulp_install_inner_tun_flow(tun_entry);
+	ulp_install_inner_tun_flow(tun_entry, params);
 
-	return 0;
+	return BNXT_TF_RC_FID;
 }
 
 /* This function will be called if inner tunnel flow request comes before
  * outer tunnel flow request.
  */
 static int32_t
-ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
+ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
 				      struct bnxt_tun_cache_entry *tun_entry)
 {
+	struct ulp_rte_parser_params *inner_tun_params;
+	struct ulp_per_port_flow_info *flow_info;
 	int ret;
 
 	ret = ulp_matcher_pattern_match(params, &params->class_id);
@@ -148,18 +156,22 @@ ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
 	if (ret != BNXT_TF_RC_SUCCESS)
 		return BNXT_TF_RC_ERROR;
 
-	/* If Tunnel F2 flow comes first then we can't install it in the
-	 * hardware, because, F2 flow will not have L2 context information.
-	 * So, just cache the F2 information and program it in the context
-	 * of F1 flow installation.
+	/* If Tunnel inner flow comes first then we can't install it in the
+	 * hardware, because, Tunnel inner flow will not have L2 context
+	 * information. So, just cache the Tunnel inner flow information
+	 * and program it in the context of F1 flow installation.
 	 */
-	memcpy(&tun_entry->first_inner_tun_params, params,
-	       sizeof(struct ulp_rte_parser_params));
-
-	tun_entry->first_inner_tun_flow_id = params->fid;
-	tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+	flow_info = &tun_entry->tun_flow_info[params->port_id];
+	inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
+				       sizeof(struct ulp_rte_parser_params), 0);
+	if (!inner_tun_params)
+		return BNXT_TF_RC_ERROR;
+	memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
+	STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
+			   next);
+	flow_info->tun_i_cnt++;
 
-	/* F1 and it's related F2s are correlated based on
+	/* F1 and it's related Tunnel inner flows are correlated based on
 	 * Tunnel Destination IP Address. It could be already set, if
 	 * the inner flow got offloaded first.
 	 */
@@ -240,8 +252,8 @@ ulp_get_tun_entry(struct ulp_rte_parser_params *params,
 int32_t
 ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
 {
-	bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
-	bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
+	bool inner_tun_sig, cache_inner_tun_flow;
+	bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
 	enum bnxt_ulp_tun_flow_state flow_state;
 	struct bnxt_tun_cache_entry *tun_entry;
 	uint32_t l3_tun, l3_tun_decap;
@@ -259,40 +271,31 @@ ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
 	if (rc == BNXT_TF_RC_ERROR)
 		return rc;
 
-	flow_state = tun_entry->state;
+	if (params->port_id >= RTE_MAX_ETHPORTS)
+		return BNXT_TF_RC_ERROR;
+	flow_state = tun_entry->tun_flow_info[params->port_id].state;
 	/* Outer tunnel flow validation */
-	outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
-	outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
+	outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
 	outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
-						      outer_tun_sig);
+						      outer_tun_flow);
 
 	/* Inner tunnel flow validation */
 	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
-	first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
+	cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
 							 inner_tun_sig);
 	inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
-	inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
-						      inner_tun_sig);
 
 	if (outer_tun_reject) {
 		tun_entry->outer_tun_rej_cnt++;
 		BNXT_TF_DBG(ERR,
 			    "Tunnel F1 flow rejected, COUNT: %d\n",
 			    tun_entry->outer_tun_rej_cnt);
-	/* Inner tunnel flow is rejected if it comes between first inner
-	 * tunnel flow and outer flow requests.
-	 */
-	} else if (inner_tun_reject) {
-		tun_entry->inner_tun_rej_cnt++;
-		BNXT_TF_DBG(ERR,
-			    "Tunnel F2 flow rejected, COUNT: %d\n",
-			    tun_entry->inner_tun_rej_cnt);
 	}
 
-	if (outer_tun_reject || inner_tun_reject)
+	if (outer_tun_reject)
 		return BNXT_TF_RC_ERROR;
-	else if (first_inner_tun_flow)
-		return ulp_post_process_first_inner_tun_flow(params, tun_entry);
+	else if (cache_inner_tun_flow)
+		return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
 	else if (outer_tun_flow)
 		return ulp_post_process_outer_tun_flow(params, tun_entry,
 						       tun_idx);
@@ -302,9 +305,109 @@ ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
 		return BNXT_TF_RC_NORMAL;
 }
 
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
+{
+	struct ulp_per_port_flow_info *flow_info;
+	int i, j;
+
+	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+			flow_info = &tun_tbl[i].tun_flow_info[j];
+			STAILQ_INIT(&flow_info->tun_i_prms_list);
+		}
+	}
+}
+
 void
 ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
 {
+	struct ulp_rte_parser_params *inner_params;
+	struct ulp_per_port_flow_info *flow_info;
+	int j;
+
+	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+		/* Drain the list head-first; iterating with STAILQ_FOREACH
+		 * while freeing the current element would dereference
+		 * freed memory to advance the iterator.
+		 */
+		while ((inner_params =
+			STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
+			STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
+			rte_free(inner_params);
+		}
+	}
+
 	memset(&tun_tbl[tun_idx], 0,
-		sizeof(struct bnxt_tun_cache_entry));
+			sizeof(struct bnxt_tun_cache_entry));
+
+	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+		STAILQ_INIT(&flow_info->tun_i_prms_list);
+	}
+}
+
+static bool
+ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
+			   struct ulp_per_port_flow_info *flow_info,
+			   uint32_t fid)
+{
+	struct ulp_rte_parser_params *inner_params;
+	int j;
+
+	STAILQ_FOREACH(inner_params,
+		       &flow_info->tun_i_prms_list,
+		       next) {
+		if (inner_params->fid == fid) {
+			STAILQ_REMOVE(&flow_info->tun_i_prms_list,
+				      inner_params,
+				      ulp_rte_parser_params,
+				      next);
+			rte_free(inner_params);
+			flow_info->tun_i_cnt--;
+			/* When a DPDK application offloads a duplicate
+			 * tunnel inner flow on a port that the flow is
+			 * not destined to, no tunnel outer flow will be
+			 * associated with these duplicate inner flows.
+			 * So, when the last tunnel inner flow ages out,
+			 * the driver has to clear the tunnel entry;
+			 * otherwise the tunnel entry cannot be reused.
+			 */
+			if (!flow_info->tun_i_cnt &&
+			    flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
+				memset(tun_entry, 0,
+				       sizeof(struct bnxt_tun_cache_entry));
+				/* Re-init every port's list, not just the
+				 * current port's, after wiping the entry.
+				 */
+				for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+					flow_info =
+					    &tun_entry->tun_flow_info[j];
+					STAILQ_INIT(&flow_info->tun_i_prms_list);
+				}
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* When a DPDK application offloads the same tunnel inner flow
+ * on all the uplink ports, a tunnel inner flow entry is cached
+ * even for ports the flow is not destined to. Such tunnel
+ * inner flows will eventually age out, as there won't be any
+ * traffic on those ports. When the destroy for such a flow is
+ * called, clean up the cached tunnel inner flow entry.
+ */
+void
+ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
+{
+	struct ulp_per_port_flow_info *flow_info;
+	int i, j;
+
+	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+		if (!tun_tbl[i].t_dst_ip_valid)
+			continue;
+		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+			flow_info = &tun_tbl[i].tun_flow_info[j];
+			if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
+						       flow_info, fid) == true)
+				return;
+		}
+	}
 }
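
The ulp_tun.c changes above replace the single cached inner-flow entry with a
per-port STAILQ of parser params that is drained once the outer (F1) flow is
installed. The stand-alone sketch below illustrates that defer-then-drain
pattern with the sys/queue.h macros; the names (pending_flow, cache_inner,
install_all) are illustrative only and are not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Illustrative stand-in for a cached tunnel inner flow request. */
struct pending_flow {
	unsigned int fid;			/* flow id of the deferred flow */
	STAILQ_ENTRY(pending_flow) next;	/* linkage in the per-port list */
};

STAILQ_HEAD(pending_list, pending_flow);

/* Cache an inner flow that arrived before its outer (F1) flow. */
static int cache_inner(struct pending_list *list, unsigned int fid)
{
	struct pending_flow *p = calloc(1, sizeof(*p));

	if (p == NULL)
		return -1;
	p->fid = fid;
	STAILQ_INSERT_TAIL(list, p, next);
	return 0;
}

/* Drain the cache once the outer flow is finally offloaded. */
static void install_all(struct pending_list *list)
{
	struct pending_flow *p;

	while ((p = STAILQ_FIRST(list)) != NULL) {
		STAILQ_REMOVE_HEAD(list, next);
		printf("installing deferred inner flow fid=%u\n", p->fid);
		free(p);
	}
}

int main(void)
{
	struct pending_list list = STAILQ_HEAD_INITIALIZER(list);

	cache_inner(&list, 10);
	cache_inner(&list, 11);
	install_all(&list);	/* outer flow arrived; flush the cache */
	return 0;
}
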
diff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.h b/drivers/net/bnxt/tf_ulp/ulp_tun.h
index 763138218b..2516eaca2c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_tun.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -10,12 +10,17 @@
 #include <stdbool.h>
 #include <sys/queue.h>
 
+#include "rte_version.h"
 #include "rte_ethdev.h"
 
 #include "ulp_template_db_enum.h"
 #include "ulp_template_struct.h"
 
-#define	BNXT_OUTER_TUN_SIGNATURE(l3_tun, params)		\
+#if RTE_VERSION_NUM(17, 11, 10, 16) == RTE_VERSION
+#define	RTE_ETHER_ADDR_LEN	ETHER_ADDR_LEN
+#endif
+
+#define	BNXT_OUTER_TUN_FLOW(l3_tun, params)		\
 	((l3_tun) &&					\
 	 ULP_BITMAP_ISSET((params)->act_bitmap.bits,	\
 			  BNXT_ULP_ACT_BIT_JUMP))
@@ -24,22 +29,16 @@
 	 !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,			\
 			   BNXT_ULP_HDR_BIT_O_ETH))
 
-#define	BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig)	\
+#define	BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig)	\
 	((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
 #define	BNXT_INNER_TUN_FLOW(state, inner_tun_sig)		\
 	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
-#define	BNXT_OUTER_TUN_FLOW(outer_tun_sig)		((outer_tun_sig))
 
 /* It is invalid to get another outer flow offload request
  * for the same tunnel, while the outer flow is already offloaded.
  */
 #define	BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)	\
 	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
-/* It is invalid to get another inner flow offload request
- * for the same tunnel, while the outer flow is not yet offloaded.
- */
-#define	BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig)	\
-	((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
 
 #define	ULP_TUN_O_DMAC_HDR_FIELD_INDEX	1
 #define	ULP_TUN_O_IPV4_DIP_INDEX	19
@@ -50,10 +49,10 @@
  * requests arrive.
  *
  * If inner tunnel flow offload request arrives first then the flow
- * state will change from BNXT_ULP_FLOW_STATE_NORMAL to
- * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel
- * flow offload request will change the state of the flow to
- * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.
+ * state will remain BNXT_ULP_FLOW_STATE_NORMAL and the inner flow
+ * will be cached per port. A subsequent outer tunnel flow offload
+ * request will change the state of the flow from
+ * BNXT_ULP_FLOW_STATE_NORMAL to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
  *
  * If outer tunnel flow offload request arrives first then the flow state
  * will change from BNXT_ULP_FLOW_STATE_NORMAL to
@@ -67,12 +66,15 @@
 enum bnxt_ulp_tun_flow_state {
 	BNXT_ULP_FLOW_STATE_NORMAL = 0,
 	BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
-	BNXT_ULP_FLOW_STATE_TUN_I_CACHED
+};
+
+struct ulp_per_port_flow_info {
+	enum bnxt_ulp_tun_flow_state		state;
+	uint32_t				tun_i_cnt;
+	STAILQ_HEAD(, ulp_rte_parser_params)	tun_i_prms_list;
 };
 
 struct bnxt_tun_cache_entry {
-	enum bnxt_ulp_tun_flow_state	state;
-	bool				valid;
 	bool				t_dst_ip_valid;
 	uint8_t				t_dmac[RTE_ETHER_ADDR_LEN];
 	union {
@@ -80,13 +82,17 @@ struct bnxt_tun_cache_entry {
 		uint8_t			t_dst_ip6[16];
 	};
 	uint32_t			outer_tun_flow_id;
-	uint32_t			first_inner_tun_flow_id;
 	uint16_t			outer_tun_rej_cnt;
-	uint16_t			inner_tun_rej_cnt;
-	struct ulp_rte_parser_params	first_inner_tun_params;
+	struct ulp_per_port_flow_info	tun_flow_info[RTE_MAX_ETHPORTS];
 };
 
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);
+
 void
 ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);
 
+void
+ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);
+
 #endif
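
With BNXT_ULP_FLOW_STATE_TUN_I_CACHED gone, the tunnel state is now tracked
per (tunnel entry, port) pair: an inner flow that arrives first leaves the
port in BNXT_ULP_FLOW_STATE_NORMAL and is queued, and only the outer flow
moves it to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD. Below is a minimal sketch of the
resulting per-port decision, condensed from the macros above; the decide()
helper and its action names are illustrative, not driver code.

#include <stdbool.h>

enum tun_state { STATE_NORMAL = 0, STATE_TUN_O_OFFLD };

enum tun_action {
	ACT_REJECT,		/* duplicate outer flow for an offloaded tunnel    */
	ACT_CACHE_INNER,	/* inner flow arrived first: queue it on this port */
	ACT_INSTALL_OUTER,	/* outer flow: install it and drain cached inners  */
	ACT_INSTALL_INNER,	/* outer already offloaded: install inner directly */
	ACT_NORMAL		/* not a tunnel flow of interest                    */
};

/* Condensed per-port equivalent of BNXT_REJECT_OUTER_TUN_FLOW,
 * BNXT_CACHE_INNER_TUN_FLOW, BNXT_OUTER_TUN_FLOW and BNXT_INNER_TUN_FLOW,
 * checked in the same order as ulp_post_process_tun_flow().
 */
enum tun_action
decide(enum tun_state state, bool outer_sig, bool inner_sig)
{
	if (state == STATE_TUN_O_OFFLD && outer_sig)
		return ACT_REJECT;
	if (state == STATE_NORMAL && inner_sig)
		return ACT_CACHE_INNER;
	if (outer_sig)
		return ACT_INSTALL_OUTER;
	if (state == STATE_TUN_O_OFFLD && inner_sig)
		return ACT_INSTALL_INNER;
	return ACT_NORMAL;
}
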
-- 
2.17.1


