[dpdk-dev] [PATCH 7/9] net/qede/base: update HSI code

Rasesh Mody rmody at marvell.com
Mon Sep 30 04:49:19 CEST 2019


Update the hardware-software interface (HSI) common base driver code in
preparation for updating the firmware to version 8.40.25.0. The changes
cover the firmware version macros, IRO array handling, the GTT BAR0 window
mapping, LL2 queue constants and the debug tools HSI.

Signed-off-by: Rasesh Mody <rmody at marvell.com>
---
 drivers/net/qede/base/bcm_osal.c              |   1 +
 drivers/net/qede/base/common_hsi.h            | 164 ++++--
 drivers/net/qede/base/ecore.h                 |   4 +-
 drivers/net/qede/base/ecore_cxt.c             |  23 +-
 drivers/net/qede/base/ecore_dev.c             |  21 +-
 drivers/net/qede/base/ecore_gtt_reg_addr.h    |  42 +-
 drivers/net/qede/base/ecore_gtt_values.h      |  18 +-
 drivers/net/qede/base/ecore_hsi_common.h      | 231 +++++++--
 drivers/net/qede/base/ecore_hsi_debug_tools.h | 475 ++++++++----------
 drivers/net/qede/base/ecore_hsi_eth.h         | 134 ++---
 drivers/net/qede/base/ecore_hsi_init_func.h   |  25 +-
 drivers/net/qede/base/ecore_hsi_init_tool.h   |  38 ++
 drivers/net/qede/base/ecore_hw.c              |  16 +
 drivers/net/qede/base/ecore_hw.h              |  10 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c   |   7 +-
 drivers/net/qede/base/ecore_init_ops.c        |  47 --
 drivers/net/qede/base/ecore_init_ops.h        |  10 -
 drivers/net/qede/base/ecore_iro.h             | 320 ++++++------
 drivers/net/qede/base/ecore_iro_values.h      | 336 ++++++++-----
 drivers/net/qede/base/ecore_mcp.c             |   1 +
 drivers/net/qede/base/eth_common.h            | 101 +++-
 drivers/net/qede/base/reg_addr.h              |  10 +
 drivers/net/qede/qede_rxtx.c                  |  16 +-
 23 files changed, 1218 insertions(+), 832 deletions(-)
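
Among the common_hsi.h additions below is the 24-bit ICID EDPM doorbell
layout (struct db_rdma_24b_icid_dpm_params). As a rough sketch of how the new
DB_RDMA_24B_ICID_DPM_PARAMS_* mask/shift pairs are meant to be combined into
the params dword, a hypothetical helper could look as follows; the helper
name and its arguments are illustrative and not part of this patch, DPM_RDMA
is the value from the existing db_dpm_type enum referenced in the field
comments, and the result still needs the usual little-endian conversion
before it is stored in the __le32 field.

static inline u32
pack_rdma_24b_dpm_params(u8 size_qw, u8 opcode, u8 icid_ext, u8 inv_bytes)
{
	u32 params = 0;

	/* DPM burst size in QWORDs */
	params |= ((u32)size_qw & DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT;
	/* DPM transaction type: DPM_RDMA from enum db_dpm_type */
	params |= ((u32)DPM_RDMA & DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT;
	/* RDMA opcode */
	params |= ((u32)opcode & DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT;
	/* Upper 8 bits of the 24-bit ICID */
	params |= ((u32)icid_ext & DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT;
	/* Invalid bytes in the last QWORD of the burst */
	params |= ((u32)inv_bytes &
		   DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT;
	/* Mark the doorbell as using 24-bit ICID mode */
	params |= (1U & DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK) <<
		  DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT;

	return params; /* convert (e.g. OSAL_CPU_TO_LE32) before storing */
}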

diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 9915df44f..48d016e24 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -10,6 +10,7 @@
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_hw.h"
+#include "ecore_dev_api.h"
 #include "ecore_iov_api.h"
 #include "ecore_mcp_api.h"
 #include "ecore_l2_api.h"
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index b878a92aa..74afed1ec 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -13,12 +13,12 @@
 /* Temporarily here should be added to HSI automatically by resource allocation
  * tool.
  */
-#define T_TEST_AGG_INT_TEMP    6
-#define	M_TEST_AGG_INT_TEMP    8
-#define	U_TEST_AGG_INT_TEMP    6
-#define	X_TEST_AGG_INT_TEMP    14
-#define	Y_TEST_AGG_INT_TEMP    4
-#define	P_TEST_AGG_INT_TEMP    4
+#define T_TEST_AGG_INT_TEMP  6
+#define M_TEST_AGG_INT_TEMP  8
+#define U_TEST_AGG_INT_TEMP  6
+#define X_TEST_AGG_INT_TEMP  14
+#define Y_TEST_AGG_INT_TEMP  4
+#define P_TEST_AGG_INT_TEMP  4
 
 #define X_FINAL_CLEANUP_AGG_INT  1
 
@@ -30,21 +30,20 @@
 #define ISCSI_CDU_TASK_SEG_TYPE       0
 #define FCOE_CDU_TASK_SEG_TYPE        0
 #define RDMA_CDU_TASK_SEG_TYPE        1
+#define ETH_CDU_TASK_SEG_TYPE         2
 
 #define FW_ASSERT_GENERAL_ATTN_IDX    32
 
-#define MAX_PINNED_CCFC			32
-
 #define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE	3
 
 /* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE    8	 /*tstorm_scsi_queue_zone*/
-#define MSTORM_QZONE_SIZE    16  /*mstorm_eth_queue_zone. Used only for RX
-				  *producer of VFs in backward compatibility
-				  *mode.
-				  */
-#define USTORM_QZONE_SIZE    8	 /*ustorm_eth_queue_zone*/
-#define XSTORM_QZONE_SIZE    8	 /*xstorm_eth_queue_zone*/
+#define TSTORM_QZONE_SIZE    8   /*tstorm_queue_zone*/
+/*mstorm_eth_queue_zone. Used only for RX producer of VFs in backward
+ * compatibility mode.
+ */
+#define MSTORM_QZONE_SIZE    16
+#define USTORM_QZONE_SIZE    8   /*ustorm_queue_zone*/
+#define XSTORM_QZONE_SIZE    8   /*xstorm_eth_queue_zone*/
 #define YSTORM_QZONE_SIZE    0
 #define PSTORM_QZONE_SIZE    0
 
@@ -61,7 +60,8 @@
  */
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD     112
 
-
+#define ETH_RGSRC_CTX_SIZE                6 /*Size in QREGS*/
+#define ETH_TGSRC_CTX_SIZE                6 /*Size in QREGS*/
 /********************************/
 /* CORE (LIGHT L2) FW CONSTANTS */
 /********************************/
@@ -76,15 +76,13 @@
 
 #define CORE_SPQE_PAGE_SIZE_BYTES                       4096
 
-/*
- * Usually LL2 queues are opened in pairs TX-RX.
- * There is a hard restriction on number of RX queues (limited by Tstorm RAM)
- * and TX counters (Pstorm RAM).
- * Number of TX queues is almost unlimited.
- * The constants are different so as to allow asymmetric LL2 connections
- */
+/* Number of LL2 RAM based (RX producers and statistics) queues */
+#define MAX_NUM_LL2_RX_RAM_QUEUES               32
+/* Number of LL2 context based (RX producers and statistics) queues */
+#define MAX_NUM_LL2_RX_CTX_QUEUES               208
+#define MAX_NUM_LL2_RX_QUEUES (MAX_NUM_LL2_RX_RAM_QUEUES + \
+			       MAX_NUM_LL2_RX_CTX_QUEUES)
 
-#define MAX_NUM_LL2_RX_QUEUES					48
 #define MAX_NUM_LL2_TX_STATS_COUNTERS			48
 
 
@@ -95,8 +93,8 @@
 
 
 #define FW_MAJOR_VERSION        8
-#define FW_MINOR_VERSION        37
-#define FW_REVISION_VERSION     7
+#define FW_MINOR_VERSION		40
+#define FW_REVISION_VERSION		25
 #define FW_ENGINEERING_VERSION  0
 
 /***********************/
@@ -134,6 +132,8 @@
 #define MAX_NUM_L2_QUEUES_BB	(256)
 #define MAX_NUM_L2_QUEUES_K2    (320)
 
+#define FW_LOWEST_CONSUMEDDMAE_CHANNEL   (26)
+
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2     4
 #define NUM_OF_PHYS_TCS           8
@@ -145,7 +145,6 @@
 #define NUM_OF_CONNECTION_TYPES (8)
 #define NUM_OF_TASK_TYPES       (8)
 #define NUM_OF_LCIDS            (320)
-#define NUM_OF_LTIDS            (320)
 
 /* Global PXP windows (GTT) */
 #define NUM_OF_GTT          19
@@ -172,6 +171,8 @@
 #define	CDU_CONTEXT_VALIDATION_CFG_USE_CID				(4)
 #define	CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE				(5)
 
+/*enabled, type A, use all */
+#define	CDU_CONTEXT_VALIDATION_DEFAULT_CFG				(0x3D)
 
 /*****************/
 /* DQ CONSTANTS  */
@@ -218,6 +219,7 @@
 #define DQ_XCM_TOE_TX_BD_PROD_CMD           DQ_XCM_AGG_VAL_SEL_WORD4
 #define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD     DQ_XCM_AGG_VAL_SEL_REG3
 #define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD    DQ_XCM_AGG_VAL_SEL_REG4
+#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD   DQ_XCM_AGG_VAL_SEL_WORD5
 
 /* UCM agg val selection (HW) */
 #define DQ_UCM_AGG_VAL_SEL_WORD0  0
@@ -292,6 +294,7 @@
 #define DQ_UCM_AGG_FLG_SHIFT_RULE1EN   7
 
 /* UCM agg counter flag selection (FW) */
+#define DQ_UCM_NVMF_NEW_CQE_CF_CMD          (1 << DQ_UCM_AGG_FLG_SHIFT_CF1)
 #define DQ_UCM_ETH_PMD_TX_ARM_CMD           (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
 #define DQ_UCM_ETH_PMD_RX_ARM_CMD           (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
 #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD        (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
@@ -323,6 +326,9 @@
 /* PWM address mapping */
 #define DQ_PWM_OFFSET_DPM_BASE				0x0
 #define DQ_PWM_OFFSET_DPM_END				0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE		0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE		0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE		0x38
 #define DQ_PWM_OFFSET_XCM16_BASE			0x40
 #define DQ_PWM_OFFSET_XCM32_BASE			0x44
 #define DQ_PWM_OFFSET_UCM16_BASE			0x48
@@ -342,6 +348,13 @@
 #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 1)
 #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 3)
 
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+	(DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+	(DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD	\
+	(DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
 #define DQ_REGION_SHIFT				        (12)
 
 /* DPM */
@@ -378,6 +391,10 @@
 /* number of global Vport/QCN rate limiters */
 #define MAX_QM_GLOBAL_RLS			256
 
+/* number of global rate limiters */
+#define MAX_QM_GLOBAL_RLS		256
+#define COMMON_MAX_QM_GLOBAL_RLS	(MAX_QM_GLOBAL_RLS)
+
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH		16
 #define QM_LINE_CRD_REG_SIGN_BIT	(1 << (QM_LINE_CRD_REG_WIDTH - 1))
@@ -431,9 +448,6 @@
 #define IGU_MEM_PBA_MSIX_RESERVED_UPPER		0x03ff
 
 #define IGU_CMD_INT_ACK_BASE			0x0400
-#define IGU_CMD_INT_ACK_UPPER			(IGU_CMD_INT_ACK_BASE + \
-						 MAX_TOT_SB_PER_PATH - \
-						 1)
 #define IGU_CMD_INT_ACK_RESERVED_UPPER		0x05ff
 
 #define IGU_CMD_ATTN_BIT_UPD_UPPER		0x05f0
@@ -446,9 +460,6 @@
 #define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05f6
 
 #define IGU_CMD_PROD_UPD_BASE			0x0600
-#define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE + \
-						 MAX_TOT_SB_PER_PATH  - \
-						 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
 
 /*****************/
@@ -701,6 +712,12 @@ struct common_queue_zone {
 	__le16 reserved;
 };
 
+struct nvmf_eqe_data {
+	__le16 icid /* The connection ID for which the EQE is written. */;
+	u8 reserved0[6] /* Alignment to line */;
+};
+
+
 /*
  * ETH Rx producers data
  */
@@ -770,6 +787,8 @@ enum protocol_type {
 	PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
 	PROTOCOLID_COMMON /* ProtocolCommon */,
 	PROTOCOLID_TCP /* TCP */,
+	PROTOCOLID_RDMA /* RDMA */,
+	PROTOCOLID_SCSI /* SCSI */,
 	MAX_PROTOCOL_TYPE
 };
 
@@ -779,6 +798,36 @@ struct regpair {
 	__le32 hi /* high word for reg-pair */;
 };
 
+/*
+ * RoCE Destroy Event Data
+ */
+struct rdma_eqe_destroy_qp {
+	__le32 cid /* Dedicated field RoCE destroy QP event */;
+	u8 reserved[4];
+};
+
+/*
+ * RoCE Suspend Event Data
+ */
+struct rdma_eqe_suspend_qp {
+	__le32 cid /* Dedicated field RoCE Suspend QP event */;
+	u8 reserved[4];
+};
+
+/*
+ * RDMA Event Data Union
+ */
+union rdma_eqe_data {
+	struct regpair async_handle /* Host handle for the Async Completions */;
+	/* RoCE Destroy Event Data */
+	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+	/* RoCE Suspend QP Event Data */
+	struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+struct tstorm_queue_zone {
+	__le32 reserved[2];
+};
 
 
 /*
@@ -993,6 +1042,18 @@ struct db_pwm_addr {
 #define DB_PWM_ADDR_RESERVED1_SHIFT 28
 };
 
+/*
+ * Structure for doorbell address, in legacy mode, without DEMS
+ */
+struct db_legacy_wo_dems_addr {
+	__le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK       0x3FFFFFFF /* internal CID */
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT      2
+};
+
+
 /*
  * Parameters to RDMA firmware, passed in EDPM doorbell
  */
@@ -1025,6 +1086,43 @@ struct db_rdma_dpm_params {
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
 };
 
+/*
+ * Parameters to RDMA firmware, passed in EDPM doorbell
+ */
+struct db_rdma_24b_icid_dpm_params {
+	__le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK                0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT               0
+/* Type of DPM transaction (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK            0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT           6
+/* opcode for RDMA operation */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK              0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT             8
+/* ICID extension */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK            0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT           16
+/* Number of invalid bytes in last QWORD of the DPM transaction */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK        0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT       24
+/* Flag indicating 24b icid mode is enabled */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK    0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT   27
+/* RoCE completion flag */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK      0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT     28
+/* RoCE S flag */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK               0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT              29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK           0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT          30
+/* Connection type is iWARP */
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK  0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+
 /*
  * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
  * DPM burst
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 524a1dd46..b1d8706c9 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -834,8 +834,8 @@ struct ecore_dev {
 	u8				cache_shift;
 
 	/* Init */
-	const struct iro		*iro_arr;
-	#define IRO (p_hwfn->p_dev->iro_arr)
+	const u32			*iro_arr;
+#define IRO	((const struct iro *)p_hwfn->p_dev->iro_arr)
 
 	/* HW functions */
 	u8				num_hwfns;
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index bc5628c4e..0f04c9447 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -190,9 +190,7 @@ struct ecore_cxt_mngr {
 
 	/* Acquired CIDs */
 	struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
-	/* TBD - do we want this allocated to reserve space? */
-	struct ecore_cid_acquired_map
-		acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
+	struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
 
 	/* ILT  shadow table */
 	struct ecore_dma_mem *ilt_shadow;
@@ -1040,8 +1038,8 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
 
 static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 {
+	u32 type, vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
 		OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
@@ -1049,7 +1047,7 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
 
-		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+		for (vf = 0; vf < max_num_vfs; vf++) {
 			OSAL_FREE(p_hwfn->p_dev,
 				  p_mngr->acquired_vf[type][vf].cid_map);
 			p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
@@ -1087,6 +1085,7 @@ ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
 static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	u32 start_cid = 0, vf_start_cid = 0;
 	u32 type, vf;
 
@@ -1101,7 +1100,7 @@ static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
 			goto cid_map_fail;
 
 		/* Handle VF maps */
-		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+		for (vf = 0; vf < max_num_vfs; vf++) {
 			p_map = &p_mngr->acquired_vf[type][vf];
 			if (ecore_cid_map_alloc_single(p_hwfn, type,
 						       vf_start_cid,
@@ -1236,10 +1235,10 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 len, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	struct ecore_cid_acquired_map *p_map;
 	struct ecore_conn_type_cfg *p_cfg;
 	int type;
-	u32 len;
 
 	/* Reset acquired cids */
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
@@ -1257,7 +1256,7 @@ void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
 		if (!p_cfg->cids_per_vf)
 			continue;
 
-		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+		for (vf = 0; vf < max_num_vfs; vf++) {
 			p_map = &p_mngr->acquired_vf[type][vf];
 			len = DIV_ROUND_UP(p_map->max_count,
 					   BITS_PER_MAP_WORD) *
@@ -1818,16 +1817,16 @@ enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
 					    enum protocol_type type,
 					    u32 *p_cid, u8 vfid)
 {
+	u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct ecore_cid_acquired_map *p_map;
-	u32 rel_cid;
 
 	if (type >= MAX_CONN_TYPES) {
 		DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
 		return ECORE_INVAL;
 	}
 
-	if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+	if (vfid >= max_num_vfs && vfid != ECORE_CXT_PF_CID) {
 		DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
 		return ECORE_INVAL;
 	}
@@ -1913,12 +1912,12 @@ static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
 
 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
 {
+	u32 rel_cid, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
 	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
 	enum protocol_type type;
 	bool b_acquired;
-	u32 rel_cid;
 
-	if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+	if (vfid != ECORE_CXT_PF_CID && vfid > max_num_vfs) {
 		DP_NOTICE(p_hwfn, true,
 			  "Trying to return incorrect CID belonging to VF %02x\n",
 			  vfid);
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 2c135afd2..2a11b4d29 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1843,7 +1843,7 @@ static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
 
 	/* all vports participate in weighted fair queueing */
 	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
-		qm_info->qm_vport_params[i].vport_wfq = 1;
+		qm_info->qm_vport_params[i].wfq = 1;
 }
 
 /* initialize qm port params */
@@ -2236,11 +2236,8 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
 	/* vport table */
 	for (i = 0; i < qm_info->num_vports; i++) {
 		vport = &qm_info->qm_vport_params[i];
-		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
-			   "vport idx %d, vport_rl %d, wfq %d,"
-			   " first_tx_pq_id [ ",
-			   qm_info->start_vport + i, vport->vport_rl,
-			   vport->vport_wfq);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ",
+			   qm_info->start_vport + i, vport->wfq);
 		for (tc = 0; tc < NUM_OF_TCS; tc++)
 			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
 				   vport->first_tx_pq_id[tc]);
@@ -2866,7 +2863,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
 	ecore_init_cau_rt_data(p_dev);
 
 	/* Program GTT windows */
-	ecore_gtt_init(p_hwfn, p_ptt);
+	ecore_gtt_init(p_hwfn);
 
 #ifndef ASIC_ONLY
 	if (CHIP_REV_IS_EMUL(p_dev)) {
@@ -6248,7 +6245,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
 
 /* Calculate final WFQ values for all vports and configure it.
  * After this configuration each vport must have
- * approx min rate =  vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ * approx min rate =  wfq * min_pf_rate / ECORE_WFQ_UNIT
  */
 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 					       struct ecore_ptt *p_ptt,
@@ -6262,11 +6259,11 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
 
-		vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
+		vport_params[i].wfq = (wfq_speed * ECORE_WFQ_UNIT) /
 		    min_pf_rate;
 		ecore_init_vport_wfq(p_hwfn, p_ptt,
 				     vport_params[i].first_tx_pq_id,
-				     vport_params[i].vport_wfq);
+				     vport_params[i].wfq);
 	}
 }
 
@@ -6275,7 +6272,7 @@ static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
 	int i;
 
 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
-		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+		p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
 }
 
 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
@@ -6290,7 +6287,7 @@ static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
 		ecore_init_wfq_default_param(p_hwfn);
 		ecore_init_vport_wfq(p_hwfn, p_ptt,
 				     vport_params[i].first_tx_pq_id,
-				     vport_params[i].vport_wfq);
+				     vport_params[i].wfq);
 	}
 }
 
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 8c8fed4e7..f5b11eb28 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -8,43 +8,53 @@
 #define GTT_REG_ADDR_H
 
 /* Win 2 */
-/* Access:RW   DataWidth:0x20    */
+//Access:RW   DataWidth:0x20   //
 #define GTT_BAR0_MAP_REG_IGU_CMD                                      0x00f000UL
 
 /* Win 3 */
-/* Access:RW   DataWidth:0x20    */
+//Access:RW   DataWidth:0x20   //
 #define GTT_BAR0_MAP_REG_TSDM_RAM                                     0x010000UL
 
 /* Win 4 */
-/* Access:RW   DataWidth:0x20    */
+//Access:RW   DataWidth:0x20   //
 #define GTT_BAR0_MAP_REG_MSDM_RAM                                     0x011000UL
 
 /* Win 5 */
-/* Access:RW   DataWidth:0x20    */
+//Access:RW   DataWidth:0x20   //
 #define GTT_BAR0_MAP_REG_MSDM_RAM_1024                                0x012000UL
 
 /* Win 6 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_USDM_RAM                                     0x013000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048                                0x013000UL
 
 /* Win 7 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024                                0x014000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_USDM_RAM                                     0x014000UL
 
 /* Win 8 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048                                0x015000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024                                0x015000UL
 
 /* Win 9 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_XSDM_RAM                                     0x016000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048                                0x016000UL
 
 /* Win 10 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_YSDM_RAM                                     0x017000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_XSDM_RAM                                     0x017000UL
 
 /* Win 11 */
-/* Access:RW   DataWidth:0x20    */
-#define GTT_BAR0_MAP_REG_PSDM_RAM                                     0x018000UL
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024                                0x018000UL
+
+/* Win 12 */
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_YSDM_RAM                                     0x019000UL
+
+/* Win 13 */
+//Access:RW   DataWidth:0x20   //
+#define GTT_BAR0_MAP_REG_PSDM_RAM                                     0x01a000UL
+
+/* Win 14 */
 
 #endif
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
index adc20c0ce..2035bed5c 100644
--- a/drivers/net/qede/base/ecore_gtt_values.h
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -13,15 +13,15 @@ static u32 pxp_global_win[] = {
 	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
 	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
 	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
-	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
-	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
-	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
-	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
-	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
-	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
-	0,
-	0,
-	0,
+	0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+	0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+	0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+	0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+	0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+	0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+	0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+	0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+	0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
 	0,
 	0,
 	0,
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 8fa200033..23cfcdeff 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -57,7 +57,7 @@ struct ystorm_core_conn_st_ctx {
  * The core storm context for the Pstorm
  */
 struct pstorm_core_conn_st_ctx {
-	__le32 reserved[4];
+	__le32 reserved[20];
 };
 
 /*
@@ -75,7 +75,7 @@ struct xstorm_core_conn_st_ctx {
 
 struct xstorm_core_conn_ag_ctx {
 	u8 reserved0 /* cdu_validation */;
-	u8 core_state /* state */;
+	u8 state /* state */;
 	u8 flags0;
 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
 #define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
@@ -516,13 +516,20 @@ struct ustorm_core_conn_ag_ctx {
  * The core storm context for the Mstorm
  */
 struct mstorm_core_conn_st_ctx {
-	__le32 reserved[24];
+	__le32 reserved[40];
 };
 
 /*
  * The core storm context for the Ustorm
  */
 struct ustorm_core_conn_st_ctx {
+	__le32 reserved[20];
+};
+
+/*
+ * The core storm context for the Tstorm
+ */
+struct tstorm_core_conn_st_ctx {
 	__le32 reserved[4];
 };
 
@@ -549,6 +556,9 @@ struct core_conn_context {
 /* ustorm storm context */
 	struct ustorm_core_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2] /* padding */;
+/* tstorm storm context */
+	struct tstorm_core_conn_st_ctx tstorm_st_context;
+	struct regpair tstorm_st_padding[2] /* padding */;
 };
 
 
@@ -573,6 +583,7 @@ enum core_event_opcode {
 	CORE_EVENT_RX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_FLUSH,
 	CORE_EVENT_TX_QUEUE_UPDATE,
+	CORE_EVENT_QUEUE_STATS_QUERY,
 	MAX_CORE_EVENT_OPCODE
 };
 
@@ -601,7 +612,7 @@ struct core_ll2_port_stats {
 
 
 /*
- * Ethernet TX Per Queue Stats
+ * LL2 TX Per Queue Stats
  */
 struct core_ll2_pstorm_per_queue_stat {
 /* number of total bytes sent without errors */
@@ -616,16 +627,8 @@ struct core_ll2_pstorm_per_queue_stat {
 	struct regpair sent_mcast_pkts;
 /* number of total packets sent without errors */
 	struct regpair sent_bcast_pkts;
-};
-
-
-/*
- * Light-L2 RX Producers in Tstorm RAM
- */
-struct core_ll2_rx_prod {
-	__le16 bd_prod /* BD Producer */;
-	__le16 cqe_prod /* CQE Producer */;
-	__le32 reserved;
+/* number of total packets dropped due to errors */
+	struct regpair error_drop_pkts;
 };
 
 
@@ -636,7 +639,6 @@ struct core_ll2_tstorm_per_queue_stat {
 	struct regpair no_buff_discard;
 };
 
-
 struct core_ll2_ustorm_per_queue_stat {
 	struct regpair rcv_ucast_bytes;
 	struct regpair rcv_mcast_bytes;
@@ -647,6 +649,59 @@ struct core_ll2_ustorm_per_queue_stat {
 };
 
 
+/*
+ * Light-L2 RX Producers
+ */
+struct core_ll2_rx_prod {
+	__le16 bd_prod /* BD Producer */;
+	__le16 cqe_prod /* CQE Producer */;
+};
+
+
+
+struct core_ll2_tx_per_queue_stat {
+/* PSTORM per queue statistics */
+	struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
+
+
+/*
+ * Structure for doorbell data, in PWM mode, for RX producers update.
+ */
+struct core_pwm_prod_update_data {
+	__le16 icid /* internal CID */;
+	u8 reserved0;
+	u8 params;
+/* aggregative command. Set DB_AGG_CMD_SET for producer update
+ * (use enum db_agg_cmd_sel)
+ */
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK    0x3
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT   0
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK  0x3F /* Set 0. */
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
+	struct core_ll2_rx_prod prod /* Producers. */;
+};
+
+
+/*
+ * Ramrod data for rx/tx queue statistics query ramrod
+ */
+struct core_queue_stats_query_ramrod_data {
+	u8 rx_stat /* If set, collect RX queue statistics. */;
+	u8 tx_stat /* If set, collect TX queue statistics. */;
+	__le16 reserved[3];
+/* Address of the RX statistics buffer. A core_ll2_rx_per_queue_stat struct
+ * will be written to this address.
+ */
+	struct regpair rx_stat_addr;
+/* Address of the TX statistics buffer. A core_ll2_tx_per_queue_stat struct
+ * will be written to this address.
+ */
+	struct regpair tx_stat_addr;
+};
+
+
 /*
  * Core Ramrod Command IDs (light L2)
  */
@@ -658,6 +713,7 @@ enum core_ramrod_cmd_id {
 	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
 	CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
 	CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
+	CORE_RAMROD_QUEUE_STATS_QUERY /* Queue Statistics Query Ramrod */,
 	MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -772,7 +828,8 @@ struct core_rx_gsi_offload_cqe {
 /* These are the lower 16 bit of QP id in RoCE BTH header */
 	__le16 qp_id;
 	__le32 src_qp /* Source QP from DETH header */;
-	__le32 reserved[3];
+	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+	__le32 reserved;
 };
 
 /*
@@ -803,24 +860,21 @@ union core_rx_cqe_union {
  * Ramrod data for rx queue start ramrod
  */
 struct core_rx_start_ramrod_data {
-	struct regpair bd_base /* bd address of the first bd page */;
+	struct regpair bd_base /* Address of the first BD page */;
 	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
-	__le16 mtu /* Maximum transmission unit */;
+	__le16 mtu /* MTU */;
 	__le16 sb_id /* Status block ID */;
-	u8 sb_index /* index of the protocol index */;
-	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
-	u8 complete_event_flg /* post completion to the event ring if set */;
-	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
-	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
-/* if set, 802.1q tags will be removed and copied to CQE */
-/* if set, 802.1q tags will be removed and copied to CQE */
+	u8 sb_index /* Status block index */;
+	u8 complete_cqe_flg /* if set - post completion to the CQE ring */;
+	u8 complete_event_flg /* if set - post completion to the event ring */;
+	u8 drop_ttl0_flg /* if set - drop packet with ttl=0 */;
+	__le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
+/* if set - 802.1q tag will be removed and copied to CQE */
 	u8 inner_vlan_stripping_en;
-/* if set and inner vlan does not exist, the outer vlan will copied to CQE as
- * inner vlan. should be used in MF_OVLAN mode only.
- */
-	u8 report_outer_vlan;
+/* if set - outer tag won't be stripped, valid only in MF OVLAN mode. */
+	u8 outer_vlan_stripping_dis;
 	u8 queue_id /* Light L2 RX Queue ID */;
-	u8 main_func_queue /* Is this the main queue for the PF */;
+	u8 main_func_queue /* Set if this is the main PFs LL2 queue */;
 /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
  * main_func_queue is set.
  */
@@ -829,17 +883,21 @@ struct core_rx_start_ramrod_data {
  * main_func_queue is set.
  */
 	u8 mf_si_mcast_accept_all;
-/* Specifies how ll2 should deal with packets errors: packet_too_big and
- * no_buff
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
+ * zero out, used for TenantDcb
  */
+/* Specifies how ll2 should deal with RX packets errors */
 	struct core_rx_action_on_error action_on_error;
-/* set when in GSI offload mode on ROCE connection */
-	u8 gsi_offload_flag;
+	u8 gsi_offload_flag /* set for GSI offload mode */;
+/* If set, queue is subject for RX VFC classification. */
+	u8 vport_id_valid;
+	u8 vport_id /* Queue VPORT for RX VFC classification. */;
+	u8 zero_prod_flg /* If set, zero RX producers. */;
 /* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
  * zero out, used for TenantDcb
  */
 	u8 wipe_inner_vlan_pri_en;
-	u8 reserved[5];
+	u8 reserved[2];
 };
 
 
@@ -959,13 +1017,14 @@ struct core_tx_start_ramrod_data {
 	u8 conn_type /* connection type that loaded ll2 */;
 	__le16 pbl_size /* Number of BD pages pointed by PBL */;
 	__le16 qm_pq_id /* QM PQ ID */;
-/* set when in GSI offload mode on ROCE connection */
-	u8 gsi_offload_flag;
+	u8 gsi_offload_flag /* set for GSI offload mode */;
+	u8 ctx_stats_en /* Context statistics enable */;
+/* If set, queue is part of VPORT and subject for TX switching. */
+	u8 vport_id_valid;
 /* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
  * which is per vport
  */
 	u8 vport_id;
-	u8 resrved[2];
 };
 
 
@@ -1048,12 +1107,23 @@ struct eth_pstorm_per_pf_stat {
 	struct regpair sent_gre_bytes /* Sent GRE bytes */;
 	struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
 	struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
-	struct regpair sent_gre_pkts /* Sent GRE packets */;
+	struct regpair sent_mpls_bytes /* Sent MPLS bytes */;
+	struct regpair sent_gre_mpls_bytes /* Sent GRE MPLS bytes (E5 Only) */;
+	struct regpair sent_udp_mpls_bytes /* Sent UDP MPLS bytes (E5 Only) */;
+	struct regpair sent_gre_pkts /* Sent GRE packets (E5 Only) */;
 	struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
 	struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
+	struct regpair sent_mpls_pkts /* Sent MPLS packets (E5 Only) */;
+	struct regpair sent_gre_mpls_pkts /* Sent GRE MPLS packets (E5 Only) */;
+	struct regpair sent_udp_mpls_pkts /* Sent UDP MPLS packets (E5 Only) */;
 	struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
 	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
 	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
+	struct regpair mpls_drop_pkts /* Dropped MPLS TX packets (E5 Only) */;
+/* Dropped GRE MPLS TX packets (E5 Only) */
+	struct regpair gre_mpls_drop_pkts;
+/* Dropped UDP MPLS TX packets (E5 Only) */
+	struct regpair udp_mpls_drop_pkts;
 };
 
 
@@ -1176,6 +1246,8 @@ union event_ring_data {
 	struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
 /* Dedicated fields to iscsi connect done results */
 	struct iscsi_connect_done_results iscsi_conn_done_info;
+	union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
+	struct nvmf_eqe_data nvmf_data /* Dedicated field for NVMf data */;
 	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
 /* VF Initial Cleanup data */
 	struct initial_cleanup_eqe_data vf_init_cleanup;
@@ -1187,10 +1259,14 @@ union event_ring_data {
  */
 struct event_ring_entry {
 	u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
-	u8 opcode /* Event Opcode */;
-	__le16 reserved0 /* Reserved */;
+	u8 opcode /* Event Opcode (Per Protocol Type) */;
+	u8 reserved0 /* Reserved */;
+	u8 vfId /* vfId for this event, 0xFF if this is a PF event */;
 	__le16 echo /* Echo value from ramrod data on the host */;
-	u8 fw_return_code /* FW return code for SP ramrods */;
+/* FW return code for SP ramrods. Use (according to protocol) eth_return_code,
+ * or rdma_fw_return_code, or fcoe_completion_status
+ */
+	u8 fw_return_code;
 	u8 flags;
 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
 #define EVENT_RING_ENTRY_ASYNC_MASK      0x1
@@ -1320,6 +1396,22 @@ enum malicious_vf_error_id {
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
 	ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
+/* packet scanned is too large (can be 9700 at most) */
+	ETH_PACKET_SIZE_TOO_LARGE,
+/* TX packet marked for VLAN insertion when it is illegal */
+	CORE_ILLEGAL_VLAN_MODE,
+/* indicated number of BDs for the packet is illegal */
+	CORE_ILLEGAL_NBDS,
+	CORE_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+/* There are not enough BDs for transmission of even one packet */
+	CORE_INSUFFICIENT_BDS,
+/* TX packet is shorter than reported on BDs or below the minimal size */
+	CORE_PACKET_TOO_SMALL,
+	CORE_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
+	CORE_VLAN_INSERT_AND_INBAND_VLAN /* VLAN can't be added to inband tag */,
+	CORE_MTU_VIOLATION /* TX packet is greater than MTU */,
+	CORE_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+	CORE_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
@@ -1837,6 +1929,23 @@ enum vf_zone_size_mode {
 
 
 
+/*
+ * Xstorm non-triggering VF zone
+ */
+struct xstorm_non_trigger_vf_zone {
+	struct regpair non_edpm_ack_pkts /* RoCE received statistics */;
+};
+
+
+/*
+ * Xstorm VF zone
+ */
+struct xstorm_vf_zone {
+/* non-interrupt-triggering zone */
+	struct xstorm_non_trigger_vf_zone non_trigger;
+};
+
+
 
 /*
  * Attentions status block
@@ -2205,6 +2314,44 @@ struct igu_msix_vector {
 };
 
 
+struct mstorm_core_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 byte1 /* state */;
+	u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16 word0 /* word0 */;
+	__le16 word1 /* word1 */;
+	__le32 reg0 /* reg0 */;
+	__le32 reg1 /* reg1 */;
+};
+
+
 /*
  * per encapsulation type enabling flags
  */
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 085af0a3d..a959aeea7 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -11,98 +11,6 @@
 /****************************************/
 
 
-enum block_addr {
-	GRCBASE_GRC = 0x50000,
-	GRCBASE_MISCS = 0x9000,
-	GRCBASE_MISC = 0x8000,
-	GRCBASE_DBU = 0xa000,
-	GRCBASE_PGLUE_B = 0x2a8000,
-	GRCBASE_CNIG = 0x218000,
-	GRCBASE_CPMU = 0x30000,
-	GRCBASE_NCSI = 0x40000,
-	GRCBASE_OPTE = 0x53000,
-	GRCBASE_BMB = 0x540000,
-	GRCBASE_PCIE = 0x54000,
-	GRCBASE_MCP = 0xe00000,
-	GRCBASE_MCP2 = 0x52000,
-	GRCBASE_PSWHST = 0x2a0000,
-	GRCBASE_PSWHST2 = 0x29e000,
-	GRCBASE_PSWRD = 0x29c000,
-	GRCBASE_PSWRD2 = 0x29d000,
-	GRCBASE_PSWWR = 0x29a000,
-	GRCBASE_PSWWR2 = 0x29b000,
-	GRCBASE_PSWRQ = 0x280000,
-	GRCBASE_PSWRQ2 = 0x240000,
-	GRCBASE_PGLCS = 0x0,
-	GRCBASE_DMAE = 0xc000,
-	GRCBASE_PTU = 0x560000,
-	GRCBASE_TCM = 0x1180000,
-	GRCBASE_MCM = 0x1200000,
-	GRCBASE_UCM = 0x1280000,
-	GRCBASE_XCM = 0x1000000,
-	GRCBASE_YCM = 0x1080000,
-	GRCBASE_PCM = 0x1100000,
-	GRCBASE_QM = 0x2f0000,
-	GRCBASE_TM = 0x2c0000,
-	GRCBASE_DORQ = 0x100000,
-	GRCBASE_BRB = 0x340000,
-	GRCBASE_SRC = 0x238000,
-	GRCBASE_PRS = 0x1f0000,
-	GRCBASE_TSDM = 0xfb0000,
-	GRCBASE_MSDM = 0xfc0000,
-	GRCBASE_USDM = 0xfd0000,
-	GRCBASE_XSDM = 0xf80000,
-	GRCBASE_YSDM = 0xf90000,
-	GRCBASE_PSDM = 0xfa0000,
-	GRCBASE_TSEM = 0x1700000,
-	GRCBASE_MSEM = 0x1800000,
-	GRCBASE_USEM = 0x1900000,
-	GRCBASE_XSEM = 0x1400000,
-	GRCBASE_YSEM = 0x1500000,
-	GRCBASE_PSEM = 0x1600000,
-	GRCBASE_RSS = 0x238800,
-	GRCBASE_TMLD = 0x4d0000,
-	GRCBASE_MULD = 0x4e0000,
-	GRCBASE_YULD = 0x4c8000,
-	GRCBASE_XYLD = 0x4c0000,
-	GRCBASE_PTLD = 0x590000,
-	GRCBASE_YPLD = 0x5b0000,
-	GRCBASE_PRM = 0x230000,
-	GRCBASE_PBF_PB1 = 0xda0000,
-	GRCBASE_PBF_PB2 = 0xda4000,
-	GRCBASE_RPB = 0x23c000,
-	GRCBASE_BTB = 0xdb0000,
-	GRCBASE_PBF = 0xd80000,
-	GRCBASE_RDIF = 0x300000,
-	GRCBASE_TDIF = 0x310000,
-	GRCBASE_CDU = 0x580000,
-	GRCBASE_CCFC = 0x2e0000,
-	GRCBASE_TCFC = 0x2d0000,
-	GRCBASE_IGU = 0x180000,
-	GRCBASE_CAU = 0x1c0000,
-	GRCBASE_RGFS = 0xf00000,
-	GRCBASE_RGSRC = 0x320000,
-	GRCBASE_TGFS = 0xd00000,
-	GRCBASE_TGSRC = 0x322000,
-	GRCBASE_UMAC = 0x51000,
-	GRCBASE_XMAC = 0x210000,
-	GRCBASE_DBG = 0x10000,
-	GRCBASE_NIG = 0x500000,
-	GRCBASE_WOL = 0x600000,
-	GRCBASE_BMBN = 0x610000,
-	GRCBASE_IPC = 0x20000,
-	GRCBASE_NWM = 0x800000,
-	GRCBASE_NWS = 0x700000,
-	GRCBASE_MS = 0x6a0000,
-	GRCBASE_PHY_PCIE = 0x620000,
-	GRCBASE_LED = 0x6b8000,
-	GRCBASE_AVS_WRAP = 0x6b0000,
-	GRCBASE_MISC_AEU = 0x8000,
-	GRCBASE_BAR0_MAP = 0x1c00000,
-	MAX_BLOCK_ADDR
-};
-
-
 enum block_id {
 	BLOCK_GRC,
 	BLOCK_MISCS,
@@ -157,8 +65,6 @@ enum block_id {
 	BLOCK_MULD,
 	BLOCK_YULD,
 	BLOCK_XYLD,
-	BLOCK_PTLD,
-	BLOCK_YPLD,
 	BLOCK_PRM,
 	BLOCK_PBF_PB1,
 	BLOCK_PBF_PB2,
@@ -172,12 +78,9 @@ enum block_id {
 	BLOCK_TCFC,
 	BLOCK_IGU,
 	BLOCK_CAU,
-	BLOCK_RGFS,
-	BLOCK_RGSRC,
-	BLOCK_TGFS,
-	BLOCK_TGSRC,
 	BLOCK_UMAC,
 	BLOCK_XMAC,
+	BLOCK_MSTAT,
 	BLOCK_DBG,
 	BLOCK_NIG,
 	BLOCK_WOL,
@@ -189,8 +92,18 @@ enum block_id {
 	BLOCK_PHY_PCIE,
 	BLOCK_LED,
 	BLOCK_AVS_WRAP,
-	BLOCK_MISC_AEU,
+	BLOCK_PXPREQBUS,
 	BLOCK_BAR0_MAP,
+	BLOCK_MCP_FIO,
+	BLOCK_LAST_INIT,
+	BLOCK_PRS_FC,
+	BLOCK_PBF_FC,
+	BLOCK_NIG_LB_FC,
+	BLOCK_NIG_LB_FC_PLLH,
+	BLOCK_NIG_TX_FC_PLLH,
+	BLOCK_NIG_TX_FC,
+	BLOCK_NIG_RX_FC_PLLH,
+	BLOCK_NIG_RX_FC,
 	MAX_BLOCK_ID
 };
 
@@ -210,10 +123,13 @@ enum bin_dbg_buffer_type {
 	BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
 	BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
 	BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
-	BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
-	BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
-	BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+	BIN_BUF_DBG_BLOCKS /* Blocks debug data */,
+	BIN_BUF_DBG_BLOCKS_CHIP_DATA /* Blocks debug chip data */,
+	BIN_BUF_DBG_BUS_LINES /* Blocks debug bus lines */,
+	BIN_BUF_DBG_BLOCKS_USER_DATA /* Blocks debug user data */,
+	BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA /* Blocks debug chip user data */,
 	BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
+	BIN_BUF_DBG_RESET_REGS /* Reset registers */,
 	BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
 	MAX_BIN_DBG_BUFFER_TYPE
 };
@@ -358,24 +274,95 @@ enum dbg_attn_type {
 
 
 /*
- * Debug Bus block data
+ * Block debug data
  */
-struct dbg_bus_block {
-/* Number of debug lines in this block (excluding signature & latency events) */
-	u8 num_of_lines;
-/* Indicates if this block has a latency events debug line (0/1). */
-	u8 has_latency_events;
-/* Offset of this blocks lines in the Debug Bus lines array. */
-	u16 lines_offset;
+struct dbg_block {
+	u8 name[15] /* Block name */;
+/* The letter (char) of the associated Storm, or 0 if no associated Storm. */
+	u8 associated_storm_letter;
+};
+
+
+/*
+ * Chip-specific block debug data
+ */
+struct dbg_block_chip {
+	u8 flags;
+/* Indicates if the block is removed in this chip (0/1). */
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK           0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT          0
+/* Indicates if this block has a reset register (0/1). */
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK        0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT       1
+/* Indicates if this block should be taken out of reset before GRC Dump (0/1).
+ * Valid only if has_reset_reg is set.
+ */
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK  0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+/* Indicates if this block has a debug bus (0/1). */
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK          0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT         3
+/* Indicates if this block has a latency events debug line (0/1). Valid only
+ * if has_dbg_bus is set.
+ */
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK   0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT  4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK            0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT           5
+/* The DBG block client ID of this block/chip. Valid only if has_dbg_bus is
+ * set.
+ */
+	u8 dbg_client_id;
+/* The ID of the reset register of this block/chip in the dbg_reset_reg
+ * array.
+ */
+	u8 reset_reg_id;
+/* The bit offset of this block/chip in the reset register. Valid only if
+ * has_reset_reg is set.
+ */
+	u8 reset_reg_bit_offset;
+	struct dbg_mode_hdr dbg_bus_mode /* Mode header */;
+	u16 reserved1;
+	u8 reserved2;
+/* Number of Debug Bus lines in this block/chip (excluding signature and latency
+ * events). Valid only if has_dbg_bus is set.
+ */
+	u8 num_of_dbg_bus_lines;
+/* Offset of this block/chip Debug Bus lines in the Debug Bus lines array. Valid
+ * only if has_dbg_bus is set.
+ */
+	u16 dbg_bus_lines_offset;
+/* GRC address of the Debug Bus dbg_select register (in dwords). Valid only if
+ * has_dbg_bus is set.
+ */
+	u32 dbg_select_reg_addr;
+/* GRC address of the Debug Bus dbg_dword_enable register (in dwords). Valid
+ * only if has_dbg_bus is set.
+ */
+	u32 dbg_dword_enable_reg_addr;
+/* GRC address of the Debug Bus dbg_shift register (in dwords). Valid only if
+ * has_dbg_bus is set.
+ */
+	u32 dbg_shift_reg_addr;
+/* GRC address of the Debug Bus dbg_force_valid register (in dwords). Valid only
+ * if has_dbg_bus is set.
+ */
+	u32 dbg_force_valid_reg_addr;
+/* GRC address of the Debug Bus dbg_force_frame register (in dwords). Valid only
+ * if has_dbg_bus is set.
+ */
+	u32 dbg_force_frame_reg_addr;
 };
 
 
 /*
- * Debug Bus block user data
+ * Chip-specific block user debug data
+ */
+struct dbg_block_chip_user {
+/* Number of debug bus lines in this block (excluding signature and latency
+ * events).
  */
-struct dbg_bus_block_user_data {
-/* Number of debug lines in this block (excluding signature & latency events) */
-	u8 num_of_lines;
+	u8 num_of_dbg_bus_lines;
 /* Indicates if this block has a latency events debug line (0/1). */
 	u8 has_latency_events;
 /* Offset of this blocks lines in the debug bus line name offsets array. */
@@ -383,6 +370,14 @@ struct dbg_bus_block_user_data {
 };
 
 
+/*
+ * Block user debug data
+ */
+struct dbg_block_user {
+	u8 name[16] /* Block name */;
+};
+
+
 /*
  * Block Debug line data
  */
@@ -603,51 +598,42 @@ enum dbg_idle_chk_severity_types {
 
 
 /*
- * Debug Bus block data
+ * Reset register
  */
-struct dbg_bus_block_data {
-	__le16 data;
-/* 4-bit value: bit i set -> dword/qword i is enabled. */
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT      0
-/* Number of dwords/qwords to shift right the debug data (0-3) */
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT      4
-/* 4-bit value: bit i set -> dword/qword i is forced valid. */
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
-	u8 line_num /* Debug line number to select */;
-	u8 hw_id /* HW ID associated with the block */;
+struct dbg_reset_reg {
+	u32 data;
+#define DBG_RESET_REG_ADDR_MASK        0xFFFFFF /* GRC address (in dwords) */
+#define DBG_RESET_REG_ADDR_SHIFT       0
+/* indicates if this register is removed (0/1). */
+#define DBG_RESET_REG_IS_REMOVED_MASK  0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK    0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT   25
 };
 
 
 /*
- * Debug Bus Clients
- */
-enum dbg_bus_clients {
-	DBG_BUS_CLIENT_RBCN,
-	DBG_BUS_CLIENT_RBCP,
-	DBG_BUS_CLIENT_RBCR,
-	DBG_BUS_CLIENT_RBCT,
-	DBG_BUS_CLIENT_RBCU,
-	DBG_BUS_CLIENT_RBCF,
-	DBG_BUS_CLIENT_RBCX,
-	DBG_BUS_CLIENT_RBCS,
-	DBG_BUS_CLIENT_RBCH,
-	DBG_BUS_CLIENT_RBCZ,
-	DBG_BUS_CLIENT_OTHER_ENGINE,
-	DBG_BUS_CLIENT_TIMESTAMP,
-	DBG_BUS_CLIENT_CPU,
-	DBG_BUS_CLIENT_RBCY,
-	DBG_BUS_CLIENT_RBCQ,
-	DBG_BUS_CLIENT_RBCM,
-	DBG_BUS_CLIENT_RBCB,
-	DBG_BUS_CLIENT_RBCW,
-	DBG_BUS_CLIENT_RBCV,
-	MAX_DBG_BUS_CLIENTS
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+/* 4 bit value, bit i set -> dword/qword i is enabled in block. */
+	u8 enable_mask;
+/* Number of dwords/qwords to cyclically shift right the block's output (0-3). */
+	u8 right_shift;
+/* 4 bit value, bit i set -> dword/qword i is forced valid in block. */
+	u8 force_valid_mask;
+/* 4 bit value, bit i set -> dword/qword i frame bit is forced in block. */
+	u8 force_frame_mask;
+/* bit i set -> dword i contains this block's data (after shifting). */
+	u8 dword_mask;
+	u8 line_num /* Debug line number to select */;
+	u8 hw_id /* HW ID associated with the block */;
+	u8 flags;
+/* 0/1. If 1, the debug line is 256b, otherwise it is 128b. */
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK  0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK      0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT     1
 };
 
 
@@ -673,15 +659,19 @@ enum dbg_bus_constraint_ops {
  * Debug Bus trigger state data
  */
 struct dbg_bus_trigger_state_data {
-	u8 data;
-/* 4-bit value: bit i set -> dword i of the trigger state block
- * (after right shift) is enabled.
- */
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK  0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-/* 4-bit value: bit i set -> dword i is compared by a constraint */
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK      0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT     4
+/* Message length (in cycles) to be used for message-based trigger constraints.
+ * If set to 0, message length is based only on frame bit received from HW.
+ */
+	u8 msg_len;
+/* A bit for each dword in the debug bus cycle, indicating if this dword appears
+ * in a trigger constraint (1) or not (0)
+ */
+	u8 constraint_dword_mask;
+/* Storm ID to trigger on. Valid only when triggering on Storm data.
+ * (use enum dbg_storms)
+ */
+	u8 storm_id;
+	u8 reserved;
 };
 
 /*
@@ -751,11 +741,7 @@ struct dbg_bus_storm_data {
 struct dbg_bus_data {
 	u32 app_version /* The tools version number of the application */;
 	u8 state /* The current debug bus state */;
-	u8 hw_dwords /* HW dwords per cycle */;
-/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
- * HW ID of dword/qword i
- */
-	u16 hw_id_mask;
+	u8 mode_256b_en /* Indicates if the 256 bit mode is enabled */;
 	u8 num_enabled_blocks /* Number of blocks enabled for recording */;
 	u8 num_enabled_storms /* Number of Storms enabled for recording */;
 	u8 target /* Output target */;
@@ -777,102 +763,46 @@ struct dbg_bus_data {
  * Valid only if both filter and trigger are enabled (0/1)
  */
 	u8 filter_post_trigger;
-	u16 reserved;
 /* Indicates if the recording trigger is enabled (0/1) */
 	u8 trigger_en;
-/* trigger states data */
-	struct dbg_bus_trigger_state_data trigger_states[3];
+/* A bit for each dword in the debug bus cycle, indicating if this dword
+ * appears in a filter constraint (1) or not (0)
+ */
+	u8 filter_constraint_dword_mask;
 	u8 next_trigger_state /* ID of next trigger state to be added */;
 /* ID of next filter/trigger constraint to be added */
 	u8 next_constraint_id;
-/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
- * assigned a different HW ID (0/1)
+/* trigger states data */
+	struct dbg_bus_trigger_state_data trigger_states[3];
+/* Message length (in cycles) to be used for message-based filter constraints.
+ * If set to 0 message length is based only on frame bit received from HW.
  */
-	u8 unify_inputs;
+	u8 filter_msg_len;
 /* Indicates if the other engine sends it NW recording to this engine (0/1) */
 	u8 rcv_from_other_engine;
+/* A bit for each dword in the debug bus cycle, indicating if this dword is
+ * recorded (1) or not (0)
+ */
+	u8 blocks_dword_mask;
+/* Indicates if there are dwords in the debug bus cycle which are recorded
+ * by more than one block (0/1)
+ */
+	u8 blocks_dword_overlap;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+	u32 hw_id_mask;
 /* Debug Bus PCI buffer data. Valid only when the target is
  * DBG_BUS_TARGET_ID_PCI.
  */
 	struct dbg_bus_pci_buf_data pci_buf;
 /* Debug Bus data for each block */
-	struct dbg_bus_block_data blocks[88];
+	struct dbg_bus_block_data blocks[132];
 /* Debug Bus data for each block */
 	struct dbg_bus_storm_data storms[6];
 };
 
 
-/*
- * Debug bus filter types
- */
-enum dbg_bus_filter_types {
-	DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
-	DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
-	DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
-	DBG_BUS_FILTER_TYPE_ON /* filter always on */,
-	MAX_DBG_BUS_FILTER_TYPES
-};
-
-
-/*
- * Debug bus frame modes
- */
-enum dbg_bus_frame_modes {
-	DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
-	DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
-	DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
-	MAX_DBG_BUS_FRAME_MODES
-};
-
-
-/*
- * Debug bus other engine mode
- */
-enum dbg_bus_other_engine_modes {
-	DBG_BUS_OTHER_ENGINE_MODE_NONE,
-	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
-	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
-	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
-	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
-	MAX_DBG_BUS_OTHER_ENGINE_MODES
-};
-
-
-
-/*
- * Debug bus post-trigger recording types
- */
-enum dbg_bus_post_trigger_types {
-	DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
-	DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
-	MAX_DBG_BUS_POST_TRIGGER_TYPES
-};
-
-
-/*
- * Debug bus pre-trigger recording types
- */
-enum dbg_bus_pre_trigger_types {
-	DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
-/* start recording some chunks before trigger */
-	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
-	DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
-	MAX_DBG_BUS_PRE_TRIGGER_TYPES
-};
-
-
-/*
- * Debug bus SEMI frame modes
- */
-enum dbg_bus_semi_frame_modes {
-/* 0 slow dwords, 4 fast dwords */
-	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
-/* 4 slow dwords, 0 fast dwords */
-	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
-	MAX_DBG_BUS_SEMI_FRAME_MODES
-};
-
-
 /*
  * Debug bus states
  */
@@ -901,6 +831,8 @@ enum dbg_bus_storm_modes {
 	DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
 	DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
 	DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+/* recording handlers with store messages (fast debug) */
+	DBG_BUS_STORM_MODE_RH_WITH_STORE,
 	DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
 	DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
 	MAX_DBG_BUS_STORM_MODES
@@ -955,14 +887,13 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
-/* MCP Trace meta data size in bytes */
-	DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+	DBG_GRC_PARAM_DUMP_DORQ /* dump DORQ memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
-	DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+	DBG_GRC_PARAM_RESERVD1 /* reserved */,
 	DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_DMAE /* dump PRS memories (0/1) */,
@@ -971,8 +902,9 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */,
 	DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
 	DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
-	DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
-	DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+	DBG_GRC_PARAM_RESERVED2 /* reserved */,
+/* MCP Trace meta data size in bytes */
+	DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
 /* preset: exclude all memories from dump (1 only) */
 	DBG_GRC_PARAM_EXCLUDE_ALL,
 /* preset: include memories for crash dump (1 only) */
@@ -983,26 +915,12 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
 	DBG_GRC_PARAM_NO_MCP /* dont perform MCP commands (0/1) */,
 	DBG_GRC_PARAM_NO_FW_VER /* dont read FW/MFW version (0/1) */,
+	DBG_GRC_PARAM_RESERVED3 /* reserved */,
+	DBG_GRC_PARAM_DUMP_MCP_HW_DUMP /* dump MCP HW Dump (0/1) */,
 	MAX_DBG_GRC_PARAMS
 };
 
 
-/*
- * Debug reset registers
- */
-enum dbg_reset_regs {
-	DBG_RESET_REG_MISCS_PL_UA,
-	DBG_RESET_REG_MISCS_PL_HV,
-	DBG_RESET_REG_MISCS_PL_HV_2,
-	DBG_RESET_REG_MISC_PL_UA,
-	DBG_RESET_REG_MISC_PL_HV,
-	DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
-	DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
-	DBG_RESET_REG_MISC_PL_PDA_VAUX,
-	MAX_DBG_RESET_REGS
-};
-
-
 /*
  * Debug status codes
  */
@@ -1016,15 +934,15 @@ enum dbg_status {
 	DBG_STATUS_INVALID_PCI_BUF_SIZE,
 	DBG_STATUS_PCI_BUF_ALLOC_FAILED,
 	DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
-	DBG_STATUS_TOO_MANY_INPUTS,
-	DBG_STATUS_INPUT_OVERLAP,
-	DBG_STATUS_HW_ONLY_RECORDING,
+	DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+	DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+	DBG_STATUS_VFC_READ_ERROR,
 	DBG_STATUS_STORM_ALREADY_ENABLED,
 	DBG_STATUS_STORM_NOT_ENABLED,
 	DBG_STATUS_BLOCK_ALREADY_ENABLED,
 	DBG_STATUS_BLOCK_NOT_ENABLED,
 	DBG_STATUS_NO_INPUT_ENABLED,
-	DBG_STATUS_NO_FILTER_TRIGGER_64B,
+	DBG_STATUS_NO_FILTER_TRIGGER_256B,
 	DBG_STATUS_FILTER_ALREADY_ENABLED,
 	DBG_STATUS_TRIGGER_ALREADY_ENABLED,
 	DBG_STATUS_TRIGGER_NOT_ENABLED,
@@ -1049,7 +967,7 @@ enum dbg_status {
 	DBG_STATUS_MCP_TRACE_NO_META,
 	DBG_STATUS_MCP_COULD_NOT_HALT,
 	DBG_STATUS_MCP_COULD_NOT_RESUME,
-	DBG_STATUS_RESERVED2,
+	DBG_STATUS_RESERVED0,
 	DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
 	DBG_STATUS_IGU_FIFO_BAD_DATA,
 	DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -1057,10 +975,15 @@ enum dbg_status {
 	DBG_STATUS_REG_FIFO_BAD_DATA,
 	DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
 	DBG_STATUS_DBG_ARRAY_NOT_SET,
-	DBG_STATUS_FILTER_BUG,
+	DBG_STATUS_RESERVED1,
 	DBG_STATUS_NON_MATCHING_LINES,
-	DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+	DBG_STATUS_INSUFFICIENT_HW_IDS,
 	DBG_STATUS_DBG_BUS_IN_USE,
+	DBG_STATUS_INVALID_STORM_DBG_MODE,
+	DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+	DBG_STATUS_FILTER_SINGLE_HW_ID,
+	DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+	DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
 	MAX_DBG_STATUS
 };
 
@@ -1108,9 +1031,9 @@ struct dbg_tools_data {
 	struct idle_chk_data idle_chk /* Idle Check data */;
 	u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
 /* Indicates if a block is in reset state (0/1) */
-	u8 block_in_reset[88];
+	u8 block_in_reset[132];
 	u8 chip_id /* Chip ID (from enum chip_ids) */;
-	u8 platform_id /* Platform ID */;
+	u8 hw_type /* HW Type */;
 	u8 num_ports /* Number of ports in the chip */;
 	u8 num_pfs_per_port /* Number of PFs in each port */;
 	u8 num_vfs /* Number of VFs in the chip */;
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index b1cab2910..bd7bd8658 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -34,7 +34,7 @@ struct xstorm_eth_conn_st_ctx {
 
 struct xstorm_eth_conn_ag_ctx {
 	u8 reserved0 /* cdu_validation */;
-	u8 eth_state /* state */;
+	u8 state /* state */;
 	u8 flags0;
 /* exist_in_qm0 */
 #define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
@@ -303,57 +303,6 @@ struct xstorm_eth_conn_ag_ctx {
 	__le16 word15 /* word15 */;
 };
 
-/*
- * The eth storm context for the Ystorm
- */
-struct ystorm_eth_conn_st_ctx {
-	__le32 reserved[8];
-};
-
-struct ystorm_eth_conn_ag_ctx {
-	u8 byte0 /* cdu_validation */;
-	u8 state /* state */;
-	u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
-	u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1 /* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1 /* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1 /* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1 /* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
-	u8 tx_q0_int_coallecing_timeset /* byte2 */;
-	u8 byte3 /* byte3 */;
-	__le16 word0 /* word0 */;
-	__le32 terminate_spqe /* reg0 */;
-	__le32 reg1 /* reg1 */;
-	__le16 tx_bd_cons_upd /* word1 */;
-	__le16 word2 /* word2 */;
-	__le16 word3 /* word3 */;
-	__le16 word4 /* word4 */;
-	__le32 reg2 /* reg2 */;
-	__le32 reg3 /* reg3 */;
-};
-
 struct tstorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 byte1 /* state */;
@@ -458,6 +407,57 @@ struct tstorm_eth_conn_ag_ctx {
 	__le32 reg10 /* reg10 */;
 };
 
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+struct ystorm_eth_conn_ag_ctx {
+	u8 byte0 /* cdu_validation */;
+	u8 state /* state */;
+	u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1 /* exist_in_qm0 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1 /* exist_in_qm1 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3 /* cf0 */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3 /* cf1 */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3 /* cf2 */
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+	u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1 /* cf0en */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1 /* cf1en */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1 /* cf2en */
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1 /* rule0en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1 /* rule1en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1 /* rule2en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1 /* rule3en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1 /* rule4en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
+	u8 tx_q0_int_coallecing_timeset /* byte2 */;
+	u8 byte3 /* byte3 */;
+	__le16 word0 /* word0 */;
+	__le32 terminate_spqe /* reg0 */;
+	__le32 reg1 /* reg1 */;
+	__le16 tx_bd_cons_upd /* word1 */;
+	__le16 word2 /* word2 */;
+	__le16 word3 /* word3 */;
+	__le16 word4 /* word4 */;
+	__le32 reg2 /* reg2 */;
+	__le32 reg3 /* reg3 */;
+};
+
 struct ustorm_eth_conn_ag_ctx {
 	u8 byte0 /* cdu_validation */;
 	u8 byte1 /* state */;
@@ -557,12 +557,12 @@ struct eth_conn_context {
 	struct xstorm_eth_conn_st_ctx xstorm_st_context;
 /* xstorm aggregative context */
 	struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+/* tstorm aggregative context */
+	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
 /* ystorm storm context */
 	struct ystorm_eth_conn_st_ctx ystorm_st_context;
 /* ystorm aggregative context */
 	struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
-/* tstorm aggregative context */
-	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
 /* ustorm aggregative context */
 	struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
 /* ustorm storm context */
@@ -792,16 +792,34 @@ enum eth_ramrod_cmd_id {
 struct eth_return_code {
 	u8 value;
 /* error code (use enum eth_error_code) */
-#define ETH_RETURN_CODE_ERR_CODE_MASK  0x1F
+#define ETH_RETURN_CODE_ERR_CODE_MASK  0x3F
 #define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
-#define ETH_RETURN_CODE_RESERVED_MASK  0x3
-#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RESERVED_MASK  0x1
+#define ETH_RETURN_CODE_RESERVED_SHIFT 6
 /* rx path - 0, tx path - 1 */
 #define ETH_RETURN_CODE_RX_TX_MASK     0x1
 #define ETH_RETURN_CODE_RX_TX_SHIFT    7
 };
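The error-code field above now spans bits [5:0] (mask 0x3F) rather than [4:0], leaving a single reserved bit at bit 6. A minimal sketch of pulling the fields out of the returned byte, assuming the usual GET_FIELD helper from common_hsi.h (((val) >> NAME##_SHIFT) & NAME##_MASK); the function names are illustrative only:

	static u8 example_eth_return_err_code(struct eth_return_code ret)
	{
		/* err_code now occupies bits [5:0] */
		return GET_FIELD(ret.value, ETH_RETURN_CODE_ERR_CODE);
	}

	static u8 example_eth_return_is_tx(struct eth_return_code ret)
	{
		/* bit 7: 0 = RX path, 1 = TX path */
		return GET_FIELD(ret.value, ETH_RETURN_CODE_RX_TX);
	}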
 
 
+/*
+ * tx destination enum
+ */
+enum eth_tx_dst_mode_config_enum {
+/* tx destination configuration override is disabled */
+	ETH_TX_DST_MODE_CONFIG_DISABLE,
+/* tx destination configuration override is enabled, vport and tx dst will be
+ * taken from the 4th bd
+ */
+	ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
+/* tx destination configuration override is enabled, vport and tx dst will be
+ * taken from the vport data
+ */
+	ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
+	MAX_ETH_TX_DST_MODE_CONFIG_ENUM
+};
+
+
 /*
  * What to do in case an error occurs
  */
@@ -1431,7 +1449,7 @@ struct vport_update_ramrod_data {
 
 struct E4XstormEthConnAgCtxDqExtLdPart {
 	u8 reserved0 /* cdu_validation */;
-	u8 eth_state /* state */;
+	u8 state /* state */;
 	u8 flags0;
 /* exist_in_qm0 */
 #define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK            0x1
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index d77edaa1d..7efe2eff1 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -88,7 +88,18 @@ struct init_nig_pri_tc_map_req {
 
 
 /*
- * QM per-port init parameters
+ * QM per global RL init parameters
+ */
+struct init_qm_global_rl_params {
+/* Rate limit in Mb/sec units. If set to zero, the link speed is used
+ * instead.
+ */
+	u32 rate_limit;
+};
+
+
+/*
+ * QM per port init parameters
  */
 struct init_qm_port_params {
 	u8 active /* Indicates if this port is active */;
@@ -111,24 +122,20 @@ struct init_qm_pq_params {
 	u8 wrr_group /* WRR group */;
 /* Indicates if a rate limiter should be allocated for the PQ (0/1) */
 	u8 rl_valid;
+	u16 rl_id /* RL ID, valid only if rl_valid is true */;
 	u8 port_id /* Port ID */;
-	u8 reserved0;
-	u16 reserved1;
+	u8 reserved;
 };
 
 
 /*
- * QM per-vport init parameters
+ * QM per VPORT init parameters
  */
 struct init_qm_vport_params {
-/* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if
- * VPORT RL is globally disabled.
- */
-	u32 vport_rl;
 /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is
  * globally disabled.
  */
-	u16 vport_wfq;
+	u16 wfq;
 /* the first Tx PQ ID associated with this VPORT for each TC. */
 	u16 first_tx_pq_id[NUM_OF_TCS];
 };
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 1fe4bfc61..4f878d061 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -46,10 +46,24 @@ enum bin_init_buffer_type {
 	BIN_BUF_INIT_VAL /* init data */,
 	BIN_BUF_INIT_MODE_TREE /* init modes tree */,
 	BIN_BUF_INIT_IRO /* internal RAM offsets */,
+	BIN_BUF_INIT_OVERLAYS /* FW overlays (except overlay 0) */,
 	MAX_BIN_INIT_BUFFER_TYPE
 };
 
 
+/*
+ * FW overlay buffer header
+ */
+struct fw_overlay_buf_hdr {
+	u32 data;
+#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK  0xFF /* Storm ID */
+#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
+/* Size of Storm FW overlay buffer in dwords */
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK  0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
+};
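Each storm's FW overlay image in the new BIN_BUF_INIT_OVERLAYS buffer is prefixed by this one-dword header. A sketch of walking such a buffer, assuming each header is immediately followed by BUF_SIZE dwords of overlay data for the named storm; the walker name and that layout assumption are not taken from this patch:

	static void example_walk_fw_overlays(const u32 *buf, u32 buf_dwords)
	{
		u32 pos = 0;

		while (pos < buf_dwords) {
			struct fw_overlay_buf_hdr hdr;
			u8 storm_id;
			u32 chunk_dwords;

			hdr.data = buf[pos++];
			storm_id = GET_FIELD(hdr.data, FW_OVERLAY_BUF_HDR_STORM_ID);
			chunk_dwords = GET_FIELD(hdr.data,
						 FW_OVERLAY_BUF_HDR_BUF_SIZE);

			/* hand the chunk for storm_id to the loader, then skip it */
			(void)storm_id;
			pos += chunk_dwords;
		}
	}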
+
+
 /*
  * init array header: raw
  */
@@ -117,6 +131,30 @@ union init_array_hdr {
 };
 
 
+enum dbg_bus_clients {
+	DBG_BUS_CLIENT_RBCN,
+	DBG_BUS_CLIENT_RBCP,
+	DBG_BUS_CLIENT_RBCR,
+	DBG_BUS_CLIENT_RBCT,
+	DBG_BUS_CLIENT_RBCU,
+	DBG_BUS_CLIENT_RBCF,
+	DBG_BUS_CLIENT_RBCX,
+	DBG_BUS_CLIENT_RBCS,
+	DBG_BUS_CLIENT_RBCH,
+	DBG_BUS_CLIENT_RBCZ,
+	DBG_BUS_CLIENT_OTHER_ENGINE,
+	DBG_BUS_CLIENT_TIMESTAMP,
+	DBG_BUS_CLIENT_CPU,
+	DBG_BUS_CLIENT_RBCY,
+	DBG_BUS_CLIENT_RBCQ,
+	DBG_BUS_CLIENT_RBCM,
+	DBG_BUS_CLIENT_RBCB,
+	DBG_BUS_CLIENT_RBCW,
+	DBG_BUS_CLIENT_RBCV,
+	MAX_DBG_BUS_CLIENTS
+};
+
+
 enum init_modes {
 	MODE_BB_A0_DEPRECATED,
 	MODE_BB,
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 6a79db52e..0aed043bb 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -12,6 +12,8 @@
 #include "reg_addr.h"
 #include "ecore_utils.h"
 #include "ecore_iov_api.h"
+#include "ecore_gtt_values.h"
+#include "ecore_dev_api.h"
 
 #ifndef ASIC_ONLY
 #define ECORE_EMUL_FACTOR 2000
@@ -78,6 +80,20 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+{
+	u32 gtt_base;
+	u32 i;
+
+	/* Set the global windows */
+	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+		if (pxp_global_win[i])
+			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+			       pxp_global_win[i]);
+}
+
 void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_ptt *p_ptt;
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index e43f337dc..238bdb9db 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -8,9 +8,8 @@
 #define __ECORE_HW_H__
 
 #include "ecore.h"
-#include "ecore_dev_api.h"
 
-/* Forward decleration */
+/* Forward declaration */
 struct ecore_ptt;
 
 enum reserved_ptts {
@@ -53,10 +52,8 @@ enum reserved_ptts {
 * @brief ecore_gtt_init - Initialize GTT windows
 *
 * @param p_hwfn
-* @param p_ptt
 */
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
-		    struct ecore_ptt *p_ptt);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
 
 /**
  * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
@@ -84,7 +81,6 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
 /**
  * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
  *
- * @param p_hwfn
  * @param p_ptt
  *
  * @return u32
@@ -95,8 +91,8 @@ u32 ecore_ptt_get_bar_addr(struct ecore_ptt	*p_ptt);
  * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
  *
  * @param p_hwfn
- * @param new_hw_addr
  * @param p_ptt
+ * @param new_hw_addr
  */
 void ecore_ptt_set_win(struct ecore_hwfn	*p_hwfn,
 		       struct ecore_ptt		*p_ptt,
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 34bcc4249..9f614a4cf 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -663,10 +663,10 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
 
 	/* Go over all PF VPORTs */
 	for (i = 0; i < num_vports; i++) {
-		if (!vport_params[i].vport_wfq)
+		if (!vport_params[i].wfq)
 			continue;
 
-		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+		inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
 		if (inc_val > QM_WFQ_MAX_INC_VAL) {
 			DP_NOTICE(p_hwfn, true,
 				  "Invalid VPORT WFQ weight configuration\n");
@@ -709,8 +709,7 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
 
 	/* Go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
-			  vport_params[i].vport_rl : link_speed);
+		inc_val = QM_RL_INC_VAL(link_speed);
 		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 			DP_NOTICE(p_hwfn, true,
 				  "Invalid VPORT rate-limit configuration\n");
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 8f7209100..ad8570a08 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -534,53 +534,6 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
-		    struct ecore_ptt *p_ptt)
-{
-	u32 gtt_base;
-	u32 i;
-
-#ifndef ASIC_ONLY
-	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
-		/* This is done by MFW on ASIC; regardless, this should only
-		 * be done once per chip [i.e., common]. Implementation is
-		 * not too bright, but it should work on the simple FPGA/EMUL
-		 * scenarios.
-		 */
-		static bool initialized;
-		int poll_cnt = 500;
-		u32 val;
-
-		/* initialize PTT/GTT (poll for completion) */
-		if (!initialized) {
-			ecore_wr(p_hwfn, p_ptt,
-				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
-			initialized = true;
-		}
-
-		do {
-			/* ptt might be overrided by HW until this is done */
-			OSAL_UDELAY(10);
-			ecore_ptt_invalidate(p_hwfn);
-			val = ecore_rd(p_hwfn, p_ptt,
-				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
-		} while ((val != 1) && --poll_cnt);
-
-		if (!poll_cnt)
-			DP_ERR(p_hwfn,
-			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
-	}
-#endif
-
-	/* Set the global windows */
-	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
-
-	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
-		if (pxp_global_win[i])
-			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
-			       pxp_global_win[i]);
-}
-
 enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
 #ifdef CONFIG_ECORE_BINARY_FW
 					const u8 *fw_data)
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index de7846d46..21e433309 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -97,14 +97,4 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
 #define STORE_RT_REG_AGG(hwfn, offset, val)			\
 	ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
 
-
-/**
- * @brief
- *      Initialize GTT global windows and set admin window
- *      related params of GTT/PTT to default values.
- *
- * @param p_hwfn
- */
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
-		    struct ecore_ptt *p_ptt);
 #endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index 12d45c1c5..b146faff9 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -35,207 +35,239 @@
 #define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \
 	((queue_zone_id) * IRO[7].m1))
 #define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) (IRO[8].base + ((pq_id) * IRO[8].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
 /* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
 /* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
 /* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
 /* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
 /* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
 /* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
 /* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[14].base + \
-	((core_rx_queue_id) * IRO[14].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[21].base + \
+	((core_rx_queue_id) * IRO[21].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
 /* Tstorm LightL2 queue statistics */
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
-	(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+	(IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
 /* Ustorm LiteL2 queue statistics */
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
-	(IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+	(IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
 /* Pstorm LiteL2 queue statistics */
 #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
-	(IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
+	(IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
 /* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[18].base + \
-	((stat_counter_id) * IRO[18].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[19].base + \
-	((queue_id) * IRO[19].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
+	((stat_counter_id) * IRO[25].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
 /* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
  * mode.
  */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[20].base + \
-	((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[27].base + \
+	((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[28].base + \
+	((queue_id) * IRO[28].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
 /* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
 /* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[23].base + \
-	((stat_counter_id) * IRO[23].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
+	((stat_counter_id) * IRO[30].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
 /* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[31].base + ((pf_id) * IRO[31].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
 /* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
-	((stat_counter_id) * IRO[25].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[32].base + \
+	((stat_counter_id) * IRO[32].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
 /* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[26].base + ((pf_id) * IRO[26].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
 /* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[27].base + \
-	((ethType_id) * IRO[27].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[34].base + \
+	((ethType_id) * IRO[34].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
 /* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
 /* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
 /* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
  * Use eth_tstorm_rss_update_data for update.
  */
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[30].base + \
-	((pf_id) * IRO[30].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
+#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[37].base + \
+	((pf_id) * IRO[37].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
 /* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[31].base + \
-	((queue_id) * IRO[31].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[38].base + \
+	((queue_id) * IRO[38].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
 /* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
-	((rss_id) * IRO[32].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[39].base + \
+	((rss_id) * IRO[39].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
 /* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[33].base + \
-	((rss_id) * IRO[33].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[40].base + \
+	((rss_id) * IRO[40].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
 /* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[34].base + \
-	((pf_id) * IRO[34].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[41].base + \
+	((pf_id) * IRO[41].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
 /* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[35].base + \
-	((cmdq_queue_id) * IRO[35].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[42].base + \
+	((cmdq_queue_id) * IRO[42].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
 /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
  * BDqueue-id
  */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
-	((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+	(IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
+	((bdq_id) * IRO[43].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
 /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[37].base + \
-	((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
+	(IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
+	((bdq_id) * IRO[44].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
 /* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
-	((pf_id) * IRO[38].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[45].base + \
+	((storage_func_id) * IRO[45].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
 /* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
-	((pf_id) * IRO[39].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[46].base + \
+	((storage_func_id) * IRO[46].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
 /* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[40].base + \
-	((pf_id) * IRO[40].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) (IRO[47].base + \
+	((storage_func_id) * IRO[47].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
 /* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
-	((pf_id) * IRO[41].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[48].base + \
+	((storage_func_id) * IRO[48].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
 /* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
-	((pf_id) * IRO[42].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[49].base + \
+	((storage_func_id) * IRO[49].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
 /* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[43].base + \
-	((pf_id) * IRO[43].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) (IRO[50].base + \
+	((storage_func_id) * IRO[50].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
 /* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[44].base + \
-	((pf_id) * IRO[44].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[51].base + \
+	((pf_id) * IRO[51].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
 /* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[45].base + \
-	((pf_id) * IRO[45].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[52].base + \
+	((pf_id) * IRO[52].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
 /* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
-	((rdma_stat_counter_id) * IRO[46].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[53].base + \
+	((rdma_stat_counter_id) * IRO[53].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
 /* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[47].base + \
-	((rdma_stat_counter_id) * IRO[47].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[54].base + \
+	((rdma_stat_counter_id) * IRO[54].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
 /* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[48].base + \
-	((pf_id) * IRO[48].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[55].base + \
+	((pf_id) * IRO[55].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
 /* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[49].base + \
-	((pf_id) * IRO[49].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[56].base + \
+	((pf_id) * IRO[56].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
 /* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[50].base + \
-	((pf_id) * IRO[50].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[57].base + \
+	((pf_id) * IRO[57].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
 /* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[51].base + \
-	((pf_id) * IRO[51].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[58].base + \
+	((pf_id) * IRO[58].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
 /* Mstorm error level for assert */
-#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[52].base + \
-	((pf_id) * IRO[52].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[59].base + \
+	((pf_id) * IRO[59].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
 /* Ustorm error level for assert */
-#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[53].base + \
-	((pf_id) * IRO[53].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[60].base + \
+	((pf_id) * IRO[60].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
 /* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[54].base + \
-	((pf_id) * IRO[54].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[61].base + \
+	((pf_id) * IRO[61].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
 /* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[55].base + \
-	((roce_pf_id) * IRO[55].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[62].base + \
+	((roce_pf_id) * IRO[62].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
 /* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[56].base + \
-	((roce_pf_id) * IRO[56].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[63].base + \
+	((roce_pf_id) * IRO[63].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
 /* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) (IRO[57].base + \
-	((roce_pf_id) * IRO[57].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) (IRO[64].base + \
+	((roce_pf_id) * IRO[64].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
 /* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[58].base + \
-	((roce_pf_id) * IRO[58].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[65].base + \
+	((roce_pf_id) * IRO[65].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
 /* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) (IRO[59].base + \
-	((roce_pf_id) * IRO[59].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) (IRO[66].base + \
+	((roce_pf_id) * IRO[66].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+/* Tstorm NVMf per-port taskpool producer/consumer data */
+#define TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_OFFSET(port_num_id, \
+	taskpool_index) (IRO[67].base + ((port_num_id) * IRO[67].m1) + \
+	((taskpool_index) * IRO[67].m2))
+#define TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_SIZE (IRO[67].size)
+/* Ustorm NVMf per port counters */
+#define USTORM_NVMF_PORT_COUNTERS_OFFSET(port_num_id) (IRO[68].base + \
+	((port_num_id) * IRO[68].m1))
+#define USTORM_NVMF_PORT_COUNTERS_SIZE (IRO[68].size)
 
-#endif /* __IRO_H__ */
+#endif
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 30e632ce1..6442057ac 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -7,127 +7,221 @@
 #ifndef __IRO_VALUES_H__
 #define __IRO_VALUES_H__
 
-static const struct iro iro_arr[60] = {
-/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
-	{      0x0,      0x0,      0x0,      0x0,      0x8},
-/* TSTORM_PORT_STAT_OFFSET(port_id) */
-	{   0x4cb8,     0x88,      0x0,      0x0,     0x88},
-/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
-	{   0x6530,     0x20,      0x0,      0x0,     0x20},
-/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
-	{    0xb00,      0x8,      0x0,      0x0,      0x4},
-/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
-	{    0xa80,      0x8,      0x0,      0x0,      0x4},
-/* USTORM_EQE_CONS_OFFSET(pf_id) */
-	{      0x0,      0x8,      0x0,      0x0,      0x2},
-/* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
-	{     0x80,      0x8,      0x0,      0x0,      0x4},
-/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
-	{     0x84,      0x8,      0x0,      0x0,      0x2},
-/* XSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x4c48,      0x0,      0x0,      0x0,     0x78},
-/* YSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x3e38,      0x0,      0x0,      0x0,     0x78},
-/* PSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x3ef8,      0x0,      0x0,      0x0,     0x78},
-/* TSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x4c40,      0x0,      0x0,      0x0,     0x78},
-/* MSTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x4998,      0x0,      0x0,      0x0,     0x78},
-/* USTORM_INTEG_TEST_DATA_OFFSET */
-	{   0x7f50,      0x0,      0x0,      0x0,     0x78},
-/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
-	{    0xa28,      0x8,      0x0,      0x0,      0x8},
-/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
-	{   0x6210,     0x10,      0x0,      0x0,     0x10},
-/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
-	{   0xb820,     0x30,      0x0,      0x0,     0x30},
-/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
-	{   0xa990,     0x30,      0x0,      0x0,     0x30},
-/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
-	{   0x4b68,     0x80,      0x0,      0x0,     0x40},
-/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
-	{    0x1f8,      0x4,      0x0,      0x0,      0x4},
-/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
-	{   0x53a8,     0x80,      0x4,      0x0,      0x4},
-/* MSTORM_TPA_TIMEOUT_US_OFFSET */
-	{   0xc7d0,      0x0,      0x0,      0x0,      0x4},
-/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
-	{   0x4ba8,     0x80,      0x0,      0x0,     0x20},
-/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
-	{   0x8158,     0x40,      0x0,      0x0,     0x30},
-/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
-	{   0xe770,     0x60,      0x0,      0x0,     0x60},
-/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
-	{   0x4090,     0x80,      0x0,      0x0,     0x38},
-/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
-	{   0xfea8,     0x78,      0x0,      0x0,     0x78},
-/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
-	{    0x1f8,      0x4,      0x0,      0x0,      0x4},
-/* TSTORM_ETH_PRS_INPUT_OFFSET */
-	{   0xaf20,      0x0,      0x0,      0x0,     0xf0},
-/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
-	{   0xb010,      0x8,      0x0,      0x0,      0x8},
-/* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) */
-	{    0xc00,      0x8,      0x0,      0x0,      0x8},
-/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
-	{    0x1f8,      0x8,      0x0,      0x0,      0x8},
-/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
-	{    0xac0,      0x8,      0x0,      0x0,      0x8},
-/* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
-	{   0x2578,      0x8,      0x0,      0x0,      0x8},
-/* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
-	{   0x24f8,      0x8,      0x0,      0x0,      0x8},
-/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
-	{      0x0,      0x8,      0x0,      0x0,      0x8},
-/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
-	{    0x400,     0x18,      0x8,      0x0,      0x8},
-/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
-	{    0xb78,     0x18,      0x8,      0x0,      0x2},
-/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{   0xd898,     0x50,      0x0,      0x0,     0x3c},
-/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{  0x12908,     0x18,      0x0,      0x0,     0x10},
-/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
-	{  0x11aa8,     0x40,      0x0,      0x0,     0x18},
-/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{   0xa588,     0x50,      0x0,      0x0,     0x20},
-/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{   0x8f00,     0x40,      0x0,      0x0,     0x28},
-/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
-	{  0x10e30,     0x18,      0x0,      0x0,     0x10},
-/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
-	{   0xde48,     0x48,      0x0,      0x0,     0x38},
-/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
-	{  0x11298,     0x20,      0x0,      0x0,     0x20},
-/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
-	{   0x40c8,     0x80,      0x0,      0x0,     0x10},
-/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
-	{   0x5048,     0x10,      0x0,      0x0,     0x10},
-/* XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{   0xa928,      0x8,      0x0,      0x0,      0x1},
-/* YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{   0xa128,      0x8,      0x0,      0x0,      0x1},
-/* PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{  0x11a30,      0x8,      0x0,      0x0,      0x1},
-/* TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{   0xf030,      0x8,      0x0,      0x0,      0x1},
-/* MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{  0x13028,      0x8,      0x0,      0x0,      0x1},
-/* USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
-	{  0x12c58,      0x8,      0x0,      0x0,      0x1},
-/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
-	{   0xc9b8,     0x30,      0x0,      0x0,     0x10},
-/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
-	{   0xed90,     0x28,      0x0,      0x0,     0x28},
-/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
-	{   0xad20,     0x18,      0x0,      0x0,     0x18},
-/* YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) */
-	{   0xaea0,      0x8,      0x0,      0x0,      0x8},
-/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
-	{  0x13c38,      0x8,      0x0,      0x0,      0x8},
-/* USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) */
-	{  0x13c50,     0x18,      0x0,      0x0,     0x18},
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+
+/* IRO Array */
+static const u32 iro_arr[] = {
+	/* E4 */
+	/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
+	/* offset=0x0, size=0x8 */
+	0x00000000, 0x00000000, 0x00080000,
+	/* TSTORM_PORT_STAT_OFFSET(port_id), */
+	/* offset=0x3908, mult1=0x88, size=0x88 */
+	0x00003908, 0x00000088, 0x00880000,
+	/* TSTORM_LL2_PORT_STAT_OFFSET(port_id), */
+	/* offset=0x58f0, mult1=0x20, size=0x20 */
+	0x000058f0, 0x00000020, 0x00200000,
+	/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id), */
+	/* offset=0xb00, mult1=0x8, size=0x4 */
+	0x00000b00, 0x00000008, 0x00040000,
+	/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id), */
+	/* offset=0xa80, mult1=0x8, size=0x4 */
+	0x00000a80, 0x00000008, 0x00040000,
+	/* USTORM_EQE_CONS_OFFSET(pf_id), */
+	/* offset=0x0, mult1=0x8, size=0x2 */
+	0x00000000, 0x00000008, 0x00020000,
+	/* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id), */
+	/* offset=0x80, mult1=0x8, size=0x4 */
+	0x00000080, 0x00000008, 0x00040000,
+	/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id), */
+	/* offset=0x84, mult1=0x8, size=0x2 */
+	0x00000084, 0x00000008, 0x00020000,
+	/* XSTORM_PQ_INFO_OFFSET(pq_id), */
+	/* offset=0x5618, mult1=0x4, size=0x4 */
+	0x00005618, 0x00000004, 0x00040000,
+	/* XSTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x4cd0, size=0x78 */
+	0x00004cd0, 0x00000000, 0x00780000,
+	/* YSTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x3e40, size=0x78 */
+	0x00003e40, 0x00000000, 0x00780000,
+	/* PSTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x3e00, size=0x78 */
+	0x00003e00, 0x00000000, 0x00780000,
+	/* TSTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x3890, size=0x78 */
+	0x00003890, 0x00000000, 0x00780000,
+	/* MSTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x3b50, size=0x78 */
+	0x00003b50, 0x00000000, 0x00780000,
+	/* USTORM_INTEG_TEST_DATA_OFFSET */
+	/* offset=0x7f58, size=0x78 */
+	0x00007f58, 0x00000000, 0x00780000,
+	/* XSTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0x5e58, size=0x8 */
+	0x00005e58, 0x00000000, 0x00080000,
+	/* YSTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0x7100, size=0x8 */
+	0x00007100, 0x00000000, 0x00080000,
+	/* PSTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0xa820, size=0x8 */
+	0x0000a820, 0x00000000, 0x00080000,
+	/* TSTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0x4a18, size=0x8 */
+	0x00004a18, 0x00000000, 0x00080000,
+	/* MSTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0xa5a0, size=0x8 */
+	0x0000a5a0, 0x00000000, 0x00080000,
+	/* USTORM_OVERLAY_BUF_ADDR_OFFSET */
+	/* offset=0xbde8, size=0x8 */
+	0x0000bde8, 0x00000000, 0x00080000,
+	/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id), */
+	/* offset=0x20, mult1=0x4, size=0x4 */
+	0x00000020, 0x00000004, 0x00040000,
+	/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id), */
+	/* offset=0x56d0, mult1=0x10, size=0x10 */
+	0x000056d0, 0x00000010, 0x00100000,
+	/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id), */
+	/* offset=0xc210, mult1=0x30, size=0x30 */
+	0x0000c210, 0x00000030, 0x00300000,
+	/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id), */
+	/* offset=0xaa08, mult1=0x38, size=0x38 */
+	0x0000aa08, 0x00000038, 0x00380000,
+	/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id), */
+	/* offset=0x3d20, mult1=0x80, size=0x40 */
+	0x00003d20, 0x00000080, 0x00400000,
+	/* MSTORM_TPA_TIMEOUT_US_OFFSET */
+	/* offset=0xbf60, size=0x4 */
+	0x0000bf60, 0x00000000, 0x00040000,
+	/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id), */
+	/* offset=0x4560, mult1=0x80, mult2=0x4, size=0x4 */
+	0x00004560, 0x00040080, 0x00040000,
+	/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id), */
+	/* offset=0x1f8, mult1=0x4, size=0x4 */
+	0x000001f8, 0x00000004, 0x00040000,
+	/* MSTORM_ETH_PF_STAT_OFFSET(pf_id), */
+	/* offset=0x3d60, mult1=0x80, size=0x20 */
+	0x00003d60, 0x00000080, 0x00200000,
+	/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id), */
+	/* offset=0x8960, mult1=0x40, size=0x30 */
+	0x00008960, 0x00000040, 0x00300000,
+	/* USTORM_ETH_PF_STAT_OFFSET(pf_id), */
+	/* offset=0xe840, mult1=0x60, size=0x60 */
+	0x0000e840, 0x00000060, 0x00600000,
+	/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id), */
+	/* offset=0x3f98, mult1=0x80, size=0x38 */
+	0x00003f98, 0x00000080, 0x00380000,
+	/* PSTORM_ETH_PF_STAT_OFFSET(pf_id), */
+	/* offset=0x100b8, mult1=0xc0, size=0xc0 */
+	0x000100b8, 0x000000c0, 0x00c00000,
+	/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id), */
+	/* offset=0x1f8, mult1=0x2, size=0x2 */
+	0x000001f8, 0x00000002, 0x00020000,
+	/* TSTORM_ETH_PRS_INPUT_OFFSET */
+	/* offset=0xa2a0, size=0x108 */
+	0x0000a2a0, 0x00000000, 0x01080000,
+	/* ETH_RX_RATE_LIMIT_OFFSET(pf_id), */
+	/* offset=0xa3a8, mult1=0x8, size=0x8 */
+	0x0000a3a8, 0x00000008, 0x00080000,
+	/* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id), */
+	/* offset=0x1c0, mult1=0x8, size=0x8 */
+	0x000001c0, 0x00000008, 0x00080000,
+	/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id), */
+	/* offset=0x1f8, mult1=0x8, size=0x8 */
+	0x000001f8, 0x00000008, 0x00080000,
+	/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id), */
+	/* offset=0xac0, mult1=0x8, size=0x8 */
+	0x00000ac0, 0x00000008, 0x00080000,
+	/* USTORM_TOE_CQ_PROD_OFFSET(rss_id), */
+	/* offset=0x2578, mult1=0x8, size=0x8 */
+	0x00002578, 0x00000008, 0x00080000,
+	/* USTORM_TOE_GRQ_PROD_OFFSET(pf_id), */
+	/* offset=0x24f8, mult1=0x8, size=0x8 */
+	0x000024f8, 0x00000008, 0x00080000,
+	/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id), */
+	/* offset=0x280, mult1=0x8, size=0x8 */
+	0x00000280, 0x00000008, 0x00080000,
+	/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id,bdq_id), */
+	/* offset=0x680, mult1=0x18, mult2=0x8, size=0x8 */
+	0x00000680, 0x00080018, 0x00080000,
+	/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id,bdq_id), */
+	/* offset=0xb78, mult1=0x18, mult2=0x8, size=0x2 */
+	0x00000b78, 0x00080018, 0x00020000,
+	/* TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */
+	/* offset=0xc640, mult1=0x50, size=0x3c */
+	0x0000c640, 0x00000050, 0x003c0000,
+	/* MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */
+	/* offset=0x12038, mult1=0x18, size=0x10 */
+	0x00012038, 0x00000018, 0x00100000,
+	/* USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id), */
+	/* offset=0x11b00, mult1=0x40, size=0x18 */
+	0x00011b00, 0x00000040, 0x00180000,
+	/* XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */
+	/* offset=0x94d0, mult1=0x50, size=0x20 */
+	0x000094d0, 0x00000050, 0x00200000,
+	/* YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */
+	/* offset=0x8b10, mult1=0x40, size=0x28 */
+	0x00008b10, 0x00000040, 0x00280000,
+	/* PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id), */
+	/* offset=0x10fc0, mult1=0x18, size=0x10 */
+	0x00010fc0, 0x00000018, 0x00100000,
+	/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id), */
+	/* offset=0xc828, mult1=0x48, size=0x38 */
+	0x0000c828, 0x00000048, 0x00380000,
+	/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id), */
+	/* offset=0x11090, mult1=0x20, size=0x20 */
+	0x00011090, 0x00000020, 0x00200000,
+	/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id), */
+	/* offset=0x3fd0, mult1=0x80, size=0x10 */
+	0x00003fd0, 0x00000080, 0x00100000,
+	/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id), */
+	/* offset=0x3c98, mult1=0x10, size=0x10 */
+	0x00003c98, 0x00000010, 0x00100000,
+	/* XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0xa868, mult1=0x8, size=0x1 */
+	0x0000a868, 0x00000008, 0x00010000,
+	/* YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0x97a0, mult1=0x8, size=0x1 */
+	0x000097a0, 0x00000008, 0x00010000,
+	/* PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0x11310, mult1=0x8, size=0x1 */
+	0x00011310, 0x00000008, 0x00010000,
+	/* TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0xf018, mult1=0x8, size=0x1 */
+	0x0000f018, 0x00000008, 0x00010000,
+	/* MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0x12628, mult1=0x8, size=0x1 */
+	0x00012628, 0x00000008, 0x00010000,
+	/* USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id), */
+	/* offset=0x11da8, mult1=0x8, size=0x1 */
+	0x00011da8, 0x00000008, 0x00010000,
+	/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id), */
+	/* offset=0xa978, mult1=0x30, size=0x10 */
+	0x0000a978, 0x00000030, 0x00100000,
+	/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id), */
+	/* offset=0xd768, mult1=0x28, size=0x28 */
+	0x0000d768, 0x00000028, 0x00280000,
+	/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id), */
+	/* offset=0x9a58, mult1=0x18, size=0x18 */
+	0x00009a58, 0x00000018, 0x00180000,
+	/* YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id), */
+	/* offset=0x9bd8, mult1=0x8, size=0x8 */
+	0x00009bd8, 0x00000008, 0x00080000,
+	/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id), */
+	/* offset=0x13398, mult1=0x8, size=0x8 */
+	0x00013398, 0x00000008, 0x00080000,
+	/* USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id), */
+	/* offset=0x126e8, mult1=0x18, size=0x18 */
+	0x000126e8, 0x00000018, 0x00180000,
+	/* TSTORM_NVMF_PORT_TASKPOOL_PRODUCER_CONSUMER_OFFSET(port_num_id,taskpool_index), */
+	/* offset=0xe608, mult1=0x288, mult2=0x50, size=0x10 */
+	0x0000e608, 0x00500288, 0x00100000,
+	/* USTORM_NVMF_PORT_COUNTERS_OFFSET(port_num_id), */
+	/* offset=0x12970, mult1=0x138, size=0x28 */
+	0x00012970, 0x00000138, 0x00280000,
 };
+/* Data size: 828 bytes */
+
 
 #endif /* __IRO_VALUES_H__ */
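The IRO table is now emitted as a flat u32 array, three dwords per entry, with E4_IRO_ARR_OFFSET selecting the per-chip slice. Judging from the offset/mult/size annotations above, dword 0 carries the RAM offset, dword 1 packs mult2 into its upper half and mult1 into its lower half, and dword 2 carries the size in its upper half (the lower half is zero in every entry here and is treated as reserved below). A sketch of unpacking one entry into the base/m1/m2/size fields consumed by the *_OFFSET macros in ecore_iro.h; the structure and helper names are illustrative, not the driver's actual ones:

	struct iro_example {
		u32 base; /* RAM offset */
		u16 m1;   /* multiplier for the first index */
		u16 m2;   /* multiplier for the second index */
		u16 size; /* element size */
	};

	static void example_unpack_iro(const u32 *arr, u32 idx,
				       struct iro_example *iro)
	{
		const u32 *p = &arr[E4_IRO_ARR_OFFSET + idx * 3];

		iro->base = p[0];
		iro->m1 = (u16)(p[1] & 0xffff);
		iro->m2 = (u16)(p[1] >> 16);
		iro->size = (u16)(p[2] >> 16);
	}

For example, entry 27 (MSTORM_ETH_VF_PRODS) unpacks to base 0x4560, m1 0x80, m2 0x4, size 0x4, matching its annotation above.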
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 23336c282..6559d8040 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -24,6 +24,7 @@
 
 #define CHIP_MCP_RESP_ITER_US 10
 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+#define GRCBASE_MCP	0xe00000
 
 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)	/* Account for 5 sec */
 #define ECORE_MCP_RESET_RETRIES (50 * 1000)	/* Account for 500 msec */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 9a401ed4a..4611d86d9 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -16,7 +16,7 @@
 /* ETH FP HSI Major version */
 #define ETH_HSI_VER_MAJOR                   3
 /* ETH FP HSI Minor version */
-#define ETH_HSI_VER_MINOR                   10
+#define ETH_HSI_VER_MINOR                   11
 
 /* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version driver
  * is not required to set pkt_len field in eth_tx_1st_bd struct, and tunneling
@@ -24,6 +24,9 @@
  */
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN         5
 
+/* Maximum number of pinned L2 connections (CIDs) */
+#define ETH_PINNED_CONN_MAX_NUM             32
+
 #define ETH_CACHE_LINE_SIZE                 64
 #define ETH_RX_CQE_GAP                      32
 #define ETH_MAX_RAMROD_PER_CON              8
@@ -48,6 +51,7 @@
 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT   3
 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT        2
 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE      2
+#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING   4
 /* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) -
  * (VLAN-TAG + CRC + IPG + PREAMBLE)
  */
@@ -80,7 +84,7 @@
 /* Minimum number of free BDs in RX ring, that guarantee receiving of at least
  * one RX packet.
  */
-#define ETH_RX_BD_THRESHOLD                12
+#define ETH_RX_BD_THRESHOLD                16
 
 /* num of MAC/VLAN filters */
 #define ETH_NUM_MAC_FILTERS                 512
@@ -98,20 +102,20 @@
 #define ETH_RSS_IND_TABLE_ENTRIES_NUM       128
 /* Length of RSS key (in regs) */
 #define ETH_RSS_KEY_SIZE_REGS               10
-/* number of available RSS engines in K2 */
+/* number of available RSS engines in AH */
 #define ETH_RSS_ENGINE_NUM_K2               207
 /* number of available RSS engines in BB */
 #define ETH_RSS_ENGINE_NUM_BB               127
 
 /* TPA constants */
 /* Maximum number of open TPA aggregations */
-#define ETH_TPA_MAX_AGGS_NUM              64
-/* Maximum number of additional buffers, reported by TPA-start CQE */
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_MAX_AGGS_NUM                64
+/* TPA-start CQE additional BD list length. Used for backward compatibility. */
+#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE  2
 /* Maximum number of buffers, reported by TPA-continue CQE */
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE      6
 /* Maximum number of buffers, reported by TPA-end CQE */
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE       4
 
 /* Control frame check constants */
 /* Number of etherType values configured by driver for control frame check */
@@ -125,12 +129,12 @@
 /*
  * Destination port mode
  */
-enum dest_port_mode {
-	DEST_PORT_PHY /* Send to physical port. */,
-	DEST_PORT_LOOPBACK /* Send to loopback port. */,
-	DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
-	DEST_PORT_DROP /* Drop the packet in PBF. */,
-	MAX_DEST_PORT_MODE
+enum dst_port_mode {
+	DST_PORT_PHY /* Send to physical port. */,
+	DST_PORT_LOOPBACK /* Send to loopback port. */,
+	DST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+	DST_PORT_DROP /* Drop the packet in PBF. */,
+	MAX_DST_PORT_MODE
 };
 
 
@@ -353,9 +357,13 @@ struct eth_fast_path_rx_reg_cqe {
 /* Tunnel Parsing Flags */
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num /* Number of BDs, used for packet */;
-	u8 reserved[9];
-	struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
-	u8 reserved1[3];
+	u8 reserved;
+	__le16 reserved2;
+/* aRFS flow ID or Resource ID - indicates the Vport ID from which the packet
+ * was sent; used when sending from a VF to a VF Representor.
+ */
+	__le32 flow_id_or_resource_id;
+	u8 reserved1[7];
 	struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
 };
 
@@ -422,10 +430,14 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 tpa_agg_index /* TPA aggregation index */;
 	u8 header_len /* Packet L2+L3+L4 header length */;
-/* Additional BDs length list. */
-	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
-	u8 reserved;
+/* Additional BD length list. Used for backward compatibility. */
+	__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
+	__le16 reserved2;
+/* aRFS or GFS flow ID or Resource ID - indicates the Vport ID from which the
+ * packet was sent; used when sending from a VF to a VF Representor.
+ */
+	__le32 flow_id_or_resource_id;
+	u8 reserved[3];
 	struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
 };
 
@@ -602,6 +614,41 @@ struct eth_tx_3rd_bd {
 };
 
 
+/*
+ * The parsing information data for the fourth tx bd of a given packet.
+ */
+struct eth_tx_data_4th_bd {
+/* Destination Vport ID to which the packet is forwarded. Applicable only when
+ * tx_dst_port_mode_config == ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD and
+ * dst_port_mode == DST_PORT_LOOPBACK; used to route the packet from a VF
+ * Representor to a VF.
+ */
+	u8 dst_vport_id;
+	u8 reserved4;
+	__le16 bitfields;
+/* if set, dst_vport_id has a valid value and will be used in FW */
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK  0x1
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
+#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK           0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT          1
+/* Should be 0 in all BDs except the first one (for debug). */
+#define ETH_TX_DATA_4TH_BD_START_BD_MASK            0x1
+#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT           8
+#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK           0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT          9
+	__le16 reserved3;
+};
+
+/*
+ * The fourth tx bd of a given packet
+ */
+struct eth_tx_4th_bd {
+	struct regpair addr /* Single continuous buffer */;
+	__le16 nbytes /* Number of bytes in this BD. */;
+	struct eth_tx_data_4th_bd data /* Parsing information data. */;
+};
+
+
 /*
  * Complementary information for the regular tx bd of a given packet.
  */
@@ -633,7 +680,8 @@ union eth_tx_bd_types {
 /* The second tx bd of a given packet */
 	struct eth_tx_2nd_bd second_bd;
 	struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
-	struct eth_tx_bd reg_bd /* The common non-special bd */;
+	struct eth_tx_4th_bd fourth_bd /* The fourth tx bd of a given packet */;
+	struct eth_tx_bd reg_bd /* The common regular bd */;
 };
 
 
@@ -653,6 +701,15 @@ enum eth_tx_tunn_type {
 };
 
 
+/*
+ * Mstorm Queue Zone
+ */
+struct mstorm_eth_queue_zone {
+	struct eth_rx_prod_data rx_producers /* ETH Rx producers data */;
+	__le32 reserved[3];
+};
+
+
 /*
  * Ystorm Queue Zone
  */
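
The new fourth TX BD above lets the sender name an explicit destination Vport
when routing a packet from a VF Representor to a VF. A minimal sketch of how a
TX path could populate it follows; the helper name, the include paths, and the
assumption that a fourth BD has already been reserved on the TX ring are
illustrative only and not part of this patch:

#include <stdint.h>
#include <string.h>             /* memset */
#include <rte_byteorder.h>      /* rte_cpu_to_le_16 */
#include "base/eth_common.h"    /* struct eth_tx_4th_bd, 4TH_BD field macros */

/* Illustrative helper (hypothetical name): mark a fourth BD so FW forwards
 * the packet to the given destination Vport.
 */
static void
qede_tx_4th_bd_set_dst_vport(struct eth_tx_4th_bd *bd4, uint8_t dst_vport)
{
	memset(&bd4->data, 0, sizeof(bd4->data));
	bd4->data.dst_vport_id = dst_vport;
	/* Set the valid bit so FW actually uses dst_vport_id */
	bd4->data.bitfields = rte_cpu_to_le_16(
		ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK <<
		ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT);
}
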
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 9277b46fa..91d889dc8 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1235,3 +1235,13 @@
 #define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
 #define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL 0x501b98UL
 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL 0x501b40UL
+
+#define MCP_REG_CACHE_PAGING_ENABLE 0xe06304UL
+#define PSWRQ2_REG_RESET_STT 0x240008UL
+#define PSWRQ2_REG_PRTY_STS_WR_H_0 0x240208UL
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0
+#define PGLUE_B_REG_MASTER_DISCARD_NBLOCK 0x2aa58cUL
+#define PGLUE_B_REG_PRTY_STS_WR_H_0 0x2a8208UL
+#define DORQ_REG_VF_USAGE_CNT_LIM 0x1009ccUL
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x2aa06cUL
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR 0x2aa070UL
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index abc86402d..77ee3b34f 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1602,17 +1602,17 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			/* Mark it as LRO packet */
 			ol_flags |= PKT_RX_LRO;
 			/* In split mode,  seg_len is same as len_on_first_bd
-			 * and ext_bd_len_list will be empty since there are
+			 * and bw_ext_bd_len_list will be empty since there are
 			 * no additional buffers
 			 */
 			PMD_RX_LOG(INFO, rxq,
-			    "TPA start[%d] - len_on_first_bd %d header %d"
-			    " [bd_list[0] %d], [seg_len %d]\n",
-			    cqe_start_tpa->tpa_agg_index,
-			    rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
-			    cqe_start_tpa->header_len,
-			    rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
-			    rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+			 "TPA start[%d] - len_on_first_bd %d header %d"
+			 " [bd_list[0] %d], [seg_len %d]\n",
+			 cqe_start_tpa->tpa_agg_index,
+			 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+			 cqe_start_tpa->header_len,
+			 rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
+			 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
 
 		break;
 		case ETH_RX_CQE_TYPE_TPA_CONT:
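
Since ext_bd_len_list is replaced by the fixed two-entry bw_ext_bd_len_list,
an RX path that wants the total length of the additional buffers reported by a
TPA-start CQE now walks the shorter list. A minimal sketch, with a
hypothetical helper name and assumed include paths, not code from this patch:

#include <stdint.h>
#include <rte_byteorder.h>      /* rte_le_to_cpu_16 */
#include "base/eth_common.h"    /* struct eth_fast_path_rx_tpa_start_cqe */

/* Illustrative only: sum of the additional buffer lengths reported in the
 * backward-compatible extension list of a TPA-start CQE.
 */
static uint16_t
qede_tpa_start_ext_len(const struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	uint16_t len = 0;
	unsigned int i;

	for (i = 0; i < ETH_TPA_CQE_START_BW_LEN_LIST_SIZE; i++)
		len += rte_le_to_cpu_16(cqe->bw_ext_bd_len_list[i]);

	return len;
}
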
-- 
2.18.0


