[dpdk-dev] [PATCH] drivers/net/bnxt New driver for Broadcom bnxt

Stephen Hurd stephen.hurd at broadcom.com
Wed Mar 2 22:36:54 CET 2016


Initial driver for Broadcom bnxt (Cumulus) devices.
- Adds drivers/net/bnxt and the librte_pmd_bnxt
- Adds bnxt PCI IDs
- Adds support for 2/2.5/25/50Gbps modes to rte_ethdev.h

Signed-off-by: Stephen Hurd <stephen.hurd at broadcom.com>
---
 MAINTAINERS                                     |    4 +
 config/common_bsdapp                            |    5 +
 config/common_linuxapp                          |    5 +
 drivers/net/Makefile                            |    1 +
 drivers/net/bnxt/Makefile                       |   79 +
 drivers/net/bnxt/bnxt.h                         |  217 +++
 drivers/net/bnxt/bnxt_cpr.c                     |  138 ++
 drivers/net/bnxt/bnxt_cpr.h                     |  117 ++
 drivers/net/bnxt/bnxt_ethdev.c                  | 1434 +++++++++++++++++
 drivers/net/bnxt/bnxt_filter.c                  |  175 +++
 drivers/net/bnxt/bnxt_filter.h                  |   74 +
 drivers/net/bnxt/bnxt_hwrm.c                    | 1536 +++++++++++++++++++
 drivers/net/bnxt/bnxt_hwrm.h                    |  103 ++
 drivers/net/bnxt/bnxt_irq.c                     |  155 ++
 drivers/net/bnxt/bnxt_irq.h                     |   51 +
 drivers/net/bnxt/bnxt_ring.c                    |  305 ++++
 drivers/net/bnxt/bnxt_ring.h                    |  104 ++
 drivers/net/bnxt/bnxt_rxq.c                     |  384 +++++
 drivers/net/bnxt/bnxt_rxq.h                     |   77 +
 drivers/net/bnxt/bnxt_rxr.c                     |  370 +++++
 drivers/net/bnxt/bnxt_rxr.h                     |   73 +
 drivers/net/bnxt/bnxt_stats.c                   |  222 +++
 drivers/net/bnxt/bnxt_stats.h                   |   44 +
 drivers/net/bnxt/bnxt_txq.c                     |  165 ++
 drivers/net/bnxt/bnxt_txq.h                     |   81 +
 drivers/net/bnxt/bnxt_txr.c                     |  316 ++++
 drivers/net/bnxt/bnxt_txr.h                     |   71 +
 drivers/net/bnxt/bnxt_vnic.c                    |  284 ++++
 drivers/net/bnxt/bnxt_vnic.h                    |   79 +
 drivers/net/bnxt/hsi_struct_def_dpdk.h          | 1869 +++++++++++++++++++++++
 drivers/net/bnxt/rte_pmd_bnxt_version.map       |    4 +
 lib/librte_eal/common/include/rte_pci_dev_ids.h |   45 +-
 lib/librte_ether/rte_ethdev.h                   |    4 +
 mk/rte.app.mk                                   |    1 +
 34 files changed, 8587 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/bnxt/Makefile
 create mode 100644 drivers/net/bnxt/bnxt.h
 create mode 100644 drivers/net/bnxt/bnxt_cpr.c
 create mode 100644 drivers/net/bnxt/bnxt_cpr.h
 create mode 100644 drivers/net/bnxt/bnxt_ethdev.c
 create mode 100644 drivers/net/bnxt/bnxt_filter.c
 create mode 100644 drivers/net/bnxt/bnxt_filter.h
 create mode 100644 drivers/net/bnxt/bnxt_hwrm.c
 create mode 100644 drivers/net/bnxt/bnxt_hwrm.h
 create mode 100644 drivers/net/bnxt/bnxt_irq.c
 create mode 100644 drivers/net/bnxt/bnxt_irq.h
 create mode 100644 drivers/net/bnxt/bnxt_ring.c
 create mode 100644 drivers/net/bnxt/bnxt_ring.h
 create mode 100644 drivers/net/bnxt/bnxt_rxq.c
 create mode 100644 drivers/net/bnxt/bnxt_rxq.h
 create mode 100644 drivers/net/bnxt/bnxt_rxr.c
 create mode 100644 drivers/net/bnxt/bnxt_rxr.h
 create mode 100644 drivers/net/bnxt/bnxt_stats.c
 create mode 100644 drivers/net/bnxt/bnxt_stats.h
 create mode 100644 drivers/net/bnxt/bnxt_txq.c
 create mode 100644 drivers/net/bnxt/bnxt_txq.h
 create mode 100644 drivers/net/bnxt/bnxt_txr.c
 create mode 100644 drivers/net/bnxt/bnxt_txr.h
 create mode 100644 drivers/net/bnxt/bnxt_vnic.c
 create mode 100644 drivers/net/bnxt/bnxt_vnic.h
 create mode 100644 drivers/net/bnxt/hsi_struct_def_dpdk.h
 create mode 100644 drivers/net/bnxt/rte_pmd_bnxt_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 628bc05..6ee6c3c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -359,6 +359,10 @@ F: drivers/crypto/aesni_mb/
 Intel QuickAssist
 F: drivers/crypto/qat/
 
+Broadcom bnxt
+M: Stephen Hurd <stephen.hurd at broadcom.com>
+F: drivers/net/bnxt/
+
 
 Packet processing
 -----------------
diff --git a/config/common_bsdapp b/config/common_bsdapp
index 696382c..f37c7bb 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -276,6 +276,11 @@ CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_DRIVER=n
 
 #
+# Compile burst-oriented Broadcom BNXT PMD
+#
+CONFIG_RTE_LIBRTE_BNXT_PMD=y
+
+#
 # Compile example software rings based PMD
 #
 CONFIG_RTE_LIBRTE_PMD_RING=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index f1638db..35f544b 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -280,6 +280,11 @@ CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_DRIVER=n
 
 #
+# Compile burst-oriented Broadcom BNXT PMD
+#
+CONFIG_RTE_LIBRTE_BNXT_PMD=y
+
+#
 # Compile example software rings based PMD
 #
 CONFIG_RTE_LIBRTE_PMD_RING=y
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6e4497e..6f0d64b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -41,6 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
 DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
 DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
 DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
+DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
 DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
 DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe
 DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
new file mode 100644
index 0000000..74de642
--- /dev/null
+++ b/drivers/net/bnxt/Makefile
@@ -0,0 +1,79 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+#   Copyright(c) 2014 6WIND S.A.
+#   Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bnxt.a
+
+LIBABIVER := 1
+
+CFLAGS += -O3
+CFLAGS += -DPORT_QSTATS_BROKEN
+CFLAGS += -DFUNC_QSTATS_BROKEN
+#CFLAGS += -DFPGA
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DCONFIG_B_SRIOV
+#CFLAGS += -DHSI_DEBUG
+
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
new file mode 100644
index 0000000..20a9c0a
--- /dev/null
+++ b/drivers/net/bnxt/bnxt.h
@@ -0,0 +1,217 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_H_
+#define _BNXT_H_
+
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+/* TODO: make bnxt.def_cp_ring a pointer to avoid this include */
+#include "bnxt_cpr.h"
+
+#define BNXT_VER_MAJ 1
+#define BNXT_VER_MIN 0
+#define BNXT_VER_UPD 1
+
+#define U64_TO_U32_LO(addr) ((uint32_t)(((uint64_t)(addr)) & 0xFFFFFFFF))
+#define U64_TO_U32_HI(addr) ((uint32_t)(((uint64_t)(addr)) >> 32))
+
+#define SET_BIT_IN_ARRAY(array, bitnum)			\
+	(array[bitnum / (sizeof(array[0]) * 8)] |=	\
+	 (1 << (bitnum % (sizeof(array[0]) * 8))))
+
+#define B_MAX_MSIX_VEC	16
+
+#define BNXT_MAX_MTU		9000
+#define VLAN_TAG_SIZE		4
+
+#define NUM_ACTION_RECORD_REGIONS		5
+
+enum bnxt_hw_context
+{
+	HW_CONTEXT_NONE     = 0,
+	HW_CONTEXT_IS_RSS   = 1,
+	HW_CONTEXT_IS_COS   = 2,
+	HW_CONTEXT_IS_LB    = 3,
+};
+
+#define INVALID_STATS_CTX_ID	-1
+
+#if defined(CONFIG_B_SRIOV)
+struct bnxt_vf_info {
+	uint16_t 	fw_fid;
+	uint8_t	mac_addr[ETHER_ADDR_LEN];
+	uint16_t	max_rsscos_ctx;
+	uint16_t	max_cp_rings;
+	uint16_t	max_tx_rings;
+	uint16_t	max_rx_rings;
+	uint16_t	max_l2_ctx;
+	uint16_t	max_vnics;
+	struct bnxt_pf_info *pf;
+};
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID	1
+#define BNXT_MAX_VFS(bp)	(bp->pf.max_vfs)
+#define BNXT_FIRST_VF_FID	128
+#define BNXT_PF_RINGS_USED(bp)	bnxt_get_num_queues(bp)
+#define BNXT_PF_RINGS_AVAIL(bp)	(bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
+	uint32_t	fw_fid;
+	uint8_t	port_id;
+	uint8_t	mac_addr[ETHER_ADDR_LEN];
+	uint16_t	max_rsscos_ctx;
+	uint16_t	max_cp_rings;
+	uint16_t	max_tx_rings;
+	uint16_t	max_rx_rings;
+	uint16_t	max_l2_ctx;
+	uint16_t	max_vnics;
+	uint16_t	first_vf_id;
+	uint16_t	active_vfs;
+	uint16_t	max_vfs;
+	void		*vf_req_buf;
+	phys_addr_t	vf_req_buf_dma_addr;
+	uint32_t	vf_req_fwd[8];
+	struct bnxt_vf_info	*vf;
+};
+#endif
+
+/* Max wait time is 10 * 100ms = 1s */
+#define BNXT_LINK_WAIT_CNT	10
+#define BNXT_LINK_WAIT_INTERVAL	100
+struct bnxt_link_info {
+	uint8_t			phy_flags;
+	uint8_t			mac_type;
+	uint8_t			phy_link_status;
+	uint8_t			loop_back;
+	uint8_t			link_up;
+	uint8_t			duplex;
+	uint8_t			pause;
+	uint8_t			force_pause;
+	uint8_t			auto_pause;
+	uint8_t			auto_mode;
+#define PHY_VER_LEN		3
+	uint8_t			phy_ver[PHY_VER_LEN];
+	uint16_t		link_speed;
+	uint16_t		support_speeds;
+	uint16_t		auto_link_speed;
+	uint16_t		auto_link_speed_mask;
+	uint32_t		preemphasis;
+};
+
+#define BNXT_COS_QUEUE_COUNT	8
+struct bnxt_cos_queue_info {
+	uint8_t	id;
+	uint8_t	profile;
+};
+
+struct bnxt {
+	void			*bar0;
+
+	struct rte_eth_dev	*eth_dev;
+	struct rte_pci_device	*pdev;
+
+	uint32_t		flags;
+	#define BNXT_FLAG_DCB_ENABLED	(1<<0)
+	#define BNXT_FLAG_VF		(1<<1)
+	#define BNXT_FLAG_LRO		(1<<2)
+	#define BNXT_FLAG_GRO		(1<<3)
+	#define BNXT_FLAG_160B_TCAM	(1<<16)
+
+#define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
+
+//	uint32_t		rx_copy_thresh;
+	unsigned		rx_nr_rings;
+	unsigned		rx_cp_nr_rings;
+	struct bnxt_rx_queue **rx_queues;
+
+	unsigned		tx_nr_rings;
+	unsigned		tx_cp_nr_rings;
+	struct bnxt_tx_queue **tx_queues;
+
+	/* Default completion ring */
+	struct bnxt_cp_ring_info	def_cp_ring;
+
+#define MAX_NUM_RINGS	48 /* 340 for Cumulus */
+	struct bnxt_ring_grp_info	grp_info[MAX_NUM_RINGS];
+	unsigned		nr_vnics;
+	struct bnxt_vnic_info	*vnic_info;
+	STAILQ_HEAD(, bnxt_vnic_info)	free_vnic_list;
+
+	struct bnxt_filter_info	*filter_info;
+	STAILQ_HEAD(, bnxt_filter_info)	free_filter_list;
+
+	/* VNIC pointer for flow filter (VMDq) pools */
+#define MAX_FF_POOLS	ETH_64_POOLS
+	STAILQ_HEAD(, bnxt_vnic_info)	ff_pool[MAX_FF_POOLS];
+
+	unsigned int		current_interval;
+
+	struct bnxt_irq		*irq_tbl;
+
+#define MAX_NUM_MAC_ADDR	32
+	uint8_t			mac_addr[ETHER_ADDR_LEN];
+
+#define NUM_REG_WINDOWS		16
+	uint32_t		reg_window_base[NUM_REG_WINDOWS];
+	uint32_t		msg_enable;
+
+	uint16_t		hwrm_cmd_seq;
+	uint32_t		hwrm_intr_seq_id;
+	void			*hwrm_cmd_resp_addr;
+	phys_addr_t		hwrm_cmd_resp_dma_addr;
+	rte_spinlock_t	hwrm_lock;
+
+	uint16_t		vxlan_port;
+	uint8_t			vxlan_port_cnt;
+	uint16_t		vxlan_fw_dst_port_id;
+
+	struct bnxt_link_info	link_info;
+#define BNXT_LINK_SPEED_AUTO	0
+	struct bnxt_cos_queue_info	cos_queue[BNXT_COS_QUEUE_COUNT];
+	uint16_t		max_req_len;
+
+#ifdef CONFIG_B_SRIOV
+	int			nr_vfs;
+	struct bnxt_pf_info	pf;
+	struct bnxt_vf_info	vf;
+#endif
+};
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
new file mode 100644
index 0000000..4a5e2e6
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -0,0 +1,138 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+
+/*
+ * Async event handling
+ */
+void bnxt_handle_async_event(struct bnxt *bp __rte_unused,
+			     struct cmpl_base *cmp)
+{
+	struct hwrm_async_event_cmpl *async_cmp =
+				(struct hwrm_async_event_cmpl *)cmp;
+
+	/* TODO: HWRM async events are not defined yet */
+	/* Needs to handle: link events, error events, etc. */
+	switch (async_cmp->event_id) {
+	case 0:
+		/* Assume LINK_CHANGE == 0 */
+		RTE_LOG(INFO, PMD, "Link change event\n");
+
+		/* Can just prompt the update_op routine to do a qcfg
+		   instead of doing the actual qcfg */
+		break;
+	case 1:
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n",
+			async_cmp->event_id);
+		break;
+	}
+}
+
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
+{
+	struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
+	struct input *fwd_cmd;
+	uint16_t logical_vf_id, error_code;
+
+	/* Qualify the fwd request */
+	if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
+		RTE_LOG(ERR, PMD,
+			"FWD req's source_id 0x%x < first_vf_id 0x%x\n",
+			fwd_cmpl->source_id, bp->pf.first_vf_id);
+		error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
+		goto reject;
+	} else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
+		   128 - sizeof(struct input)) {
+		RTE_LOG(ERR, PMD,
+		    "FWD req's cmd len 0x%x > 108 bytes allowed\n",
+		    fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
+		error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+		goto reject;
+	}
+
+	/* Locate VF's forwarded command */
+	logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
+	fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
+		   (logical_vf_id * 128));
+
+	/* Provision the request */
+	switch (fwd_cmd->req_type) {
+	case HWRM_CFA_L2_FILTER_ALLOC:
+	case HWRM_CFA_L2_FILTER_FREE:
+	case HWRM_CFA_L2_FILTER_CFG:
+	case HWRM_CFA_L2_SET_RX_MASK:
+		break;
+	default:
+		error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+		goto reject;
+	}
+
+	/* Forward */
+	fwd_cmd->target_id = fwd_cmpl->source_id;
+	bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
+	return;
+
+reject:
+	/* TODO: Encap the reject error resp into the hwrm_err_input? */
+	/* Use the error_code for the reject cmd */
+	RTE_LOG(ERR, PMD,
+		"Error 0x%x found in the forward request\n", error_code);
+}
+
+/* For the default completion ring only */
+void bnxt_free_def_cp_ring(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = &bp->def_cp_ring;
+	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+	bnxt_free_ring(ring);
+}
+
+/* For the default completion ring only */
+void bnxt_init_def_ring_struct(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = &bp->def_cp_ring;
+	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+	ring->bd = (void *)cpr->cp_desc_ring;
+	ring->bd_dma = cpr->cp_desc_mapping;
+	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+	ring->ring_mask = ring->ring_size - 1;
+	ring->vmem_size = 0;
+	ring->vmem = NULL;
+}
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
new file mode 100644
index 0000000..7e7df3b
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -0,0 +1,117 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_CPR_H_
+#define _BNXT_CPR_H_
+
+/* TODO: make bnxt_cp_ring_info.cp_ring_struct a pointer to avoid this. */
+#include "bnxt_ring.h"
+#include "hsi_struct_def_dpdk.h"
+
+#define CMP_VALID(cmp, raw_cons, ring)					\
+	(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) ==			\
+	 !((raw_cons) & ((ring)->ring_size)))
+
+#define CMP_TYPE(cmp)						\
+	(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
+
+#define ADV_RAW_CMP(idx, n)	((idx) + (n))
+#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
+#define RING_CMP(ring, idx)		((idx) & (ring)->ring_mask)
+#define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+
+#define B_CP_DB_REARM(cpr, raw_cons)					\
+		*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
+				RING_CMP(&cpr->cp_ring_struct, raw_cons))
+
+#define B_CP_DIS_DB(cpr, raw_cons)					\
+		*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS |	\
+				RING_CMP(&cpr->cp_ring_struct, raw_cons))
+
+struct bnxt_cp_ring_info {
+	uint32_t		cp_raw_cons;
+	void 			*cp_doorbell;
+
+	struct cmpl_base	*cp_desc_ring;
+
+	phys_addr_t		cp_desc_mapping;
+
+	struct ctx_hw_stats	*hw_stats;
+	phys_addr_t		hw_stats_map;
+	uint32_t		hw_stats_ctx_id;
+
+	struct bnxt_ring_struct	cp_ring_struct;
+};
+
+#define RX_CMP_L2_ERRORS						\
+	(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_OK(rxcmp1)						\
+	    (((rxcmp1)->rx_cmp_flags2 &					\
+	      (RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC)) &&\
+	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 &			\
+	       (RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR)))
+
+#define RX_CMP_ENCAP(rxcmp1)						\
+	    (((rxcmp1)->rx_cmp_flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+#define B_TPA_START_AGG_ID(rx_tpa_start)				\
+	((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 &			\
+	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT
+
+#define B_TPA_END_AGG_ID(rx_tpa_end)					\
+	((rx_tpa_end)->rx_tpa_end_cmp_misc_v1 &				\
+	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT
+
+#define B_TPA_END_TPA_SEGS(rx_tpa_end)					\
+	((rx_tpa_end)->rx_tpa_end_cmp_misc_v1 &				\
+	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
+	(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &			\
+	 RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define B_TPA_END_GRO(rx_tpa_end)					\
+	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
+	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+struct bnxt;
+void bnxt_free_def_cp_ring(struct bnxt *bp);
+void bnxt_init_def_ring_struct(struct bnxt *bp);
+void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
new file mode 100644
index 0000000..1fd5d15
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -0,0 +1,1434 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_irq.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+#define STRIZE(x)		#x
+#define STRIFY(x)		STRIZE(x)
+#define DRV_MODULE_NAME		"bnxt"
+#define DRV_MODULE_VERSION	STRIFY(BNXT_VER_MAJ) "." STRIFY(BNXT_VER_MIN) "." STRIFY(BNXT_VER_UPD)
+
+static const char bnxt_version[] =
+	"Broadcom Cumulus driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+static struct rte_pci_id bnxt_pci_id_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+	{.device_id = 0},
+};
+
+#define BNXT_ETH_RSS_SUPPORT (	\
+	ETH_RSS_IPV4 |		\
+	ETH_RSS_NONFRAG_IPV4_TCP |	\
+	ETH_RSS_NONFRAG_IPV4_UDP |	\
+	ETH_RSS_IPV6 |		\
+	ETH_RSS_NONFRAG_IPV6_TCP |	\
+	ETH_RSS_NONFRAG_IPV6_UDP)
+
+struct cfa_mcast_rec {
+	uint16_t action0_5[6];
+	uint32_t mc_grp_entry;
+};
+
+/***********************/
+
+/*
+ * High level utility functions
+ */
+
+static void bnxt_free_mem(struct bnxt *bp)
+{
+	bnxt_free_filter_mem(bp);
+	bnxt_free_vnic_attributes(bp);
+	bnxt_free_vnic_mem(bp);
+
+	bnxt_free_stats(bp);
+	bnxt_free_tx_rings(bp);
+	bnxt_free_rx_rings(bp);
+	bnxt_free_def_cp_ring(bp);
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp)
+{
+	int rc;
+
+	/* Default completion ring */
+	bnxt_init_def_ring_struct(bp);
+	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
+			      &bp->def_cp_ring, "def_cp_ring");
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_vnic_mem(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_vnic_attributes(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_filter_mem(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnxt_free_mem(bp);
+	return rc;
+}
+
+static int bnxt_init_chip(struct bnxt *bp)
+{
+	unsigned i, rss_idx, fw_idx;
+	int rc;
+
+	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_alloc_hwrm_rings(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_mq_rx_configure(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	/* bp->flags needs to be revisited for other stateless offload
+	   features */
+	bp->flags &= ~(BNXT_FLAG_GRO | BNXT_FLAG_LRO);
+
+	/* VNIC configuration */
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
+		if (rc) {
+			RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
+				rc);
+			goto err_out;
+		}
+
+		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+		if (rc) {
+			RTE_LOG(ERR, PMD,
+				"HWRM vnic ctx alloc failure rc: %x\n", rc);
+			goto err_out;
+		}
+
+		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+		if (rc) {
+			RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+			goto err_out;
+		}
+
+		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
+		if (rc) {
+			RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
+				rc);
+			goto err_out;
+		}
+		if (vnic->rss_table && vnic->hash_type) {
+			/* Fill the RSS hash & redirection table with
+			   ring group ids for all VNICs */
+			for (rss_idx = 0, fw_idx = 0;
+			     rss_idx < HW_HASH_INDEX_SIZE;
+			     rss_idx++, fw_idx++) {
+				if (vnic->fw_grp_ids[fw_idx] ==
+				    INVALID_HW_RING_ID)
+					fw_idx = 0;
+				vnic->rss_table[rss_idx] =
+						vnic->fw_grp_ids[fw_idx];
+			}
+			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+			if (rc) {
+				RTE_LOG(ERR, PMD,
+					"HWRM vnic set RSS failure rc: %x\n",
+					rc);
+				goto err_out;
+			}
+		}
+	}
+	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	bnxt_free_all_hwrm_resources(bp);
+
+	return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp)
+{
+	bnxt_free_all_hwrm_resources(bp);
+	bnxt_free_all_filters(bp);
+	bnxt_free_all_vnics(bp);
+	return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp)
+{
+	int rc;
+
+	bnxt_init_ring_grps(bp);
+	bnxt_init_vnics(bp);
+	bnxt_init_filters(bp);
+
+	rc = bnxt_init_chip(bp);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Device configuration and status function
+ */
+
+/* dev_infos_get handler: report device limits (MAC slots, queue counts,
+ * RETA size, VMDq pools) and default RX/TX queue configuration.  The
+ * per-function limits come from the cached bp->pf / bp->vf capability
+ * data populated earlier via HWRM func_qcaps.
+ */
+static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+				  struct rte_eth_dev_info *dev_info)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	uint16_t max_vnics, i, j, vpool, vrxq;
+
+	/* MAC Specifics */
+	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
+	dev_info->max_hash_mac_addrs = 0;
+
+	/* PF/VF specifics */
+	if (BNXT_PF(bp)) {
+		dev_info->max_rx_queues = bp->pf.max_rx_rings;
+		dev_info->max_tx_queues = bp->pf.max_tx_rings;
+		dev_info->max_vfs = bp->pf.active_vfs;
+		dev_info->reta_size = bp->pf.max_rsscos_ctx;
+		max_vnics = bp->pf.max_vnics;
+	} else {
+		dev_info->max_rx_queues = bp->vf.max_rx_rings;
+		dev_info->max_tx_queues = bp->vf.max_tx_rings;
+		dev_info->reta_size = bp->vf.max_rsscos_ctx;
+		max_vnics = bp->vf.max_vnics;
+	}
+
+	/* Fast path specifics */
+	dev_info->min_rx_bufsize = 1;	/* Minimum RX Producer Buffer BD length field */
+	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+				  + VLAN_TAG_SIZE;
+	dev_info->rx_offload_capa = 0;
+	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+					DEV_TX_OFFLOAD_TCP_CKSUM |
+					DEV_TX_OFFLOAD_UDP_CKSUM |
+					DEV_TX_OFFLOAD_TCP_TSO;
+
+	/* *INDENT-OFF* */
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = 8,
+			.hthresh = 8,
+			.wthresh = 0,
+		},
+		.rx_free_thresh = 32,
+		.rx_drop_en = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = 32,
+			.hthresh = 0,
+			.wthresh = 0,
+		},
+		.tx_free_thresh = 32,
+		.tx_rs_thresh = 32,
+		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+			     ETH_TXQ_FLAGS_NOOFFLOADS,
+	};
+	/* *INDENT-ON* */
+
+	/*
+	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
+	 *       need further investigation.
+	 */
+
+	/* VMDq resources: find the largest pool count (64 down to 8,
+	 * halving each step) that the available VNICs can back, paired
+	 * with the largest queue count (128 down to 8) the device's RX
+	 * rings can back; clamp pools to queues.  If no combination
+	 * fits, advertise no VMDq support at all.
+	 */
+	vpool = 64; /* ETH_64_POOLS */
+	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+	for (i = 0; i < 4; vpool >>= 1, i++) {
+		if (max_vnics > vpool) {
+			for (j = 0; j < 5; vrxq >>= 1, j++) {
+				if (dev_info->max_rx_queues > vrxq) {
+					if (vpool > vrxq)
+						vpool = vrxq;
+					goto found;
+				}
+			}
+			/* Not enough resources to support VMDq */
+			break;
+		}
+	}
+	/* Not enough resources to support VMDq */
+	vpool = vrxq = 0;
+found:
+	dev_info->max_vmdq_pools = vpool;
+	dev_info->vmdq_queue_num = vrxq;
+
+	dev_info->vmdq_pool_base = 0;
+	dev_info->vmdq_queue_base = 0;
+}
+
+/* Configure the device based on the configuration provided */
+/* Configure the device based on the configuration provided.
+ * Caches the queue arrays/counts from the ethdev layer, derives the MTU
+ * when jumbo frames are enabled, and pushes the link configuration to
+ * firmware.  Returns 0 on success or the HWRM error code.
+ */
+static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	int rc;
+
+	bp->rx_queues = (void *)eth_dev->data->rx_queues;
+	bp->tx_queues = (void *)eth_dev->data->tx_queues;
+
+	/* Inherit new configurations */
+	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+	bp->rx_cp_nr_rings = bp->rx_nr_rings;
+	bp->tx_cp_nr_rings = bp->tx_nr_rings;
+
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+				ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+	/* Bug fix: the original discarded the return value of the HWRM
+	 * link configuration call and always returned 0; propagate it so
+	 * a failed configure is visible to the application.
+	 */
+	rc = bnxt_set_hwrm_link_config(bp, true);
+	return rc;
+}
+
+/* dev_start handler: reset the function via HWRM, set up and request
+ * interrupts, allocate memory, program the NIC, then enable interrupts.
+ * On any failure the full bring-up is unwound before returning.
+ */
+static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	int rc;
+
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+		/* NOTE(review): the real HWRM error code is replaced with
+		 * -1 here -- consider propagating rc instead.
+		 */
+		rc = -1;
+		goto error;
+	}
+	rc = bnxt_setup_int(bp);
+	if (rc)
+		goto error;
+
+	rc = bnxt_alloc_mem(bp);
+	if (rc)
+		goto error;
+
+	rc = bnxt_request_int(bp);
+	if (rc)
+		goto error;
+
+	rc = bnxt_init_nic(bp);
+	if (rc)
+		goto error;
+
+	bnxt_enable_int(bp);
+
+	return 0;
+
+error:
+	/* Unconditional teardown: each free routine must tolerate being
+	 * called for resources that were never allocated.
+	 */
+	bnxt_shutdown_nic(bp);
+	bnxt_disable_int(bp);
+	bnxt_free_int(bp);
+	bnxt_free_tx_mbufs(bp);
+	bnxt_free_rx_mbufs(bp);
+	bnxt_free_mem(bp);
+	return rc;
+}
+
+/* Unload the driver, release the IRQ */
+static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+#ifdef FPGA_DEBUG
+	/* The FPGA needs about 10ms of settle time to DMA
+	   the last few completions.
+	   100ms is the experimental value for reliability purposes */
+	rte_delay_ms(100);
+#endif
+
+	/* Mark the link down before tearing down the datapath so the
+	 * application stops treating the port as live.
+	 */
+	if (bp->eth_dev->data->dev_started) {
+		/* TBD: STOP HW queues DMA */
+		eth_dev->data->dev_link.link_status = 0;
+	}
+	bnxt_shutdown_nic(bp);
+	bnxt_disable_int(bp);
+	bnxt_free_int(bp);
+}
+
+/* dev_set_link_up handler.  Bug fix: the original ignored the HWRM
+ * return code and reported the link as up even when the firmware call
+ * failed.  Now the status is only set on success and the error is
+ * propagated to the caller.
+ */
+static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	int rc;
+
+	rc = bnxt_set_hwrm_link_config(bp, true);
+	if (!rc)
+		eth_dev->data->dev_link.link_status = 1;
+	return rc;
+}
+
+/* dev_set_link_down handler.  The administrative status is cleared
+ * unconditionally (reflecting the caller's intent); the HWRM return
+ * code is now propagated instead of being silently dropped.
+ */
+static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	eth_dev->data->dev_link.link_status = 0;
+	return bnxt_set_hwrm_link_config(bp, false);
+}
+
+/* dev_close handler: stop the port, free the datapath buffers and
+ * memory, release SR-IOV resources (PF only), then release the MAC
+ * table and the HWRM channel.
+ */
+static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	int rc;
+
+	bnxt_dev_stop_op(eth_dev);
+	bnxt_free_tx_mbufs(bp);
+	bnxt_free_rx_mbufs(bp);
+	bnxt_free_mem(bp);
+
+	if (BNXT_PF(bp)) {
+		/* Notify all VFs about the device going down */
+		/* PF to VF notification */
+
+		/* Clean up for VFs */
+		rc = bnxt_hwrm_func_vf_free(bp, bp->pf.active_vfs);
+		if (rc) {
+			/* Bug fix: "0x%d" printed the value in decimal
+			 * with a misleading hex prefix; use %x.
+			 */
+			RTE_LOG(ERR, PMD,
+				"Failed to free VFs with rc = 0x%x!", rc);
+			bp->pf.active_vfs = 0;
+		}
+
+		/* Free all VF fwd cmd buffer */
+		rte_free(bp->pf.vf_req_buf);
+	}
+
+	rte_free(eth_dev->data->mac_addrs);
+	bnxt_free_hwrm_resources(bp);
+}
+
+#ifdef FPGA_DEBUG
+/* FPGA builds have no real PHY: force the link up at a fixed speed. */
+static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
+			       int wait_to_complete __rte_unused)
+{
+	/* Hard code link status and attrib for now */
+	bnxt_dev_set_link_up_op(eth_dev);
+
+	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_speed = ETH_LINK_SPEED_10;
+	return 0;
+}
+#else
+/* link_update handler: poll firmware for the current link state.  When
+ * wait_to_complete is set, retry up to BNXT_LINK_WAIT_CNT times with
+ * BNXT_LINK_WAIT_INTERVAL ms between polls until the link comes up.
+ */
+static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
+			       int wait_to_complete)
+{
+	int rc = 0;
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct rte_eth_link new;
+	unsigned cnt = BNXT_LINK_WAIT_CNT;
+
+	memset(&new, 0, sizeof(new));
+	do {
+		/* Retrieve link info from hardware */
+		rc = bnxt_get_hwrm_link_config(bp, &new);
+		if (rc) {
+			/* NOTE(review): on query failure the speed/duplex
+			 * fall back to hard-coded 100M/full -- confirm
+			 * this default is intended.
+			 */
+			new.link_speed = ETH_LINK_SPEED_100;
+			new.link_duplex = ETH_LINK_FULL_DUPLEX;
+			/* Bug fix: "0x%d" printed decimal with a hex
+			 * prefix; use %x.
+			 */
+			RTE_LOG(ERR, PMD,
+				"Failed to retrieve link rc = 0x%x!", rc);
+			goto out;
+		}
+		if (!wait_to_complete)
+			break;
+
+		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+
+	} while (!new.link_status && cnt--);
+
+	/* Timed out or success */
+	if (new.link_status) {
+		/* Update only if success */
+		eth_dev->data->dev_link.link_duplex = new.link_duplex;
+		eth_dev->data->dev_link.link_speed = new.link_speed;
+	}
+	eth_dev->data->dev_link.link_status = new.link_status;
+out:
+	return rc;
+}
+#endif
+
+/* Enable promiscuous mode by setting the flag on the default VNIC and
+ * pushing the new RX mask to firmware.  No-op before VNICs exist.
+ */
+static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (bp->vnic_info == NULL)
+		return;
+
+	bp->vnic_info[0].flags |= BNXT_VNIC_INFO_PROMISC;
+	bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+}
+
+/* Disable promiscuous mode on the default VNIC and push the new RX
+ * mask to firmware.  No-op before VNICs exist.
+ */
+static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (bp->vnic_info == NULL)
+		return;
+
+	bp->vnic_info[0].flags &= ~BNXT_VNIC_INFO_PROMISC;
+	bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+}
+
+/* Enable all-multicast reception on the default VNIC and push the new
+ * RX mask to firmware.  No-op before VNICs exist.
+ */
+static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (bp->vnic_info == NULL)
+		return;
+
+	bp->vnic_info[0].flags |= BNXT_VNIC_INFO_ALLMULTI;
+	bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+}
+
+/* Disable all-multicast reception on the default VNIC and push the new
+ * RX mask to firmware.  No-op before VNICs exist.
+ */
+static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	if (bp->vnic_info == NULL)
+		return;
+
+	bp->vnic_info[0].flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+	bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+}
+
+/* reta_update handler: program the supplied RSS redirection table onto
+ * every VNIC in every filter-flow pool.
+ */
+static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
+			    struct rte_eth_rss_reta_entry64 *reta_conf,
+			    uint16_t reta_size)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	struct bnxt_vnic_info *vnic;
+	int i;
+
+	/* Bug fix: '!' binds tighter than '&', so the original
+	 * "!dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG" evaluated
+	 * (!mq_mode) & FLAG, which accepts any non-zero mq_mode even
+	 * when the RSS flag is absent.  Parenthesize the mask test.
+	 */
+	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+		return -EINVAL;
+
+	if (reta_size != HW_HASH_INDEX_SIZE) {
+		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+			"(%d) must equal the size supported by the hardware "
+			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+		return -EINVAL;
+	}
+	/* Update the RSS VNIC(s) */
+	for (i = 0; i < MAX_FF_POOLS; i++) {
+		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+			/* NOTE(review): copies reta_size *bytes* from an
+			 * array of rte_eth_rss_reta_entry64 into the u16
+			 * HW table -- entry-width mismatch, verify intent
+			 * (same caveat as bnxt_reta_query_op).
+			 */
+			memcpy(vnic->rss_table, reta_conf, reta_size);
+
+			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+		}
+	}
+	return 0;
+}
+
+/* reta_query handler: report the RSS redirection table of the default
+ * VNIC (all VNICs share the same RETA in this driver).
+ */
+static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_rss_reta_entry64 *reta_conf,
+			      uint16_t reta_size)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+	/* Retrieve from the default VNIC */
+	if (!vnic)
+		return -EINVAL;
+	if (!vnic->rss_table)
+		return -EINVAL;
+
+	if (reta_size != HW_HASH_INDEX_SIZE) {
+		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+			"(%d) must equal the size supported by the hardware "
+			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+		return -EINVAL;
+	}
+	/* EW - need to revisit here copying from u64 to u16 */
+	/* NOTE(review): as the author's comment above says, this copies
+	 * reta_size bytes of u16 HW entries into an array of 64-bit
+	 * rte_eth_rss_reta_entry64 structures -- the layouts do not
+	 * match; verify against the rte_ethdev RETA API contract.
+	 */
+	memcpy(reta_conf, vnic->rss_table, reta_size);
+
+	return 0;
+}
+
+/* rss_hash_update handler: translate the requested rte rss_hf value
+ * into a HWRM hash type and program it (plus an optional hash key) on
+ * every VNIC.
+ */
+static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+				   struct rte_eth_rss_conf *rss_conf)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	struct bnxt_vnic_info *vnic;
+	uint16_t hash_type = 0;
+	int i;
+
+	/* If RSS enablement were different than dev_configure,
+	   then return -EINVAL */
+	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+		if (!rss_conf->rss_hf)
+			return -EINVAL;
+	} else {
+		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
+			return -EINVAL;
+	}
+	/* NOTE(review): this switch matches rss_hf as an exact value,
+	 * not as a bitmask -- any combination other than the listed
+	 * cases falls through to the default and selects IPv4 hashing
+	 * only.  UDP hash types are commented out (presumably not yet
+	 * supported by firmware -- confirm).
+	 */
+	switch (rss_conf->rss_hf) {
+	case ETH_RSS_IPV4:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+		break;
+	case ETH_RSS_NONFRAG_IPV4_TCP:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+		break;
+	case ETH_RSS_NONFRAG_IPV4_UDP:
+//              hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+		break;
+	case ETH_RSS_IPV6:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+		break;
+	case ETH_RSS_NONFRAG_IPV6_TCP:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+		break;
+	case ETH_RSS_NONFRAG_IPV6_UDP:
+//              hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+		break;
+	case ETH_RSS_IP:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+		break;
+	case ETH_RSS_UDP:
+//              hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
+//                          HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+		break;
+	default:
+		hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+		break;
+	}
+	/* Update the RSS VNIC(s) */
+	for (i = 0; i < MAX_FF_POOLS; i++) {
+		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+			vnic->hash_type = hash_type;
+
+			/* Use the supplied key if the key length is
+			   acceptable and the rss_key is not NULL */
+			if (rss_conf->rss_key &&
+			    rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+				memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+				       rss_conf->rss_key_len);
+
+			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+		}
+	}
+	return 0;
+}
+
+/* rss_hash_conf_get handler: report the hash key and hash functions of
+ * the default VNIC (RSS configuration is shared by all VNICs).
+ */
+static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
+				     struct rte_eth_rss_conf *rss_conf)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	int len;
+
+	/* RSS configuration is the same for all VNICs */
+	if (vnic && vnic->rss_hash_key) {
+		if (rss_conf->rss_key) {
+			/* Copy at most HW_HASH_KEY_SIZE bytes into the
+			 * caller's buffer.
+			 */
+			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
+			      rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
+			memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
+		}
+		/* Inverse of the mapping in bnxt_rss_hash_update_op;
+		 * unknown combinations report rss_hf == 0.
+		 */
+		switch (vnic->hash_type) {
+		case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4:
+			rss_conf->rss_hf = ETH_RSS_IPV4;
+			break;
+		case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4:
+			rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+			break;
+//              case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4:
+//                      rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+//                      break;
+		case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6:
+			rss_conf->rss_hf = ETH_RSS_IPV6;
+			break;
+		case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6:
+			rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV6_TCP;
+			break;
+//              case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6:
+//                      rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV6_UDP;
+//                      break;
+		case (HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+		      HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6):
+			rss_conf->rss_hf = ETH_RSS_IP;
+			break;
+//              case HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6:
+//                      rss_conf->rss_hf = ETH_RSS_UDP;
+//                      break;
+		default:
+			rss_conf->rss_hf = 0;
+			break;
+		}
+	} else {
+		rss_conf->rss_hf = 0;
+	}
+	return 0;
+}
+
+/* mtu_set handler: validate the requested MTU against the device frame
+ * limit, toggle jumbo mode, and keep max_rx_pkt_len consistent.
+ */
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+	struct rte_eth_dev_info dev_info;
+	uint32_t mtu_ceiling;
+
+	bnxt_dev_info_get_op(eth_dev, &dev_info);
+	mtu_ceiling = dev_info.max_rx_pktlen -
+		      ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+
+	if (new_mtu < ETHER_MIN_MTU || new_mtu > mtu_ceiling) {
+		RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+			ETHER_MIN_MTU, mtu_ceiling);
+		return -EINVAL;
+	}
+
+	if (new_mtu <= ETHER_MTU) {
+		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+	} else {
+		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+			new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	}
+	eth_dev->data->mtu = new_mtu;
+
+	/* TODO: If the device is active:
+	   - Close NIC
+	   - Free all ring/buffer resources as according to the new MTU
+	   - Open NIC
+
+	   Else
+
+	   If the queues have already been setup:
+	   - Re-allocate all ring/buffer resources as according to the new MTU
+	*/
+	return 0;
+}
+
+/* mac_addr_remove handler: drop the L2 filter(s) bound to MAC table
+ * slot 'index' from every pool the address was assigned to, clear the
+ * HW filter, and recycle the filter struct onto the free list.
+ */
+static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
+				    uint32_t index)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
+	struct bnxt_vnic_info *vnic;
+	struct bnxt_filter_info *filter, *temp_filter;
+	int i;
+
+	/* Loop through all VNICs from the specified filter flow pools to
+	   remove the corresponding MAC addr filter */
+	for (i = 0; i < MAX_FF_POOLS; i++) {
+		if (!(pool_mask & (1 << i)))
+			continue;
+
+		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+			/* Walk with a saved 'next' pointer so the current
+			 * node can be unlinked mid-iteration.
+			 */
+			filter = STAILQ_FIRST(&vnic->filter);
+			while (filter) {
+				temp_filter = STAILQ_NEXT(filter, next);
+				if (filter->mac_index == index) {
+					STAILQ_REMOVE(&vnic->filter, filter,
+						      bnxt_filter_info, next);
+					bnxt_hwrm_clear_filter(bp, filter);
+					filter->mac_index = INVALID_MAC_INDEX;
+					memset(&filter->l2_addr, 0,
+					       ETHER_ADDR_LEN);
+					STAILQ_INSERT_TAIL(
+							&bp->free_filter_list,
+							filter, next);
+				}
+				filter = temp_filter;
+			}
+		}
+	}
+}
+
+/* mac_addr_add handler: attach 'mac_addr' (MAC table slot 'index') as a
+ * new L2 filter on the first VNIC of the given pool.  Duplicate indices
+ * are rejected with a log message only (the callback returns void in
+ * this ethdev API version).
+ */
+static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+				 struct ether_addr *mac_addr,
+				 uint32_t index, uint32_t pool)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+	struct bnxt_filter_info *filter;
+
+	if (!vnic) {
+		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+		return;
+	}
+	/* Attach requested MAC address to the new l2_filter */
+	STAILQ_FOREACH(filter, &vnic->filter, next) {
+		if (filter->mac_index == index) {
+			RTE_LOG(ERR, PMD,
+				"MAC addr already existed for pool %d\n", pool);
+			return;
+		}
+	}
+	filter = bnxt_alloc_filter(bp);
+	if (!filter) {
+		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+		return;
+	}
+	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+	filter->mac_index = index;
+	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+	/* NOTE(review): the bnxt_hwrm_set_filter() return code is
+	 * dropped; a HW programming failure leaves a stale software
+	 * filter on the list -- verify whether that is acceptable.
+	 */
+	bnxt_hwrm_set_filter(bp, vnic, filter);
+}
+
+/* Remove VLAN 'vlan_id' from every matching MAC+VLAN filter: each such
+ * filter is cleared in HW and replaced by a MAC-only filter inheriting
+ * the same address.  Returns 0 or a negative errno.
+ */
+static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+	struct bnxt_vnic_info *vnic;
+	unsigned i;
+	int rc = 0;
+
+	/* Cycle through all VNICs */
+	for (i = 0; i < MAX_FF_POOLS; i++) {
+		/* For each VNIC and each associated filter(s)
+		   if VLAN exists && VLAN matches vlan_id
+		       remove the MAC+VLAN filter
+		       add a new MAC only filter
+		   else
+		       VLAN filter doesn't exist, just skip and continue
+		*/
+		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+			/* Saved-next walk: the current node may be
+			 * unlinked inside the loop.
+			 */
+			filter = STAILQ_FIRST(&vnic->filter);
+			while (filter) {
+				temp_filter = STAILQ_NEXT(filter, next);
+
+				if (filter->enables &
+				    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN &&
+				    filter->l2_ovlan == vlan_id) {
+					/* Must delete the filter */
+					STAILQ_REMOVE(&vnic->filter, filter,
+						      bnxt_filter_info, next);
+					bnxt_hwrm_clear_filter(bp, filter);
+					STAILQ_INSERT_TAIL(
+							&bp->free_filter_list,
+							filter, next);
+
+					/* Need to examine to see if the MAC
+					   filter already existed or not before
+					   allocating a new one */
+
+					new_filter = bnxt_alloc_filter(bp);
+					if (!new_filter) {
+						RTE_LOG(ERR, PMD,
+							"MAC/VLAN filter alloc failed\n");
+						rc = -ENOMEM;
+						goto exit;
+					}
+					STAILQ_INSERT_TAIL(&vnic->filter,
+							   new_filter, next);
+					/* Inherit MAC from the previous
+					   filter.  NOTE(review): 'filter' was
+					   already moved to the free list
+					   above; it is still valid memory but
+					   this read is fragile -- consider
+					   copying before the move. */
+					new_filter->mac_index =
+							filter->mac_index;
+					memcpy(new_filter->l2_addr,
+					       filter->l2_addr, ETHER_ADDR_LEN);
+					/* MAC only filter */
+					rc = bnxt_hwrm_set_filter(bp, vnic,
+								  new_filter);
+					if (rc)
+						goto exit;
+				}
+				filter = temp_filter;
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
+/* Add VLAN 'vlan_id' to the filter set: MAC-only filters are replaced
+ * by MAC+VLAN filters for this VLAN; existing filters for a different
+ * VLAN gain an additional MAC+VLAN filter; filters already matching
+ * this VLAN are skipped.  Returns 0 or a negative errno.
+ */
+static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
+{
+	struct bnxt_filter_info *filter, *temp_filter, *new_filter;
+	struct bnxt_vnic_info *vnic;
+	unsigned i;
+	int rc = 0;
+
+	/* Cycle through all VNICs */
+	for (i = 0; i < MAX_FF_POOLS; i++) {
+		/* For each VNIC and each associated filter(s)
+		   if VLAN exists:
+		     if VLAN matches vlan_id
+		       VLAN filter already exists, just skip and continue
+		     else
+		       add a new MAC+VLAN filter
+		   else
+		       Remove the old MAC only filter
+		       Add a new MAC+VLAN filter
+		*/
+		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+			/* Saved-next walk: the current node may be
+			 * unlinked inside the loop.
+			 */
+			filter = STAILQ_FIRST(&vnic->filter);
+			while (filter) {
+				temp_filter = STAILQ_NEXT(filter, next);
+
+				if (filter->enables &
+				    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN) {
+					if (filter->l2_ovlan == vlan_id)
+						goto cont;
+				} else {
+					/* Must delete the MAC filter */
+					STAILQ_REMOVE(&vnic->filter, filter,
+						      bnxt_filter_info, next);
+					bnxt_hwrm_clear_filter(bp, filter);
+					filter->l2_ovlan = 0;
+					STAILQ_INSERT_TAIL(
+							&bp->free_filter_list,
+							filter, next);
+				}
+				new_filter = bnxt_alloc_filter(bp);
+				if (!new_filter) {
+					RTE_LOG(ERR, PMD,
+						"MAC/VLAN filter alloc failed\n");
+					rc = -ENOMEM;
+					goto exit;
+				}
+				STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
+						   next);
+				/* Inherit MAC from the previous filter.
+				 * NOTE(review): in the MAC-only branch the
+				 * source 'filter' was already moved to the
+				 * free list above; still-valid memory, but
+				 * fragile -- consider copying first. */
+				new_filter->mac_index = filter->mac_index;
+				memcpy(new_filter->l2_addr, filter->l2_addr,
+				       ETHER_ADDR_LEN);
+				/* MAC + VLAN ID filter */
+				new_filter->l2_ovlan = vlan_id;
+				new_filter->l2_ovlan_mask = 0xF000;
+				new_filter->enables |=
+					HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
+					HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
+				rc = bnxt_hwrm_set_filter(bp, vnic, new_filter);
+				if (rc)
+					goto exit;
+cont:
+				filter = temp_filter;
+			}
+		}
+	}
+exit:
+	return rc;
+}
+
+/* vlan_filter_set handler: add (on != 0) or remove (on == 0) vlan_id
+ * across all existing MAC/VLAN filters in every pool.
+ */
+static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
+				   uint16_t vlan_id, int on)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+	/* These operations apply to ALL existing MAC/VLAN filters */
+	return on ? bnxt_add_vlan_filter(bp, vlan_id) :
+		    bnxt_del_vlan_filter(bp, vlan_id);
+}
+
+/* vlan_strip_queue_set handler: toggle VLAN stripping on the VNIC that
+ * backs the given RX queue.  Stripping is a per-VNIC property, so every
+ * queue sharing the VNIC silently inherits the setting (see TODO).
+ */
+static void bnxt_vlan_strip_queue_set_op(struct rte_eth_dev *eth_dev,
+					 uint16_t rx_queue_id, int on)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[rx_queue_id];
+	struct bnxt_vnic_info *vnic;
+	int rc = 0;
+
+	/* VLAN strip at the VNIC level is supported */
+	if (!rxq) {
+		RTE_LOG(ERR, PMD, "Rx queue with id %d not defined!",
+			rx_queue_id);
+		return;
+	}
+	/* Bug fix: the original tested "if (rxq->vnic)" and bailed out
+	 * for every queue that HAD a VNIC, then dereferenced the NULL
+	 * vnic for queues without one.  The check must be inverted.
+	 */
+	if (!rxq->vnic) {
+		RTE_LOG(ERR, PMD, "Rx queue with id %d does not have a VNIC!",
+			rx_queue_id);
+		return;
+	}
+	vnic = rxq->vnic;
+	if ((on && vnic->vlan_strip == true) ||
+	    (!on && vnic->vlan_strip == false)) {
+		RTE_LOG(INFO, PMD,
+			"Rx queue with id %d already has VLAN strip set to %d",
+			rx_queue_id, on);
+		goto done;
+	}
+	vnic->vlan_strip = on ? true : false;
+	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+		return;
+	}
+
+	/* TODO: If there are other rx queues that belong to the same VNIC,
+		 we have the following options:
+
+		1. Accept the change and silently force the same VLAN strip
+		   setting to all associated rx queues [current implementation]
+		2. Migrate any rx queues that are hanging on the same VNIC to
+		   a new VNIC
+		3. Reject the request if there are other rx queues using the
+		   same VNIC
+	*/
+done:
+	/* Bug fix: success path was logged at ERR severity; use INFO. */
+	RTE_LOG(INFO, PMD, "Rx queue with id %d has VLAN strip set to %d!",
+		rx_queue_id, on);
+}
+
+/* flow_ctrl_get handler: refresh the link state from firmware and
+ * translate the cached pause settings into an rte_eth_fc_conf.
+ * Returns 0 on success or the HWRM error code.
+ */
+static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
+			       struct rte_eth_fc_conf *fc_conf)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct rte_eth_link link_info;
+	int rc;
+
+	/* The local link_info result is not consumed below; the call
+	 * presumably refreshes bp->link_info as a side effect -- verify
+	 * against bnxt_get_hwrm_link_config().
+	 */
+	rc = bnxt_get_hwrm_link_config(bp, &link_info);
+	if (rc)
+		return rc;
+
+	/* Bug fix: fc_conf was annotated __rte_unused although the
+	 * function writes through it.  Also dropped the five explicit
+	 * "= 0" assignments that were redundant after memset().
+	 */
+	memset(fc_conf, 0, sizeof(*fc_conf));
+	if (bp->link_info.auto_pause)
+		fc_conf->autoneg = 1;
+	switch (bp->link_info.pause) {
+	case 0:
+		fc_conf->mode = RTE_FC_NONE;
+		break;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
+		fc_conf->mode = RTE_FC_TX_PAUSE;
+		break;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
+		fc_conf->mode = RTE_FC_RX_PAUSE;
+		break;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
+	     HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
+		fc_conf->mode = RTE_FC_FULL;
+		break;
+	}
+	return 0;
+}
+
+/* flow_ctrl_set handler: map the requested flow control mode onto the
+ * HWRM auto/forced pause fields (auto when autoneg is requested, forced
+ * otherwise) and push the new link configuration to firmware.
+ */
+static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
+			       struct rte_eth_fc_conf *fc_conf)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+	switch (fc_conf->mode) {
+	case RTE_FC_NONE:
+		bp->link_info.auto_pause = 0;
+		bp->link_info.force_pause = 0;
+		break;
+	case RTE_FC_RX_PAUSE:
+		bp->link_info.auto_pause = fc_conf->autoneg ?
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX : 0;
+		bp->link_info.force_pause = fc_conf->autoneg ?
+			0 : HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+		break;
+	case RTE_FC_TX_PAUSE:
+		bp->link_info.auto_pause = fc_conf->autoneg ?
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX : 0;
+		bp->link_info.force_pause = fc_conf->autoneg ?
+			0 : HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
+		break;
+	case RTE_FC_FULL:
+		bp->link_info.auto_pause = fc_conf->autoneg ?
+			(HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
+			 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX) : 0;
+		bp->link_info.force_pause = fc_conf->autoneg ? 0 :
+			(HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
+			 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX);
+		break;
+	}
+	return bnxt_set_hwrm_link_config(bp, true);
+}
+
+/*
+ * Initialization
+ */
+
+/* eth_dev_ops table wired into every bnxt port in bnxt_dev_init().
+ * The "//" and "#if 0" entries are callbacks planned for later phases.
+ */
+static struct eth_dev_ops bnxt_dev_ops = {
+	.dev_infos_get = bnxt_dev_info_get_op,
+	.dev_configure = bnxt_dev_configure_op,
+	.dev_start = bnxt_dev_start_op,
+	.dev_stop = bnxt_dev_stop_op,
+	.dev_set_link_up = bnxt_dev_set_link_up_op,
+	.dev_set_link_down = bnxt_dev_set_link_down_op,
+	.dev_close = bnxt_dev_close_op,
+	.stats_get = bnxt_stats_get_op,
+	.stats_reset = bnxt_stats_reset_op,
+	.rx_queue_setup = bnxt_rx_queue_setup_op,
+	.rx_queue_release = bnxt_rx_queue_release_op,
+//        .rx_queue_count       = bnxt_rx_queue_count_op,
+//        .rx_descriptor_done   = bnxt_rx_descriptor_done_op,
+	.tx_queue_setup = bnxt_tx_queue_setup_op,
+	.tx_queue_release = bnxt_tx_queue_release_op,
+//        .rx_queue_start       = bnxt_rx_queue_start_op,
+//        .rx_queue_stop        = bnxt_rx_queue_stop_op,
+//        .tx_queue_start       = bnxt_tx_queue_start_op,
+//        .tx_queue_stop        = bnxt_tx_queue_stop_op,
+	.reta_update = bnxt_reta_update_op,
+	.reta_query = bnxt_reta_query_op,
+	.rss_hash_update = bnxt_rss_hash_update_op,
+	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
+	.link_update = bnxt_link_update_op,
+	.promiscuous_enable = bnxt_promiscuous_enable_op,
+	.promiscuous_disable = bnxt_promiscuous_disable_op,
+	.allmulticast_enable = bnxt_allmulticast_enable_op,
+	.allmulticast_disable = bnxt_allmulticast_disable_op,
+	.mtu_set = bnxt_mtu_set_op,
+	.mac_addr_add = bnxt_mac_addr_add_op,
+	.mac_addr_remove = bnxt_mac_addr_remove_op,
+	.vlan_filter_set = bnxt_vlan_filter_set_op,
+	.vlan_strip_queue_set = bnxt_vlan_strip_queue_set_op,
+	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
+	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
+#if 0				// Phase 2/3
+	.dev_led_on = bnxt_dev_led_on_op,
+	.dev_led_off = bnxt_dev_led_off_op,
+	.queue_stats_mapping_set = bnxt_queue_stats_mapping_set_op,
+	.vlan_tpid_set = bnxt_vlan_tpid_set_op,
+	.vlan_offload_set = bnxt_vlan_offload_set_op,
+	.priority_flow_ctrl_set = bnxt_priority_flow_ctrl_set_op,
+	.uc_hash_table_set = bnxt_uc_hash_table_set_op,
+	.uc_all_hash_table_set = bnxt_uc_all_hash_table_set_op,
+	.mirror_rule_set = bnxt_mirror_rule_set_op,
+	.mirror_rule_reset = bnxt_mirror_rule_reset_op,
+	.set_vf_rx_mode = bnxt_set_vf_rx_mode_op,
+	.set_vf_rx = bnxt_set_vf_rx_op,
+	.set_vf_tx = bnxt_set_vf_tx_op,
+	.set_vf_vlan_filter = bnxt_set_vf_vlan_filter_op,
+	.set_queue_rate_limit = bnxt_set_queue_rate_limit_op,
+	.set_vf_rate_limit = bnxt_set_vf_rate_limit_op,
+	.fdir_add_signature_filter = bnxt_fdir_add_signature_filter_op,
+	.fdir_update_signature_filter = bnxt_fdir_update_signature_filter_op,
+	.fdir_remove_signature_filter = bnxt_fdir_remove_signature_filter_op,
+	.fdir_infos_get = bnxt_fdir_info_get_op,
+	.fdir_add_perfect_filter = bnxt_fdir_add_perfect_filter_op,
+	.fdir_update_perfect_filter = bnxt_fdir_update_perfect_filter_op,
+	.fdir_remove_perfect_filter = bnxt_fdir_remove_perfect_filter_op,
+	.fdir_set_masks = bnxt_fdir_set_masks_op,
+	.add_syn_filter = bnxt_add_syn_filter_op,
+	.remove_syn_filter = bnxt_remove_syn_filter_op,
+	.get_syn_filter = bnxt_get_syn_filter_op,
+	.add_ethertype_filter = bnxt_add_ethertype_filter_op,
+	.remove_ethertype_filter = bnxt_remove_ethertype_filter_op,
+	.get_ethertype_filter = bnxt_get_ethertype_filter_op,
+	.add_5tuple_filter = bnxt_add_5tuple_filter_op,
+	.remove_5tuple_filter = bnxt_remove_5tuple_filter_op,
+	.get_5tuple_filter = bnxt_get_5tuple_filter_op,
+#endif
+};
+
+/* True when the PCI device ID identifies a virtual function. */
+static bool bnxt_vf_pciid(uint16_t id)
+{
+	switch (id) {
+	case BROADCOM_DEV_ID_57304_VF:
+	case BROADCOM_DEV_ID_57406_VF:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* Map the device's BAR0 registers and wire the eth_dev/pci_dev back
+ * pointers into the per-port state.  Returns 0 or -ENODEV when the BAR
+ * was not mapped by the PCI layer.
+ *
+ * Cleanup: the original re-checked bp->bar0 for NULL right after
+ * assigning it from an address already proven non-NULL (dead -ENOMEM
+ * path and dead "release" label), and cast a void * to void *.
+ */
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	if (!eth_dev->pci_dev->mem_resource[0].addr) {
+		RTE_LOG(ERR, PMD,
+			"Cannot find PCI device base address, aborting\n");
+		return -ENODEV;
+	}
+
+	bp->eth_dev = eth_dev;
+	bp->pdev = eth_dev->pci_dev;
+	bp->bar0 = eth_dev->pci_dev->mem_resource[0].addr;
+	return 0;
+}
+
+/* eth_dev init callback: probe the board, establish the HWRM channel,
+ * query firmware version/capabilities, build the MAC address table,
+ * allocate SR-IOV resources on a PF, and register the driver with
+ * firmware.  Returns 0 or a negative errno; on failure after the HWRM
+ * channel exists, everything is unwound via bnxt_dev_close_op().
+ */
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev)
+{
+	static int version_printed;
+	struct bnxt *bp;
+	int rc;
+
+	if (version_printed++ == 0)
+		RTE_LOG(INFO, PMD, "%s", bnxt_version);
+
+	/* Functions 2 and 3 don't work in FPGA */
+	if (eth_dev->pci_dev->addr.function >= 2
+	    && eth_dev->pci_dev->addr.function < 4) {
+		RTE_LOG(ERR, PMD, "Function not enabled %x:\n",
+			eth_dev->pci_dev->addr.function);
+		/* Bug fix: was -ENOMEM; the function is absent/disabled,
+		 * not out of memory.
+		 */
+		return -ENODEV;
+	}
+
+	bp = eth_dev->data->dev_private;
+
+	if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
+		bp->flags |= BNXT_FLAG_VF;
+
+	rc = bnxt_init_board(eth_dev);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Board initialization failed rc: %x\n", rc);
+		goto error;
+	}
+	eth_dev->dev_ops = &bnxt_dev_ops;
+	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+
+	rc = bnxt_alloc_hwrm_resources(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"hwrm resource allocation failure rc: %x\n", rc);
+		goto error;
+	}
+	rc = bnxt_hwrm_ver_get(bp);
+	if (rc)
+		goto error;
+	bnxt_hwrm_queue_qportcfg(bp);
+
+	/* Get the MAX capabilities for this function */
+	rc = bnxt_hwrm_func_qcaps(bp);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+		goto error_free;
+	}
+	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		RTE_LOG(ERR, PMD,
+			"Failed to alloc %u bytes needed to store MAC addr tbl",
+			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+		rc = -ENOMEM;
+		goto error_free;
+	}
+	/* Copy the permanent MAC from the qcap response address now. */
+	if (BNXT_PF(bp))
+		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
+	else
+		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+	bp->def_cp_ring.cp_ring_struct.ring_size =
+	    rte_align32pow2(MAX_CP_DESC_CNT);
+
+	/* SR-IOV specifics */
+	if (eth_dev->pci_dev->max_vfs) {
+		if (BNXT_PF(bp)) {
+			uint16_t buf_size = bp->pf.max_vfs * 128;
+
+			if (eth_dev->pci_dev->max_vfs > bp->pf.max_vfs) {
+				RTE_LOG(ERR, PMD,
+				  "Max VF request (%d) exceeded support (%d)",
+				  eth_dev->pci_dev->max_vfs, bp->pf.max_vfs);
+				rc = -EINVAL;
+				bp->pf.active_vfs = 0;
+				goto error_free;
+			}
+			/* Allocate VFs */
+			bp->pf.active_vfs = eth_dev->pci_dev->max_vfs;
+			rc = bnxt_hwrm_func_vf_alloc(bp, bp->pf.active_vfs);
+			if (rc) {
+				RTE_LOG(ERR, PMD, "Failed to alloc VFs");
+				rc = -ENOMEM;
+				bp->pf.active_vfs = 0;
+				goto error_free;
+			}
+
+			/* Register VF forwarding command buffer */
+			bp->pf.vf_req_buf = rte_zmalloc("bnxt_vf_req_buf",
+							buf_size, 0);
+			if (bp->pf.vf_req_buf == NULL) {
+				RTE_LOG(ERR, PMD,
+					"Failed to alloc %u bytes needed for VF req buf",
+					buf_size);
+				rc = -ENOMEM;
+				goto error_free;
+			}
+			bp->pf.vf_req_buf_dma_addr =
+					rte_malloc_virt2phy(bp->pf.vf_req_buf);
+
+			rc = bnxt_hwrm_func_pfvfbufs_register(bp,
+						bp->pf.vf_req_buf_dma_addr,
+						buf_size);
+			if (rc) {
+				RTE_LOG(ERR, PMD,
+					"Failed to register VF req buf");
+				rc = -EBUSY;
+				goto error_free;
+			}
+
+			/* Forward these VF commands to the PF driver */
+			SET_BIT_IN_ARRAY(bp->pf.vf_req_fwd,
+					 HWRM_CFA_L2_FILTER_ALLOC);
+			SET_BIT_IN_ARRAY(bp->pf.vf_req_fwd,
+					 HWRM_CFA_L2_FILTER_FREE);
+			SET_BIT_IN_ARRAY(bp->pf.vf_req_fwd,
+					 HWRM_CFA_L2_FILTER_CFG);
+			SET_BIT_IN_ARRAY(bp->pf.vf_req_fwd,
+					 HWRM_CFA_L2_SET_RX_MASK);
+
+		}
+	}
+	rc = bnxt_hwrm_func_driver_register(bp, 0,
+					    bp->pf.vf_req_fwd);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to register driver");
+		rc = -EBUSY;
+		goto error_free;
+	}
+
+	/* Bug fix: phys_addr is 64-bit while "%lx" is only 32 bits wide
+	 * on 32-bit targets; cast and use %llx.  NOTE(review): "%pM" is
+	 * a kernel printf extension, not standard userspace printf --
+	 * verify the intended output format.
+	 */
+	RTE_LOG(INFO, PMD,
+		DRV_MODULE_NAME " found at mem %llx, node addr %pM\n",
+		(unsigned long long)
+			eth_dev->pci_dev->mem_resource[0].phys_addr,
+		eth_dev->pci_dev->mem_resource[0].addr);
+
+	return 0;
+
+error_free:
+	bnxt_dev_close_op(eth_dev);
+error:
+	return rc;
+}
+
+/* Inverse of bnxt_dev_init(): unregister the driver from firmware. */
+static int
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct bnxt *bp = eth_dev->data->dev_private;
+
+	return bnxt_hwrm_func_driver_unregister(bp, 0);
+}
+
+/* PCI driver glue: binds the bnxt PCI ID table to the init/uninit
+ * callbacks; dev_private_size sizes the per-port struct bnxt.
+ */
+static struct eth_driver bnxt_rte_pmd = {
+	.pci_drv = {
+		    .name = "rte_" DRV_MODULE_NAME "_pmd",
+		    .id_table = bnxt_pci_id_map,
+		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		    },
+	.eth_dev_init = bnxt_dev_init,
+	.eth_dev_uninit = bnxt_dev_uninit,
+	.dev_private_size = sizeof(struct bnxt),
+};
+
+/* rte_driver init hook: registers the PMD with the ethdev layer.
+ * Always returns 0.
+ */
+static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
+{
+	RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
+	rte_eth_driver_register(&bnxt_rte_pmd);
+	return 0;
+}
+
+/* Static driver descriptor picked up by PMD_REGISTER_DRIVER at load. */
+static struct rte_driver bnxt_pmd_drv = {
+	.name = "eth_bnxt",
+	.type = PMD_PDEV,
+	.init = bnxt_rte_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(bnxt_pmd_drv);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
new file mode 100644
index 0000000..0fd7330
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -0,0 +1,175 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Filter Functions
+ */
+
+/*
+ * Take one filter off the free pool and initialize it as an RX L2 MAC
+ * filter matching the port's primary MAC exactly (all-ones mask).
+ * Returns NULL when the pool is exhausted.
+ */
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
+{
+	struct bnxt_filter_info *new_filter;
+
+	new_filter = STAILQ_FIRST(&bp->free_filter_list);
+	if (new_filter == NULL) {
+		RTE_LOG(ERR, PMD, "No more free filter resources\n");
+		return NULL;
+	}
+	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+	/* Default to an L2 MAC address filter on the receive path. */
+	new_filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+	new_filter->enables =
+		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+	memcpy(new_filter->l2_addr,
+	       bp->eth_dev->data->mac_addrs->addr_bytes, ETHER_ADDR_LEN);
+	memset(new_filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+	return new_filter;
+}
+
+/*
+ * Reset the whole filter table: mark every entry unprogrammed and link
+ * all of them onto the free pool.
+ */
+void bnxt_init_filters(struct bnxt *bp)
+{
+	struct bnxt_filter_info *entry;
+	int max_filters, idx;
+
+	/* Capacity is bounded by the function's L2 context limit. */
+	max_filters = BNXT_PF(bp) ? bp->pf.max_l2_ctx : bp->vf.max_l2_ctx;
+
+	STAILQ_INIT(&bp->free_filter_list);
+	for (idx = 0; idx < max_filters; idx++) {
+		entry = &bp->filter_info[idx];
+		entry->fw_l2_filter_id = -1;
+		STAILQ_INSERT_TAIL(&bp->free_filter_list, entry, next);
+	}
+}
+
+/*
+ * Detach every filter from every vNIC in all free-form pools and return
+ * them to the adapter-wide free pool.  Firmware state is not touched.
+ */
+void bnxt_free_all_filters(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic;
+	int pool;
+
+	for (pool = 0; pool < MAX_FF_POOLS; pool++) {
+		STAILQ_FOREACH(vnic, &bp->ff_pool[pool], next) {
+			struct bnxt_filter_info *cur;
+
+			cur = STAILQ_FIRST(&vnic->filter);
+			while (cur != NULL) {
+				/* Save the successor before unlinking. */
+				struct bnxt_filter_info *nxt =
+					STAILQ_NEXT(cur, next);
+
+				STAILQ_REMOVE(&vnic->filter, cur,
+					      bnxt_filter_info, next);
+				STAILQ_INSERT_TAIL(&bp->free_filter_list,
+						   cur, next);
+				cur = nxt;
+			}
+			STAILQ_INIT(&vnic->filter);
+		}
+	}
+}
+
+/*
+ * Release the filter table.  Any filter still holding a live firmware
+ * handle is freed via HWRM first (with a loud complaint, since callers
+ * should have cleared them already).
+ */
+void bnxt_free_filter_mem(struct bnxt *bp)
+{
+	struct bnxt_filter_info *filter;
+	uint16_t max_filters, i;
+	int rc = 0;
+
+	/* Guard against double-free / free-before-alloc. */
+	if (bp->filter_info == NULL)
+		return;
+
+	/* Ensure that all filters are freed */
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		max_filters = pf->max_l2_ctx;
+	} else {
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		max_filters = vf->max_l2_ctx;
+	}
+	for (i = 0; i < max_filters; i++) {
+		filter = &bp->filter_info[i];
+		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
+			RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+			/* Call HWRM to try to free filter again */
+			rc = bnxt_hwrm_clear_filter(bp, filter);
+			if (rc)
+				RTE_LOG(ERR, PMD,
+				       "HWRM filter cannot be freed rc = %d\n",
+					rc);
+		}
+		filter->fw_l2_filter_id = -1;
+	}
+	STAILQ_INIT(&bp->free_filter_list);
+
+	rte_free(bp->filter_info);
+	bp->filter_info = NULL;
+}
+
+/*
+ * Allocate the zeroed filter table sized to the function's L2 context
+ * limit.  Returns 0 on success or -ENOMEM.
+ */
+int bnxt_alloc_filter_mem(struct bnxt *bp)
+{
+	struct bnxt_filter_info *filter_mem;
+	uint16_t max_filters;
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		max_filters = pf->max_l2_ctx;
+	} else {
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		max_filters = vf->max_l2_ctx;
+	}
+	/* Allocate memory for VNIC pool and filter pool */
+	filter_mem = rte_zmalloc("bnxt_filter_info",
+				 max_filters * sizeof(struct bnxt_filter_info),
+				 0);
+	if (filter_mem == NULL) {
+		RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters\n",
+			max_filters);
+		return -ENOMEM;
+	}
+	bp->filter_info = filter_mem;
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
new file mode 100644
index 0000000..0da504a
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_FILTER_H_
+#define _BNXT_FILTER_H_
+
+#include <rte_ether.h>
+
+struct bnxt;
+/* Software shadow of one HWRM CFA L2 filter. */
+struct bnxt_filter_info {
+	STAILQ_ENTRY(bnxt_filter_info)	next;
+	/* Firmware handle; (uint64_t)-1 when not programmed in hardware. */
+	uint64_t		fw_l2_filter_id;
+#define INVALID_MAC_INDEX	((uint16_t)-1)
+	uint16_t		mac_index;
+
+	/* Filter Characteristics */
+	/* flags/enables mirror the HWRM_CFA_L2_FILTER_ALLOC input fields. */
+	uint32_t		flags;
+	uint32_t		enables;
+	uint8_t			l2_addr[ETHER_ADDR_LEN];
+	uint8_t			l2_addr_mask[ETHER_ADDR_LEN];
+	uint16_t		l2_ovlan;
+	uint16_t		l2_ovlan_mask;
+	uint16_t		l2_ivlan;
+	uint16_t		l2_ivlan_mask;
+	/* t_* fields describe the tunnel (outer) L2 header match. */
+	uint8_t			t_l2_addr[ETHER_ADDR_LEN];
+	uint8_t			t_l2_addr_mask[ETHER_ADDR_LEN];
+	uint16_t		t_l2_ovlan;
+	uint16_t		t_l2_ovlan_mask;
+	uint16_t		t_l2_ivlan;
+	uint16_t		t_l2_ivlan_mask;
+	uint8_t			tunnel_type;
+	uint16_t		mirror_vnic_id;
+	uint32_t		vni;
+	uint8_t			pri_hint;
+	uint64_t		l2_filter_id_hint;
+};
+
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
+void bnxt_init_filters(struct bnxt *bp);
+void bnxt_free_all_filters(struct bnxt *bp);
+void bnxt_free_filter_mem(struct bnxt *bp);
+int bnxt_alloc_filter_mem(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
new file mode 100644
index 0000000..2996ba7
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -0,0 +1,1536 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_txq.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+#define HWRM_RING_ALLOC_TX	0x1
+#define HWRM_RING_ALLOC_RX	0x2
+#define HWRM_RING_ALLOC_AGG	0x4
+#define HWRM_RING_ALLOC_CMPL	0x8
+
+#define HWRM_RESP_LENGTH	4096
+
+#ifdef FPGA
+#define HWRM_CMD_TIMEOUT		200000
+#else
+#define HWRM_CMD_TIMEOUT		2000
+#endif
+#define HWRM_RESP_ERR_CODE_MASK		0xffff
+
+/*
+ * HWRM Functions (sent to HWRM)
+ * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
+ * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
+ * command was failed by the ChiMP.
+ */
+
+/*
+ * Synchronously deliver one HWRM request through the BAR0 mailbox and
+ * wait for the firmware (ChiMP) to complete it.  Only one message may
+ * be outstanding at a time; bp->hwrm_lock serializes callers.
+ * Returns 0 on success, -1 on timeout.
+ */
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
+{
+	unsigned i;
+	int intr_process;
+	struct input *req = msg;
+	struct output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t *data = msg;
+	volatile uint8_t *bar;
+	uint8_t *valid;
+
+	rte_spinlock_lock(&bp->hwrm_lock);
+	/* A valid completion ring id means the reply arrives via interrupt. */
+	intr_process = (req->cmpl_ring == INVALID_HW_RING_ID) ? 0 : 1;
+
+	/* Write request msg to hwrm channel */
+	for (i = 0; i < msg_len; i += 4) {
+		bar = (volatile uint8_t *)bp->bar0 + i;
+		*(volatile uint32_t *)bar = *data;
+		data++;
+	}
+
+	/*
+	 * Zero the rest of the request window so a stale tail from a
+	 * previous, longer command cannot be misread by the firmware.
+	 */
+	for (; i < bp->max_req_len; i += 4) {
+		bar = (volatile uint8_t *)bp->bar0 + i;
+		*(volatile uint32_t *)bar = 0;
+	}
+
+	/* currently supports only one outstanding message */
+	if (intr_process)
+		bp->hwrm_intr_seq_id = req->seq_id;
+
+	/* Ring channel doorbell */
+	bar = (volatile uint8_t *)bp->bar0 + 0x100;
+	*(volatile uint32_t *)bar = 1;
+
+	/*
+	 * NOTE(review): a write barrier *before* the doorbell would be the
+	 * expected ordering; confirm rte_rmb() here is intentional.
+	 */
+	rte_rmb();
+
+	if (intr_process) {
+		i = 0;
+		/* Wait until hwrm response cmpl interrupt is processed */
+		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+		       i++ < HWRM_CMD_TIMEOUT) {
+			rte_delay_us(600);
+		}
+
+		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+			RTE_LOG(ERR, PMD, "Resp cmpl intr err msg:%x\n",
+				req->req_type);
+			goto err_ret;
+		}
+	} else {
+		/* Poll for the valid bit */
+		for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+			/* Sanity check on the resp->resp_len */
+			if (resp->resp_len && resp->resp_len <=
+			    HWRM_RESP_LENGTH) {
+				/* Last byte of resp contains the valid key */
+				valid = (uint8_t *) resp + resp->resp_len - 1;
+				if (*valid == HWRM_RESP_VALID_KEY)
+					break;
+			}
+			rte_delay_us(600);
+		}
+
+		if (i >= HWRM_CMD_TIMEOUT) {
+			RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+				req->req_type);
+			goto err_ret;
+		}
+	}
+	rte_spinlock_unlock(&bp->hwrm_lock);
+	return 0;
+
+err_ret:
+	rte_spinlock_unlock(&bp->hwrm_lock);
+	return -1;
+}
+
+#define HWRM_PREP(req, type, cr, resp) \
+	memset(bp->hwrm_cmd_resp_addr, 0, HWRM_RESP_LENGTH); \
+	req.req_type = rte_cpu_to_le_16( HWRM_##type ); \
+	req.cmpl_ring = rte_cpu_to_le_16( cr ); \
+	req.seq_id = rte_cpu_to_le_16( bp->hwrm_cmd_seq++ ); \
+	req.target_id = rte_cpu_to_le_16(0xffff); \
+	req.resp_addr = \
+			rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr);
+
+#define HWRM_CHECK_RESULT() \
+	if (rc) { \
+		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
+			__func__, rc); \
+		return rc; \
+	} \
+	if (resp->error_code) { \
+		rc = rte_le_to_cpu_16(resp->error_code); \
+		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+		return rc; \
+	}
+
+/* Program an all-zero RX mask: the vNIC stops accepting any traffic. */
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	req.mask = 0;
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Program the vNIC RX mask.  MCAST and BCAST are always accepted;
+ * promiscuous and all-multicast are added per the vNIC flags.
+ */
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int rc = 0;
+	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t mask = 0;
+
+	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+	/* FIXME add multicast flag, when multicast adding options is supported
+	 * by ethtool.
+	 */
+	/*
+	 * Accumulate with |= so PROMISC is not lost when ALLMULTI is also
+	 * set (plain assignment here previously clobbered it).
+	 */
+	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST |
+				    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
+				    mask);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Allocate a firmware L2 filter for the given vNIC from the software
+ * filter description.  On success the firmware handle is stored in
+ * filter->fw_l2_filter_id.
+ */
+int bnxt_hwrm_set_filter(struct bnxt *bp,
+			 struct bnxt_vnic_info *vnic,
+			 struct bnxt_filter_info *filter)
+{
+	int rc = 0;
+	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
+	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t enables = 0;
+
+	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
+
+	req.flags = rte_cpu_to_le_32(filter->flags);
+
+	/* The destination is always forced to the vNIC being configured. */
+	enables = filter->enables |
+	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+	if (enables &
+	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
+		memcpy(req.l2_addr, filter->l2_addr,
+		       ETHER_ADDR_LEN);
+	if (enables &
+	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
+		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
+		       ETHER_ADDR_LEN);
+	/* VLAN fields are multi-byte: convert to little-endian like the rest. */
+	if (enables &
+	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
+		req.l2_ovlan = rte_cpu_to_le_16(filter->l2_ovlan);
+	if (enables &
+	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
+		req.l2_ovlan_mask = rte_cpu_to_le_16(filter->l2_ovlan_mask);
+
+	req.enables = rte_cpu_to_le_32(enables);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+
+	return rc;
+}
+
+/*
+ * Free the firmware L2 filter held by this software filter and mark it
+ * unprogrammed.  Returns 0 on success, an HWRM error otherwise.
+ */
+int bnxt_hwrm_clear_filter(struct bnxt *bp,
+			   struct bnxt_filter_info *filter)
+{
+	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
+	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	/* Invalidate the software handle now that firmware released it. */
+	filter->fw_l2_filter_id = -1;
+
+	return 0;
+}
+
+/*
+ * Push the vNIC's RSS configuration (hash type, indirection table and
+ * hash key DMA addresses) to the firmware.
+ */
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+			   struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
+
+	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.ring_grp_tbl_addr = rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+	req.hash_key_tbl_addr = rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/* DCB configuration is not implemented yet; stub that reports success. */
+int bnxt_hwrm_vnic_set_dcb(struct bnxt *bp __rte_unused,
+			   struct bnxt_vnic_info *vnic __rte_unused)
+{
+	return 0;
+}
+
+/* Release the vNIC's RSS/CoS/LB context and invalidate the cached id. */
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
+	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+
+	return rc;
+}
+
+/* Allocate an RSS/CoS/LB context and record its firmware id in the vNIC. */
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+
+	return rc;
+}
+
+/*
+ * Configure a vNIC: default ring group, RSS rule, MRU, and optional
+ * default/VLAN-strip flags.  CoS and LB rules are left disabled (0xffff).
+ */
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int rc = 0;
+	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
+	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, VNIC_CFG, -1, resp);
+
+	/* Only RSS support for now TBD: COS & LB */
+	req.enables =
+	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
+			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
+			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	req.dflt_ring_grp =
+		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
+	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+	req.cos_rule = rte_cpu_to_le_16(0xffff);
+	req.lb_rule = rte_cpu_to_le_16(0xffff);
+	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
+	/*
+	 * Bit 0 marks this as the function's default vNIC; convert to LE
+	 * like the strip flag below (plain "= 1" was wrong on big-endian).
+	 */
+	if (vnic->func_default)
+		req.flags = rte_cpu_to_le_32(1);
+	if (vnic->vlan_strip)
+		req.flags |=
+		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/* Free the firmware vNIC, if one was ever allocated; idempotent. */
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_free_input req = {.req_type = 0 };
+	int rc = 0;
+
+	/* Nothing to release when the vNIC was never programmed. */
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return rc;
+
+	HWRM_PREP(req, VNIC_FREE, -1, resp);
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	vnic->fw_vnic_id = INVALID_HW_RING_ID;
+	return rc;
+}
+
+/*
+ * Allocate a firmware vNIC after mapping the requested span of ring
+ * groups into vnic->fw_grp_ids.  The mapping stops early (with an error
+ * log) if a group in the span was never allocated.
+ */
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_vnic_alloc_input req = {.req_type = 0 };
+	int rc = 0, i, j;
+
+	/* map ring groups to this vnic */
+	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
+		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+			RTE_LOG(ERR, PMD,
+				"Not enough ring groups avail:%x req:%x\n", j,
+				(vnic->end_grp_id - vnic->start_grp_id) + 1);
+			break;
+		}
+		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+	}
+
+	/* No RSS/CoS/LB context is attached yet. */
+	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+
+	HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+	return rc;
+}
+
+/*
+ * Allocate a firmware ring group from the rings recorded in
+ * bp->grp_info[idx] and store the resulting group id back there.
+ */
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned idx)
+{
+	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
+
+	/* completion, rx, aggregation rings and the stats context */
+	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
+	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
+	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
+	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
+
+	return rc;
+}
+
+/*
+ * Allocate a firmware ring (TX, RX or completion) backed by the given
+ * descriptor memory.  On success the firmware ring id is stored in
+ * ring->fw_ring_id.  map_index associates the ring with a doorbell and,
+ * for TX/RX, selects the completion ring from bp->grp_info.
+ */
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+			 struct bnxt_ring_struct *ring,
+			 uint32_t ring_type, uint32_t map_index,
+			 uint32_t stats_ctx_id)
+{
+	int rc = 0;
+	struct hwrm_ring_alloc_input req = {.req_type = 0 };
+	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, RING_ALLOC, -1, resp);
+
+	req.enables = rte_cpu_to_le_32(0);
+
+	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
+	req.fbo = rte_cpu_to_le_32(0);
+	/* Association of ring index with doorbell index */
+	req.logical_id = rte_cpu_to_le_16(map_index);
+
+	switch (ring_type) {
+	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+		req.queue_id = bp->cos_queue[0].id;
+		/* FALLTHROUGH: TX shares the RX setup below. */
+	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+		req.ring_type = ring_type;
+		req.cmpl_ring_id =
+		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
+		req.length = rte_cpu_to_le_32(ring->ring_size);
+		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables)
+					       |
+					       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+		break;
+	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
+		req.ring_type = ring_type;
+		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+		req.length = rte_cpu_to_le_32(ring->ring_size);
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+			ring_type);
+		return -1;
+	}
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	/* Per-ring-type error reporting instead of HWRM_CHECK_RESULT(). */
+	if (rc || resp->error_code) {
+		if (rc == 0 && resp->error_code)
+			rc = rte_le_to_cpu_16(resp->error_code);
+		switch (ring_type) {
+		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+			RTE_LOG(ERR, PMD,
+				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
+			return rc;
+		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+			RTE_LOG(ERR, PMD,
+				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
+			return rc;
+		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+			RTE_LOG(ERR, PMD,
+				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
+			return rc;
+		default:
+			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+			return rc;
+		}
+	}
+
+	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
+	return rc;
+}
+
+/*
+ * Return a ring to the firmware pool.  Errors are logged with the ring
+ * type for easier diagnosis; returns 0 on success.
+ */
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+			struct bnxt_ring_struct *ring, uint32_t ring_type)
+{
+	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_ring_free_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, RING_FREE, -1, resp);
+	req.ring_type = ring_type;
+	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	if (rc == 0 && resp->error_code == 0)
+		return 0;
+
+	/* Prefer the firmware error code when the transport succeeded. */
+	if (rc == 0)
+		rc = rte_le_to_cpu_16(resp->error_code);
+
+	switch (ring_type) {
+	case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+		RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+			rc);
+		return rc;
+	case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+		RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+			rc);
+		return rc;
+	case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+		RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+			rc);
+		return rc;
+	default:
+		RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+		return rc;
+	}
+}
+
+/*
+ * Allocate a firmware statistics context DMA'd to cpr->hw_stats_map and
+ * record its id in both the completion ring and bp->grp_info[idx].
+ */
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
+			     struct bnxt_cp_ring_info *cpr, unsigned idx)
+{
+	int rc;
+	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
+	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
+
+	req.update_period_ms = rte_cpu_to_le_32(1000);
+
+	/*
+	 * Note: HWRM_PREP already assigned req.seq_id; the previous extra
+	 * "req.seq_id = bp->hwrm_cmd_seq++" here burned a sequence number
+	 * per call and has been removed.
+	 */
+	req.stats_dma_addr =
+	    rte_cpu_to_le_64(cpr->hw_stats_map);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+	return rc;
+}
+
+/*
+ * Zero the hardware counters of a statistics context.  A no-op when the
+ * context was never allocated.
+ */
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+	int rc = 0;
+	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
+	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+	/*
+	 * Check before HWRM_PREP: the old order consumed a sequence number
+	 * and cleared the response buffer even on the no-op path.
+	 */
+	if (cpr->hw_stats_ctx_id == (uint32_t)INVALID_STATS_CTX_ID)
+		return rc;
+
+	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
+	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Query this function's capabilities (resource limits, permanent MAC)
+ * and cache them in bp->pf or bp->vf as appropriate.
+ */
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_qcaps_input req = {.req_type = 0 };
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+
+	/* 0xffff selects the calling function itself. */
+	req.fid = rte_cpu_to_le_16(0xffff);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
+		pf->port_id = resp->port_id;
+		memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
+		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
+	} else {
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
+		/* Same array-decay form as the PF branch for consistency. */
+		memcpy(vf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
+		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	}
+
+	return rc;
+}
+
+/* Ask the firmware to reset this function's state. */
+int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_reset_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, FUNC_RESET, -1, resp);
+	req.enables = rte_cpu_to_le_32(0);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Query this function's counters; copy the raw response into *stats
+ * when the caller supplied a buffer.
+ */
+int bnxt_hwrm_func_qstats(struct bnxt *bp, struct hwrm_func_qstats_output *stats)
+{
+	int rc = 0;
+	struct hwrm_func_qstats_input req = {.req_type = 0 };
+	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_QSTATS, -1, resp);
+
+	/* 0xffff selects the calling function (was an implicit -1). */
+	req.fid = rte_cpu_to_le_16(0xffff);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	if (stats)
+		memcpy(stats, resp, sizeof(*stats));
+
+	return rc;
+}
+
+/* Clear this function's hardware counters. */
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_clr_stats_input req = {.req_type = 0 };
+	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
+
+	/* 0xffff selects the calling function (was an implicit -1). */
+	req.fid = rte_cpu_to_le_16(0xffff);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Exchange HWRM interface versions with the firmware.  Pre-1.0
+ * interfaces require an exact match; from 1.0 on only the major number
+ * must match, with mismatches in minor/update merely logged.
+ */
+int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_ver_get_input req = {.req_type = 0 };
+	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t my_version;
+	uint32_t fw_version;
+
+	HWRM_PREP(req, VER_GET, -1, resp);
+
+	/* Advertise the interface version this driver was built against. */
+	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+	req.hwrm_intf_min = HWRM_VERSION_MINOR;
+	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+		resp->hwrm_intf_maj, resp->hwrm_intf_min,
+		resp->hwrm_intf_upd,
+		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
+
+	/* Pack maj/min/upd into a single comparable integer. */
+	my_version = HWRM_VERSION_MAJOR << 16;
+	my_version |= HWRM_VERSION_MINOR << 8;
+	my_version |= HWRM_VERSION_UPDATE;
+
+	fw_version = resp->hwrm_intf_maj << 16;
+	fw_version |= resp->hwrm_intf_min << 8;
+	fw_version |= resp->hwrm_intf_upd;
+
+#if HWRM_VERSION_MAJOR == 0
+	if (my_version != fw_version) {
+		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+		RTE_LOG(ERR, PMD, "This driver requires %u.%u.%u\n",
+			HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+			HWRM_VERSION_UPDATE);
+		return -1;
+	}
+#else
+	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
+		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+		return -1;
+	}
+#endif
+
+	if (my_version != fw_version) {
+		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+		if (my_version < fw_version) {
+			RTE_LOG(INFO, PMD,
+				"Firmware API version is newer than driver.\n");
+			RTE_LOG(INFO, PMD,
+				"The driver may be missing features.\n");
+		} else {
+			RTE_LOG(INFO, PMD,
+				"Firmware API version is older than driver.\n");
+			RTE_LOG(INFO, PMD,
+				"Not all driver features may be functional.\n");
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Query the 8 CoS queue ids and service profiles for this port and
+ * cache them in bp->cos_queue[].
+ */
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
+	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+/* Token-pasting helper: expands to resp->queue_idX / queue_idX_service_profile. */
+#define GET_QUEUE_INFO(x) \
+	bp->cos_queue[x].id = resp->queue_id##x; \
+	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile;
+
+	GET_QUEUE_INFO(0);
+	GET_QUEUE_INFO(1);
+	GET_QUEUE_INFO(2);
+	GET_QUEUE_INFO(3);
+	GET_QUEUE_INFO(4);
+	GET_QUEUE_INFO(5);
+	GET_QUEUE_INFO(6);
+	GET_QUEUE_INFO(7);
+
+	return rc;
+}
+
+/* Free the ring group at grp_info[idx] and invalidate its cached id. */
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned idx)
+{
+	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
+	int rc;
+
+	HWRM_PREP(req, RING_GRP_FREE, -1, resp);
+	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT();
+
+	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
+	return rc;
+}
+
+/*
+ * Free the statistics context attached to this completion ring and
+ * invalidate the ids cached in cpr and bp->grp_info[idx].
+ */
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
+			    struct bnxt_cp_ring_info *cpr, unsigned idx)
+{
+	int rc;
+	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
+	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
+
+	/*
+	 * HWRM_PREP already assigned req.seq_id; the previous duplicate
+	 * "req.seq_id = bp->hwrm_cmd_seq++" burned a sequence number and
+	 * has been removed.
+	 */
+	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+	return rc;
+}
+
+/*
+ * Register this driver instance with firmware (HWRM_FUNC_DRV_RGTR),
+ * reporting the driver version and the bitmap of VF request types the
+ * PF is willing to forward.
+ */
+int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
+				   uint32_t *vf_req_fwd)
+{
+	int rc;
+	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
+	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
+	req.flags = flags;
+	/* Only the VER enable bit is set here although vf_req_fwd is also
+	 * populated below.  NOTE(review): confirm firmware honors the
+	 * forward map without its corresponding enables bit.
+	 */
+	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
+	req.ver_maj = BNXT_VER_MAJ;
+	req.ver_min = BNXT_VER_MIN;
+	req.ver_upd = BNXT_VER_UPD;
+
+	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/* Unregister this driver instance from firmware (HWRM_FUNC_DRV_UNRGTR). */
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
+{
+	int rc;
+	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
+	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
+	req.flags = flags;
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Register a single DMA buffer of @buf_size bytes with firmware
+ * (HWRM_FUNC_BUF_RGTR) to receive forwarded VF requests.
+ */
+int bnxt_hwrm_func_pfvfbufs_register(struct bnxt *bp, phys_addr_t buf,
+				     uint16_t buf_size)
+{
+	int rc;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+
+	/* 1 buffer for all VFs */
+	req.enables = 0;
+	/* NOTE(review): req_buf_len/page_size/num_pages are written in CPU
+	 * byte order while the page address goes through rte_cpu_to_le_64 —
+	 * confirm these fields are single-byte or fix endianness.
+	 */
+	req.req_buf_len = buf_size;
+	req.req_buf_page_size = buf_size;
+	req.req_buf_num_pages = 1;
+	req.req_buf_page_addr0 = rte_cpu_to_le_64(buf);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Ask firmware to execute a forwarded (VF-originated) HWRM command.
+ * @fwd_cmd must point at a buffer of at least sizeof(req.encap_request)
+ * bytes; the whole encap area is copied regardless of the inner
+ * command's actual length.
+ */
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
+{
+	int rc;
+	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
+	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+
+	memcpy(req.encap_request, fwd_cmd,
+	       sizeof(req.encap_request));
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/* Ask firmware to allocate @num_vfs virtual functions (HWRM_FUNC_VF_ALLOC). */
+int bnxt_hwrm_func_vf_alloc(struct bnxt *bp, uint16_t num_vfs)
+{
+	int rc;
+	struct hwrm_func_vf_alloc_input req = {.req_type = 0 };
+	struct hwrm_func_vf_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_VF_ALLOC, -1, resp);
+
+	/* HWRM fields are little-endian; convert like every other u16
+	 * field in this file (was written raw, breaking big-endian hosts).
+	 */
+	req.num_vfs = rte_cpu_to_le_16(num_vfs);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Ask firmware to free @num_vfs virtual functions (HWRM_FUNC_VF_FREE).
+ * A zero count is a no-op and succeeds without issuing a command.
+ */
+int bnxt_hwrm_func_vf_free(struct bnxt *bp, uint16_t num_vfs)
+{
+	int rc;
+	struct hwrm_func_vf_free_input req = {.req_type = 0 };
+	struct hwrm_func_vf_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (num_vfs == 0)
+		return 0;
+
+	HWRM_PREP(req, FUNC_VF_FREE, -1, resp);
+
+	/* HWRM fields are little-endian; convert like every other u16
+	 * field in this file (was written raw, breaking big-endian hosts).
+	 */
+	req.num_vfs = rte_cpu_to_le_16(num_vfs);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Program the PHY from @conf via HWRM_PORT_PHY_CFG.  When @conf->link_up
+ * is clear the link is forced down instead of configured.
+ */
+static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
+{
+	int rc = 0;
+	struct hwrm_port_phy_cfg_input req = {.req_type = 0};
+	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
+
+	req.flags = conf->phy_flags;
+	if (conf->link_up) {
+		req.force_link_speed = conf->link_speed;
+		/*
+		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
+		 * any auto mode, even "none".
+		 *
+		 * NOTE(review): req is zero-initialized and req.auto_mode is
+		 * never assigned before this test, so if AUTO_MODE_NONE is 0
+		 * the force branch is always taken and the else branch is
+		 * dead code.  conf->auto_mode was presumably intended —
+		 * confirm against the FW workaround above before changing.
+		 */
+		if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
+			req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
+		} else {
+			req.auto_mode = conf->auto_mode;
+			req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+			req.auto_link_speed_mask = conf->auto_link_speed_mask;
+			req.enables |=
+				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+			req.auto_link_speed = conf->auto_link_speed;
+			req.enables |=
+				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+		}
+		req.auto_duplex = conf->duplex;
+		req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
+		req.auto_pause = conf->auto_pause;
+		/* Set force_pause if there is no auto or if there is a force */
+		if (req.auto_pause)
+			req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
+		else
+			req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+		req.force_pause = conf->force_pause;
+		if (req.force_pause)
+			req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+	} else {
+		req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+		req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
+		req.force_link_speed = 0;
+		/* Was a stray, column-0 RTE_LOG(ERR, ...): forcing the link
+		 * down is a normal admin operation, so log at DEBUG level.
+		 */
+		RTE_LOG(DEBUG, PMD, "Forcing link down\n");
+	}
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * Query the current PHY state (HWRM_PORT_PHY_QCFG) and copy the fields
+ * the driver tracks into @link_info.  Multi-byte response fields are
+ * converted from little-endian; single-byte fields are copied as-is.
+ */
+static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
+				   struct bnxt_link_info *link_info)
+{
+	int rc = 0;
+	struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
+	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	link_info->phy_link_status = resp->link;
+	/* Report a zero speed whenever firmware says the link is down. */
+	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
+		link_info->link_up = 1;
+		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
+	} else {
+		link_info->link_up = 0;
+		link_info->link_speed = 0;
+	}
+	link_info->duplex = resp->duplex;
+	link_info->pause = resp->pause;
+	link_info->auto_pause = resp->auto_pause;
+	link_info->force_pause = resp->force_pause;
+	link_info->auto_mode = resp->auto_mode;
+
+	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
+	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
+	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+	link_info->phy_ver[0] = resp->phy_maj;
+	link_info->phy_ver[1] = resp->phy_min;
+	link_info->phy_ver[2] = resp->phy_bld;
+
+	return rc;
+}
+
+/*
+ * Query port-level statistics (HWRM_PORT_QSTATS).  PF-only: returns -1
+ * when called on a VF (NOTE(review): a negative errno such as -EINVAL
+ * would be more conventional — confirm what callers expect).
+ * When @stats is non-NULL the raw response is copied into it.
+ */
+int bnxt_hwrm_port_qstats(struct bnxt *bp, struct hwrm_port_qstats_output *stats)
+{
+	int rc = 0;
+	struct hwrm_port_qstats_input req = {.req_type = 0 };
+	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (!BNXT_PF(bp))
+		return -1;
+
+	HWRM_PREP(req, PORT_QSTATS, -1, resp);
+	req.port_id = bp->pf.port_id;
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	if (stats)
+		memcpy(stats, resp, sizeof(*stats));
+
+	return rc;
+}
+
+/*
+ * Clear port-level statistics (HWRM_PORT_CLR_STATS).  PF-only: returns
+ * -1 when called on a VF, mirroring bnxt_hwrm_port_qstats().
+ */
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_port_clr_stats_input req = {.req_type = 0 };
+	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (!BNXT_PF(bp))
+		return -1;
+
+	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
+	req.port_id = bp->pf.port_id;
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT();
+
+	return rc;
+}
+
+/*
+ * HWRM utility functions
+ */
+
+/*
+ * Issue a statistics clear for the completion ring of every RX and TX
+ * queue.  RX queues occupy the low half of the combined index space.
+ * Stops and returns the first failure; 0 on success.
+ */
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+	unsigned i;
+
+	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr;
+		int rc;
+
+		if (i < bp->rx_cp_nr_rings)
+			cpr = &bp->rx_queues[i]->cp_ring;
+		else
+			cpr = &bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+
+		rc = bnxt_hwrm_stat_clear(bp, cpr);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * Allocate a HW statistics context for the completion ring of every RX
+ * and TX queue.  Group slot 0 is reserved for the default ring, so the
+ * combined queue index is offset by one when passed to the allocator.
+ * Stops and returns the first failure; 0 on success.
+ */
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+	unsigned i;
+
+	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr;
+		int rc;
+
+		if (i < bp->rx_cp_nr_rings)
+			cpr = &bp->rx_queues[i]->cp_ring;
+		else
+			cpr = &bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+
+		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i + 1);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* Release the HWRM command/response buffer and clear the cached addresses. */
+void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+	/* The buffer comes from rte_malloc() in bnxt_alloc_hwrm_resources(),
+	 * not from a memzone as the old comment claimed.
+	 */
+	rte_free(bp->hwrm_cmd_resp_addr);
+	bp->hwrm_cmd_resp_addr = NULL;
+	bp->hwrm_cmd_resp_dma_addr = 0;
+}
+
+/*
+ * Allocate the DMA-able HWRM response buffer, record its physical
+ * address, and initialize the HWRM command lock.
+ * Returns 0 on success or -ENOMEM.
+ */
+int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+	struct rte_pci_device *pdev = bp->pdev;
+	char type[RTE_MEMZONE_NAMESIZE];
+
+	/* snprintf instead of sprintf: the formatted PCI address must not
+	 * overrun the fixed RTE_MEMZONE_NAMESIZE buffer.
+	 */
+	snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
+		 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
+		 pdev->addr.function);
+	bp->hwrm_cmd_resp_addr = rte_malloc(type, HWRM_RESP_LENGTH, 0);
+	if (bp->hwrm_cmd_resp_addr == NULL)
+		return -ENOMEM;
+	/* NOTE(review): rte_malloc_virt2phy() failure (RTE_BAD_PHYS_ADDR)
+	 * is not checked here — confirm whether that can occur in this
+	 * configuration.
+	 */
+	bp->hwrm_cmd_resp_dma_addr =
+	    rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+	bp->max_req_len = HWRM_MAX_REQ_LEN;
+	rte_spinlock_init(&bp->hwrm_lock);
+
+	return 0;
+}
+
+/*
+ * Free one completion ring in firmware, invalidate the cached ring ids
+ * (both in the ring struct and in group slot @idx), and reset the
+ * descriptor memory and consumer index for reuse.
+ */
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, unsigned idx)
+{
+	struct bnxt_ring_struct *cp_ring = &cpr->cp_ring_struct;
+
+	bnxt_hwrm_ring_free(bp, cp_ring,
+			HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
+	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
+	/* Zero the descriptors so stale valid bits cannot be seen if the
+	 * ring is reallocated.
+	 */
+	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct.ring_size * sizeof(*cpr->cp_desc_ring));
+	cpr->cp_raw_cons = 0;
+}
+
+/*
+ * Free every TX, RX, and completion ring in firmware and reset the
+ * associated software ring state.  Rings whose fw id is already invalid
+ * are skipped.  The default completion ring (group slot 0) is freed last.
+ */
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+	unsigned i;
+	int rc = 0;
+
+	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+		struct bnxt_tx_ring_info *txr = &txq->tx_ring;
+		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+		struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+		/* TX group slots follow the RX slots; slot 0 is default. */
+		unsigned idx = bp->rx_cp_nr_rings + i + 1;
+
+		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			bnxt_hwrm_ring_free(bp, ring,
+					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+			ring->fw_ring_id = INVALID_HW_RING_ID;
+			memset(txr->tx_desc_ring, 0,
+			       txr->tx_ring_struct.ring_size *
+			       sizeof(*txr->tx_desc_ring));
+			memset(txr->tx_buf_ring, 0,
+			       txr->tx_ring_struct.ring_size *
+			       sizeof(*txr->tx_buf_ring));
+			txr->tx_prod = 0;
+			txr->tx_cons = 0;
+		}
+		if (cpr->cp_ring_struct.fw_ring_id != INVALID_HW_RING_ID)
+			bnxt_free_cp_ring(bp, cpr, idx);
+	}
+
+	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+		struct bnxt_rx_ring_info *rxr = &rxq->rx_ring;
+		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+		struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+		unsigned idx = i + 1;
+
+		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+			bnxt_hwrm_ring_free(bp, ring,
+					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+			ring->fw_ring_id = INVALID_HW_RING_ID;
+			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+			memset(rxr->rx_desc_ring, 0,
+			       rxr->rx_ring_struct.ring_size *
+			       sizeof(*rxr->rx_desc_ring));
+			memset(rxr->rx_buf_ring, 0,
+			       rxr->rx_ring_struct.ring_size *
+			       sizeof(*rxr->rx_buf_ring));
+			rxr->rx_prod = 0;
+		}
+		if (cpr->cp_ring_struct.fw_ring_id != INVALID_HW_RING_ID)
+			bnxt_free_cp_ring(bp, cpr, idx);
+	}
+
+	/* Default completion ring */
+	{
+		struct bnxt_cp_ring_info *cpr = &bp->def_cp_ring;
+
+		if (cpr->cp_ring_struct.fw_ring_id != INVALID_HW_RING_ID)
+			bnxt_free_cp_ring(bp, cpr, 0);
+	}
+
+	return rc;
+}
+
+/*
+ * Free every allocated RX ring group in firmware.  Already-invalid
+ * groups are logged and skipped.  Stops at the first failure.
+ */
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+{
+	uint16_t i;
+	/* rc was uint32_t, silently converting the callee's negative
+	 * error codes through an unsigned type; keep it int like the
+	 * callee and this function's return type.
+	 */
+	int rc = 0;
+
+	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+		unsigned idx = i + 1;
+
+		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
+			RTE_LOG(ERR, PMD,
+				"Attempt to free invalid ring group %u\n",
+				idx);
+			continue;
+		}
+
+		rc = bnxt_hwrm_ring_grp_free(bp, idx);
+
+		if (rc)
+			return rc;
+	}
+	return rc;
+}
+
+/*
+ * Allocate a firmware ring group for every RX queue whose completion
+ * and RX rings both exist.  Stops at the first failure.
+ */
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
+{
+	uint16_t i;
+	/* int, not uint32_t: negative error codes must survive the
+	 * round-trip to this function's int return type.
+	 */
+	int rc = 0;
+
+	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+		unsigned idx = i + 1;
+
+		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
+		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
+			continue;
+
+		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
+
+		if (rc)
+			return rc;
+	}
+	return rc;
+}
+
+/*
+ * Free every allocated HW statistics context (RX queues first, then
+ * TX), skipping rings whose context id is already invalid.  Stops and
+ * returns the first failure; 0 on success.
+ */
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+	unsigned i;
+
+	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr;
+		int rc;
+
+		if (i < bp->rx_cp_nr_rings)
+			cpr = &bp->rx_queues[i]->cp_ring;
+		else
+			cpr = &bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+
+		if (cpr->hw_stats_ctx_id == (uint32_t) INVALID_STATS_CTX_ID)
+			continue;
+
+		rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i + 1);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * Program every filter attached to @vnic into the hardware.
+ * Stops and returns the first failure; 0 on success (or empty list).
+ */
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct bnxt_filter_info *flt;
+
+	STAILQ_FOREACH(flt, &vnic->filter, next) {
+		int rc = bnxt_hwrm_set_filter(bp, vnic, flt);
+
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * Remove every filter attached to @vnic from the hardware.
+ * Stops and returns the first failure; 0 on success (or empty list).
+ */
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct bnxt_filter_info *flt;
+
+	STAILQ_FOREACH(flt, &vnic->filter, next) {
+		int rc = bnxt_hwrm_clear_filter(bp, flt);
+
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/*
+ * Tear down every firmware-side resource: clear the default vNIC RX
+ * mask, then free filters, contexts and vNICs, and finally all rings,
+ * ring groups and statistics contexts.
+ */
+void bnxt_free_all_hwrm_resources(struct bnxt *bp)
+{
+	unsigned i;
+
+	if (bp->vnic_info == NULL)
+		return;
+
+	/* Clear the RX mask on the default (first) vNIC before teardown.
+	 * (The old code kept a separate outer "vnic" variable that was
+	 * then shadowed inside the loop below.)
+	 */
+	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, &bp->vnic_info[0]);
+
+	/* VNIC resources */
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		bnxt_clear_hwrm_vnic_filters(bp, vnic);
+
+		bnxt_hwrm_vnic_ctx_free(bp, vnic);
+		bnxt_hwrm_vnic_free(bp, vnic);
+	}
+	/* Ring resources */
+	bnxt_free_all_hwrm_rings(bp);
+	bnxt_free_all_hwrm_ring_grps(bp);
+	bnxt_free_all_hwrm_stat_ctxs(bp);
+}
+
+/*
+ * Translate a HWRM duplex value to the DPDK ETH_LINK_*_DUPLEX value.
+ * Unknown values are logged and, like BOTH, map to autoneg duplex.
+ */
+static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
+{
+	switch (hw_link_duplex) {
+	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
+		return ETH_LINK_HALF_DUPLEX;
+	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+		return ETH_LINK_FULL_DUPLEX;
+	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+			hw_link_duplex);
+		break;
+	}
+	return ETH_LINK_AUTONEG_DUPLEX;
+}
+
+/*
+ * Translate a DPDK ETH_LINK_*_DUPLEX value to the HWRM auto-duplex
+ * encoding.  Unsupported values are logged and map to BOTH (auto).
+ */
+static uint16_t bnxt_parse_eth_link_duplex(uint16_t eth_link_duplex)
+{
+	switch (eth_link_duplex) {
+	case ETH_LINK_HALF_DUPLEX:
+		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
+	case ETH_LINK_FULL_DUPLEX:
+		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL;
+	case ETH_LINK_AUTONEG_DUPLEX:
+		break;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Unsupported link duplex mode %d; default to AUTO\n",
+			eth_link_duplex);
+		break;
+	}
+	return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+}
+
+/*
+ * Translate a HWRM link speed value to the DPDK ETH_LINK_SPEED_*
+ * value.  Unknown values are logged and map to AUTONEG.
+ */
+static uint16_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
+{
+	switch (hw_link_speed) {
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
+		return ETH_LINK_SPEED_100;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
+		return ETH_LINK_SPEED_1000;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
+		return ETH_LINK_SPEED_2000;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
+		return ETH_LINK_SPEED_2500;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
+		return ETH_LINK_SPEED_10G;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
+		return ETH_LINK_SPEED_20G;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
+		return ETH_LINK_SPEED_25G;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
+		return ETH_LINK_SPEED_40G;
+	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
+		return ETH_LINK_SPEED_50G;
+	default:
+		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+			hw_link_speed);
+		break;
+	}
+	return ETH_LINK_SPEED_AUTONEG;
+}
+
+/*
+ * Translate a DPDK ETH_LINK_SPEED_* value into the HWRM link speed
+ * encoding.  Unsupported values are logged and fall back to AUTONEG.
+ * NOTE(review): the HWRM_..._QCFG_OUTPUT_... constants are used for a
+ * value later written into a PHY_CFG *input* field — they presumably
+ * share the same encoding; confirm against the HSI definitions.
+ */
+static uint16_t bnxt_parse_eth_link_speed(uint16_t conf_link_speed)
+{
+	/* Renamed from "eth_link_speed": this variable holds the HWRM
+	 * encoding, not an ETH_LINK_SPEED_* value (the ETH_ constant is
+	 * kept as the fallback to preserve the original behavior).
+	 */
+	uint16_t hw_link_speed = ETH_LINK_SPEED_AUTONEG;
+
+	switch (conf_link_speed) {
+	case ETH_LINK_SPEED_AUTONEG:
+		break;
+	case ETH_LINK_SPEED_100:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB;
+		break;
+	case ETH_LINK_SPEED_1000:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB;
+		break;
+	case ETH_LINK_SPEED_2000:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB;
+		break;
+	case ETH_LINK_SPEED_2500:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB;
+		break;
+	case ETH_LINK_SPEED_10G:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB;
+		break;
+	case ETH_LINK_SPEED_20G:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB;
+		break;
+	case ETH_LINK_SPEED_25G:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB;
+		break;
+	case ETH_LINK_SPEED_40G:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB;
+		break;
+	case ETH_LINK_SPEED_50G:
+		hw_link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB;
+		break;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Unsupported link speed %d; default to AUTO\n",
+			conf_link_speed);
+		break;
+	}
+	return hw_link_speed;
+}
+
+/*
+ * Refresh bp->link_info from firmware and translate it into @link.
+ * When the link is down the reported speed is ETH_LINK_SPEED_10
+ * (preserved from the original implementation).
+ */
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
+{
+	struct bnxt_link_info *info = &bp->link_info;
+	int rc = bnxt_hwrm_port_phy_qcfg(bp, info);
+
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Get link config failed with rc %d\n", rc);
+		return rc;
+	}
+
+	link->link_speed = info->link_up ?
+		bnxt_parse_hw_link_speed(info->link_speed) :
+		ETH_LINK_SPEED_10;
+	link->link_duplex = bnxt_parse_hw_link_duplex(info->duplex);
+	link->link_status = info->link_up;
+	return rc;
+}
+
+/*
+ * Build a bnxt_link_info from the device configuration and program it
+ * into the PHY.  With an AUTONEG speed, autonegotiation is (re)started
+ * over the full advertised mask; otherwise the given speed is forced.
+ */
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+{
+	int rc = 0;
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	struct bnxt_link_info link_req;
+	uint16_t speed;
+
+	memset(&link_req, 0, sizeof(link_req));
+	speed = bnxt_parse_eth_link_speed(dev_conf->link_speed);
+	link_req.link_up = link_up;
+	if (speed == ETH_LINK_SPEED_AUTONEG) {
+		link_req.phy_flags =
+				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+		link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
+		/* TODO: Currently, only a subset of speeds are supported
+		   in DPDK */
+		link_req.auto_link_speed_mask =
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB |
+			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+		/* NOTE(review): the auto ceiling is pinned at 50GB rather
+		 * than derived from link_info.support_speeds — confirm
+		 * this is intentional for ONE_OR_BELOW mode.
+		 */
+		link_req.auto_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
+	} else {
+		link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+		link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
+			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
+		link_req.link_speed = speed;
+	}
+	/* Carry over the pause settings last read from firmware. */
+	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_duplex);
+	link_req.auto_pause = bp->link_info.auto_pause;
+	link_req.force_pause = bp->link_info.force_pause;
+
+	rc =  bnxt_hwrm_port_phy_cfg(bp, &link_req);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Set link config failed with rc %d\n", rc);
+	}
+	/* TODO: Do we need to reset PHY? */
+	return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
new file mode 100644
index 0000000..eab5a3d
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -0,0 +1,103 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_HWRM_H_
+#define _BNXT_HWRM_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+/* Prototypes for the HWRM (HW Resource Manager) command layer:
+ * thin wrappers around firmware commands plus bnxt_*_all_* helpers
+ * that iterate over driver-owned resources.
+ */
+struct bnxt;
+#define HWRM_SEQ_ID_INVALID -1U
+
+/* Filter and RX-mask commands */
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
+				   struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_set_filter(struct bnxt *bp,
+			 struct bnxt_vnic_info *vnic,
+			 struct bnxt_filter_info *filter);
+int bnxt_hwrm_clear_filter(struct bnxt *bp,
+			   struct bnxt_filter_info *filter);
+/* vNIC commands */
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+			   struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_set_dcb(struct bnxt *bp __rte_unused,
+			   struct bnxt_vnic_info *vnic __rte_unused);
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+/* Ring, ring-group and statistics-context commands */
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned idx);
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+			 struct bnxt_ring_struct *ring,
+			 uint32_t ring_type, uint32_t map_index,
+			 uint32_t stats_ctx_id);
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+			struct bnxt_ring_struct *ring, uint32_t ring_type);
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
+			     struct bnxt_cp_ring_info *cpr, unsigned idx);
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+/* Function-level and port-level commands */
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reset(struct bnxt *bp);
+int bnxt_hwrm_func_qstats(struct bnxt *bp, struct hwrm_func_qstats_output *stats);
+int bnxt_hwrm_func_clr_stats(struct bnxt *bp);
+int bnxt_hwrm_ver_get(struct bnxt *bp);
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned idx);
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
+			    struct bnxt_cp_ring_info *cpr, unsigned idx);
+int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
+				   uint32_t *vf_req_fwd);
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
+int bnxt_hwrm_func_pfvfbufs_register(struct bnxt *bp, phys_addr_t buf,
+				     uint16_t buf_size);
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
+int bnxt_hwrm_func_vf_alloc(struct bnxt *bp, uint16_t num_vfs);
+int bnxt_hwrm_func_vf_free(struct bnxt *bp, uint16_t num_vfs);
+int bnxt_hwrm_port_qstats(struct bnxt *bp, struct hwrm_port_qstats_output *stats);
+int bnxt_hwrm_port_clr_stats(struct bnxt *bp);
+/* Driver-side iteration helpers over all queues/rings/vNICs */
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
+void bnxt_free_hwrm_resources(struct bnxt *bp);
+int bnxt_alloc_hwrm_resources(struct bnxt *bp);
+int bnxt_free_all_hwrm_rings(struct bnxt *bp);
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_free_all_hwrm_resources(struct bnxt *bp);
+
+/* Link configuration */
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
+#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
new file mode 100644
index 0000000..76f0c60
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -0,0 +1,155 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_irq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Interrupts
+ */
+
+/*
+ * Default completion ring interrupt handler: drain all valid entries,
+ * dispatching async events and forwarded VF requests, then record the
+ * new consumer index and rearm the doorbell.
+ */
+static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
+			     void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_cp_ring_info *cpr = &bp->def_cp_ring;
+	uint32_t raw_cons = cpr->cp_raw_cons;
+	uint32_t cons;
+	struct cmpl_base *cmp;
+
+	while (1) {
+		cons = RING_CMP(&cpr->cp_ring_struct, raw_cons);
+		cmp = &cpr->cp_desc_ring[cons];
+
+		if (!CMP_VALID(cmp, raw_cons, &cpr->cp_ring_struct))
+			break;
+
+		switch (CMP_TYPE(cmp)) {
+		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+			/* Handle any async event */
+			bnxt_handle_async_event(bp, cmp);
+			break;
+		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+			/* Handle HWRM forwarded responses */
+			bnxt_handle_fwd_req(bp, cmp);
+			break;
+		default:
+			/* Ignore any other events */
+			break;
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+	}
+	/* Persist the advanced consumer index before rearming; the
+	 * original rearmed with the stale cpr->cp_raw_cons, so consumed
+	 * entries were never acknowledged and would be reprocessed on
+	 * the next interrupt.
+	 */
+	cpr->cp_raw_cons = raw_cons;
+	B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+}
+
+/*
+ * Tear down the interrupt table: unregister and disable the EAL
+ * callback if it was requested, then free the table itself.
+ */
+void bnxt_free_int(struct bnxt *bp)
+{
+	struct bnxt_irq *irq = bp->irq_tbl;
+
+	if (irq == NULL)
+		return;
+
+	if (irq->requested) {
+		rte_intr_disable(&bp->pdev->intr_handle);
+		rte_intr_callback_unregister(&bp->pdev->intr_handle,
+					     irq->handler,
+					     (void *)bp->eth_dev);
+		irq->requested = 0;
+	}
+	rte_free((void *)bp->irq_tbl);
+	bp->irq_tbl = NULL;
+}
+
+/* Mask interrupts by disabling the default completion ring doorbell. */
+void bnxt_disable_int(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *def_cpr = &bp->def_cp_ring;
+
+	/* Only the default completion ring */
+	B_CP_DIS_DB(def_cpr, def_cpr->cp_raw_cons);
+}
+
+/* Unmask interrupts by rearming the default completion ring doorbell. */
+void bnxt_enable_int(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *def_cpr = &bp->def_cp_ring;
+
+	B_CP_DB_REARM(def_cpr, def_cpr->cp_raw_cons);
+}
+
+/*
+ * Allocate and populate the interrupt table.  Only one MSI-X vector is
+ * used by the DPDK host.  Returns 0 on success or -ENOMEM.
+ */
+int bnxt_setup_int(struct bnxt *bp)
+{
+	uint16_t total_vecs;
+	const int len = sizeof(bp->irq_tbl[0].name);
+	int i;
+
+	/* DPDK host only supports 1 MSI-X vector */
+	total_vecs = 1;
+	bp->irq_tbl = rte_calloc("bnxt_irq_tbl", total_vecs,
+				 sizeof(struct bnxt_irq), 0);
+	if (bp->irq_tbl == NULL) {
+		/* Early return replaces the original goto used by only
+		 * this one path; the log message also gains its missing
+		 * newline.
+		 */
+		RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < total_vecs; i++) {
+		bp->irq_tbl[i].vector = i;
+		snprintf(bp->irq_tbl[i].name, len,
+			 "%s-%d", bp->eth_dev->data->name, i);
+		bp->irq_tbl[i].handler = bnxt_int_handler;
+	}
+	return 0;
+}
+
+/*
+ * Register the IRQ callback with EAL, enable the interrupt, and mark
+ * the vector as requested so bnxt_free_int() knows to undo it.
+ */
+int bnxt_request_int(struct bnxt *bp)
+{
+	struct bnxt_irq *irq = bp->irq_tbl;
+
+	rte_intr_callback_register(&bp->pdev->intr_handle, irq->handler,
+				   (void *)bp->eth_dev);
+	rte_intr_enable(&bp->pdev->intr_handle);
+
+	irq->requested = 1;
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
new file mode 100644
index 0000000..e21bec5
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_irq.h
@@ -0,0 +1,51 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_IRQ_H_
+#define _BNXT_IRQ_H_
+
+/* Per-vector interrupt bookkeeping. */
+struct bnxt_irq {
+	rte_intr_callback_fn	handler;	/* EAL interrupt callback */
+	unsigned int		vector;		/* vector index in irq_tbl */
+	uint8_t			requested;	/* non-zero once registered with EAL */
+	char			name[RTE_ETH_NAME_MAX_LEN + 2];	/* "<dev>-<vector>" */
+};
+
+struct bnxt;
+void bnxt_free_int(struct bnxt *bp);
+void bnxt_disable_int(struct bnxt *bp);
+void bnxt_enable_int(struct bnxt *bp);
+int bnxt_setup_int(struct bnxt *bp);
+int bnxt_request_int(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
new file mode 100644
index 0000000..f668664
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -0,0 +1,305 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_txq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Generic ring handling
+ */
+
+void bnxt_free_ring(struct bnxt_ring_struct *ring)
+{
+	/*
+	 * The descriptor memory comes from rte_memzone_reserve(), and the
+	 * memzone API states that a reserved zone cannot be freed.  All we
+	 * can do is clear the software tracking area and detach it.
+	 */
+	if (ring->vmem_size && *ring->vmem) {
+		memset(*ring->vmem, 0, ring->vmem_size);
+		*ring->vmem = NULL;
+	}
+}
+
+/*
+ * Ring groups
+ */
+
+/*
+ * Mark every ring group as unallocated.
+ *
+ * The previous memset()-based fill only produced INVALID_HW_RING_ID in
+ * each uint16_t field because every byte of (uint16_t)-1 happens to be
+ * 0xff (memset writes a single byte pattern); assign each field
+ * explicitly so the code does not depend on that coincidence.
+ */
+void bnxt_init_ring_grps(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_RINGS; i++) {
+		bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
+	}
+}
+
+/*
+ * Allocates a completion ring with vmem and stats optionally also allocating
+ * a TX and/or RX ring.  Passing NULL as tx_ring_info and/or rx_ring_info
+ * to not allocate them.
+ *
+ * Order in the allocation is:
+ * stats - Always non-zero length
+ * cp vmem - Always zero-length, supported for the bnxt_ring_struct abstraction
+ * tx vmem - Only non-zero length if tx_ring_info is not NULL
+ * rx vmem - Only non-zero length if rx_ring_info is not NULL
+ * cp bd ring - Always non-zero length
+ * tx bd ring - Only non-zero length if tx_ring_info is not NULL
+ * rx bd ring - Only non-zero length if rx_ring_info is not NULL
+ */
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+			    struct bnxt_tx_ring_info *tx_ring_info,
+			    struct bnxt_rx_ring_info *rx_ring_info,
+			    struct bnxt_cp_ring_info *cp_ring_info,
+			    const char *suffix)
+{
+	struct bnxt_ring_struct *cp_ring = &cp_ring_info->cp_ring_struct;
+	struct bnxt_ring_struct *tx_ring;
+	struct bnxt_ring_struct *rx_ring;
+	struct rte_pci_device *pdev = bp->pdev;
+	const struct rte_memzone *mz = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	/*
+	 * Offsets of each sub-area inside the single memzone, in the
+	 * order documented above.  Optional areas get a zero length so
+	 * the running-offset arithmetic stays uniform.
+	 */
+	int stats_len = (tx_ring_info || rx_ring_info) ?
+	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats)) : 0;
+
+	int cp_vmem_start = stats_len;
+	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+
+	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
+	int tx_vmem_len =
+	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
+						  tx_ring_struct.vmem_size) : 0;
+
+	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
+	int rx_vmem_len =
+	    rx_ring_info ? RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
+						  rx_ring_struct.vmem_size) : 0;
+
+	int cp_ring_start = rx_vmem_start + rx_vmem_len;
+	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
+						 sizeof(struct cmpl_base));
+
+	int tx_ring_start = cp_ring_start + cp_ring_len;
+	int tx_ring_len = tx_ring_info ?
+	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct.ring_size *
+				   sizeof(struct tx_bd_long)) : 0;
+
+	int rx_ring_start = tx_ring_start + tx_ring_len;
+	int rx_ring_len = rx_ring_info ?
+	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct.ring_size *
+				   sizeof(struct rx_prod_pkt_bd)) : 0;
+
+	int total_alloc_len = rx_ring_start + rx_ring_len;
+
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
+		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
+		 suffix);
+	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+	/* Reuse an existing memzone (e.g. after a queue re-setup), since
+	 * reserved zones cannot be freed.
+	 */
+	mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL) {
+		mz = rte_memzone_reserve(mz_name, total_alloc_len,
+					 SOCKET_ID_ANY,
+					 RTE_MEMZONE_2MB |
+					 RTE_MEMZONE_SIZE_HINT_ONLY);
+		if (mz == NULL)
+			return -ENOMEM;
+	}
+	memset(mz->addr, 0, mz->len);
+
+	/*
+	 * mz->addr is known non-NULL at this point, so pointers derived
+	 * from it by adding an offset cannot be NULL either; the original
+	 * post-assignment NULL checks were dead code and were removed.
+	 */
+	if (tx_ring_info) {
+		tx_ring = &tx_ring_info->tx_ring_struct;
+
+		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
+		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
+		tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
+
+		if (tx_ring->vmem_size) {
+			tx_ring->vmem =
+			    (void **)((char *)mz->addr + tx_vmem_start);
+			tx_ring_info->tx_buf_ring =
+			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
+		}
+	}
+
+	if (rx_ring_info) {
+		rx_ring = &rx_ring_info->rx_ring_struct;
+
+		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
+		rx_ring_info->rx_desc_ring =
+		    (struct rx_prod_pkt_bd *)rx_ring->bd;
+		rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
+
+		if (rx_ring->vmem_size) {
+			rx_ring->vmem =
+			    (void **)((char *)mz->addr + rx_vmem_start);
+			rx_ring_info->rx_buf_ring =
+			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+		}
+	}
+
+	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
+	cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+	cp_ring_info->cp_desc_ring = cp_ring->bd;
+	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
+
+	if (cp_ring->vmem_size)
+		*cp_ring->vmem = ((char *)mz->addr + stats_len);
+	if (stats_len) {
+		/* The HW stats block sits at the very start of the zone. */
+		cp_ring_info->hw_stats = mz->addr;
+		cp_ring_info->hw_stats_map = mz->phys_addr;
+	}
+	cp_ring_info->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+	return 0;
+}
+
+/* ring_grp usage:
+ * [0] = default completion ring
+ * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
+ * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
+ */
+/*
+ * Allocate, via HWRM, the hardware rings backing the default completion
+ * ring, each RX completion/RX ring pair, and each TX completion/TX ring
+ * pair, and wire up the corresponding doorbell addresses.
+ *
+ * Returns 0 on success or the first bnxt_hwrm_ring_alloc() error code.
+ *
+ * NOTE(review): on error, rings already allocated through HWRM are not
+ * freed here -- presumably the caller unwinds them; confirm before
+ * adding new call sites.
+ */
+int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+{
+	unsigned i;
+	int rc = 0;
+
+	/* Default completion ring */
+	{
+		struct bnxt_cp_ring_info *cpr = &bp->def_cp_ring;
+		struct bnxt_ring_struct *cp_ring = &cpr->cp_ring_struct;
+
+		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+					  HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+					  0, INVALID_STATS_CTX_ID);
+		if (rc)
+			goto err_out;
+		/* Doorbells live in BAR 2; index 0 is the default ring. */
+		cpr->cp_doorbell =
+		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
+		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+		bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+	}
+
+	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+		struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+		struct bnxt_ring_struct *cp_ring = &cpr->cp_ring_struct;
+		struct bnxt_rx_ring_info *rxr = &rxq->rx_ring;
+		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+		unsigned idx = i + 1;	/* slot 0 is the default CP ring */
+
+		/* Rx cmpl */
+		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+					HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+					idx, INVALID_STATS_CTX_ID);
+		if (rc)
+			goto err_out;
+		/* Each doorbell is a 0x80-byte stride into BAR 2. */
+		cpr->cp_doorbell =
+		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+		    idx * 0x80;
+		bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+		/* Rx ring */
+		rc = bnxt_hwrm_ring_alloc(bp, ring,
+					HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+					idx, cpr->hw_stats_ctx_id);
+		if (rc)
+			goto err_out;
+		rxr->rx_prod = 0;
+		rxr->rx_doorbell =
+		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+		    idx * 0x80;
+		bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
+		/* Doorbell rung once with prod=0 here and again below once
+		 * buffers are posted.
+		 */
+		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+		if (bnxt_init_one_rx_ring(rxq)) {
+			RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
+			bnxt_rx_queue_release_op(rxq);
+			return -ENOMEM;
+		}
+		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+	}
+
+	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+		struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+		struct bnxt_ring_struct *cp_ring = &cpr->cp_ring_struct;
+		struct bnxt_tx_ring_info *txr = &txq->tx_ring;
+		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+		unsigned idx = 1 + bp->rx_cp_nr_rings + i;	/* after default + RX slots */
+
+		/* Tx cmpl */
+		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+					HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+					idx, INVALID_STATS_CTX_ID);
+		if (rc)
+			goto err_out;
+
+		cpr->cp_doorbell =
+		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+		    idx * 0x80;
+		bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+		/* Tx ring */
+		rc = bnxt_hwrm_ring_alloc(bp, ring,
+					HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+					idx, cpr->hw_stats_ctx_id);
+		if (rc)
+			goto err_out;
+
+		txr->tx_doorbell =
+		    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+		    idx * 0x80;
+	}
+
+err_out:
+	/* Falls through with rc == 0 on success. */
+	return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
new file mode 100644
index 0000000..751e80c
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -0,0 +1,104 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_RING_H_
+#define _BNXT_RING_H_
+
+#include <inttypes.h>
+
+#include <rte_memory.h>
+
+#define RING_NEXT(ring, idx)		(((idx) + 1) & (ring)->ring_mask)
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
+
+#define DB_IDX_MASK						0xffffff
+#define DB_IDX_VALID						(0x1<<26)
+#define DB_IRQ_DIS						(0x1<<27)
+#define DB_KEY_TX						(0x0<<28)
+#define DB_KEY_RX						(0x1<<28)
+#define DB_KEY_CP						(0x2<<28)
+#define DB_KEY_ST						(0x3<<28)
+#define DB_KEY_TX_PUSH						(0x4<<28)
+#define DB_LONG_TX_PUSH						(0x2<<24)
+
+#define DEFAULT_CP_RING_SIZE	256
+#define DEFAULT_RX_RING_SIZE	256
+#define DEFAULT_TX_RING_SIZE	256
+
+#define MAX_TPA		128
+
+// TODO: These are derived from the Linux driver assuming 4k pages
+#define MAX_RX_DESC_CNT (8 * 1024)
+#define MAX_TX_DESC_CNT (4 * 1024)
+#define MAX_CP_DESC_CNT (16 * 1024)
+
+#define INVALID_HW_RING_ID      ((uint16_t) -1)
+
+/* Common bookkeeping for one hardware descriptor ring (TX, RX or CMPL). */
+struct bnxt_ring_struct {
+	void			*bd;		/* virtual address of the BD area */
+	phys_addr_t		bd_dma;		/* physical address of the BD area */
+	uint32_t		ring_size;	/* number of descriptors */
+	uint32_t		ring_mask;	/* used by RING_NEXT(); presumably ring_size - 1 -- verify at init */
+
+	int			vmem_size;	/* bytes of sw tracking area (0 if none) */
+	void			**vmem;		/* sw tracking area, carved from the same memzone */
+
+	uint16_t		fw_ring_id; /* Ring id filled by Chimp FW */
+};
+
+/* Firmware identifiers for the rings that make up one ring group. */
+struct bnxt_ring_grp_info {
+	uint16_t	fw_stats_ctx;	/* stats context id */
+	uint16_t	fw_grp_id;	/* ring group id assigned by FW */
+	uint16_t	rx_fw_ring_id;	/* RX ring id */
+	uint16_t	cp_fw_ring_id;	/* completion ring id */
+	uint16_t	ag_fw_ring_id;	/* presumably aggregation ring id -- TODO confirm */
+};
+
+struct bnxt;
+struct bnxt_tx_queue;
+struct bnxt_rx_queue;
+struct bnxt_tx_ring_info;
+struct bnxt_rx_ring_info;
+struct bnxt_cp_ring_info;
+void bnxt_free_ring(struct bnxt_ring_struct *ring);
+void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+			    struct bnxt_tx_ring_info *tx_ring_info,
+			    struct bnxt_rx_ring_info *rx_ring_info,
+			    struct bnxt_cp_ring_info *cp_ring_info,
+			    const char *suffix);
+int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
new file mode 100644
index 0000000..423073c
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -0,0 +1,384 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Queues
+ */
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
+{
+	struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+
+	/*
+	 * The stats block lives inside a reserved memzone, which cannot
+	 * be returned to the system; just drop the reference.
+	 */
+	cpr->hw_stats = NULL;
+}
+
+/*
+ * Map the device's RX queues onto VNICs and ring groups according to the
+ * configured multi-queue mode (single queue, the VMDq variants, DCB
+ * and/or RSS), attaching a default L2 filter to every VNIC created.
+ *
+ * Returns 0 on success, -ENOMEM when a VNIC or filter allocation fails.
+ *
+ * NOTE(review): failure handling is inconsistent -- some paths
+ * "return -ENOMEM" directly while others "goto err_out", and err_out
+ * currently frees nothing (see the comment at the bottom), so partially
+ * allocated VNICs/filters stay queued in both cases.
+ */
+int bnxt_mq_rx_configure(struct bnxt *bp)
+{
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+	unsigned i, j, nb_q_per_grp, ring_idx;
+	int start_grp_id, end_grp_id, rc = 0;
+	struct bnxt_vnic_info *vnic;
+	struct bnxt_filter_info *filter;
+	struct bnxt_rx_queue *rxq;
+
+	bp->nr_vnics = 0;
+
+	/* Single queue mode */
+	if (bp->rx_cp_nr_rings < 2) {
+		vnic = bnxt_alloc_vnic(bp);
+		if (!vnic) {
+			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+		bp->nr_vnics++;
+
+		rxq = bp->eth_dev->data->rx_queues[0];
+		rxq->vnic = vnic;
+
+		vnic->func_default = true;
+		vnic->ff_pool_idx = 0;
+		/* Group ids start at 1; slot 0 is the default CP ring. */
+		vnic->start_grp_id = 1;
+		vnic->end_grp_id = vnic->start_grp_id +
+				   bp->rx_cp_nr_rings - 1;
+		filter = bnxt_alloc_filter(bp);
+		if (!filter) {
+			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+		goto out;
+	}
+
+	/* Multi-queue mode */
+	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
+		enum rte_eth_nb_pools pools;
+
+		/* Determine the effective pool count for the VMDq mode. */
+		switch (dev_conf->rxmode.mq_mode) {
+		case ETH_MQ_RX_VMDQ_DCB_RSS: {
+				const struct rte_eth_vmdq_dcb_conf *conf =
+				    &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+
+				/* Should only support 8 pools in this mode */
+				pools = conf->nb_queue_pools *
+				    dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+				break;
+			}
+		case ETH_MQ_RX_VMDQ_DCB: {
+				const struct rte_eth_vmdq_dcb_conf *conf =
+				    &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+
+				/* ETH_16/32_POOLs */
+				pools = conf->nb_queue_pools;
+				break;
+			}
+		case ETH_MQ_RX_VMDQ_RSS:
+		case ETH_MQ_RX_VMDQ_ONLY:
+		default: {
+				const struct rte_eth_vmdq_rx_conf *conf =
+				    &dev_conf->rx_adv_conf.vmdq_rx_conf;
+
+				/* ETH_8/64_POOLs */
+				pools = conf->nb_queue_pools;
+				break;
+			}
+		}
+		/* For each pool, allocate MACVLAN CFA rule & VNIC */
+		if (!pools) {
+			RTE_LOG(ERR, PMD,
+				"VMDq pool not set, defaulted to 64\n");
+			pools = ETH_64_POOLS;
+		}
+		nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+		start_grp_id = 1;
+		end_grp_id = start_grp_id + nb_q_per_grp - 1;
+
+		ring_idx = 0;
+		for (i = 0; i < pools; i++) {
+			vnic = bnxt_alloc_vnic(bp);
+			if (!vnic) {
+				RTE_LOG(ERR, PMD,
+					"VNIC alloc failed\n");
+				return -ENOMEM;
+			}
+			STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
+			bp->nr_vnics++;
+
+			/* Assign this pool's share of RX queues. */
+			for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+				rxq = bp->eth_dev->data->rx_queues[ring_idx];
+				rxq->vnic = vnic;
+			}
+			if (i == 0)
+				vnic->func_default = true;
+			vnic->ff_pool_idx = i;
+			vnic->start_grp_id = start_grp_id;
+			vnic->end_grp_id = end_grp_id;
+
+			filter = bnxt_alloc_filter(bp);
+			if (!filter) {
+				RTE_LOG(ERR, PMD,
+					"L2 filter alloc failed\n");
+				rc = -ENOMEM;
+				goto err_out;
+			}
+			/* TODO: Configure & associate CFA rule for
+			   each VNIC for each VMDq with MACVLAN, MACVLAN+TC */
+			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+			start_grp_id = end_grp_id + 1;
+			end_grp_id += nb_q_per_grp;
+		}
+		goto out;
+	}
+
+	/* Non-VMDq mode - RSS, DCB, RSS+DCB */
+	if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) {
+		const struct rte_eth_dcb_rx_conf *conf =
+					&dev_conf->rx_adv_conf.dcb_rx_conf;
+
+		/* In order to support DCB+RSS, each TC will
+		   be assigned to one VNIC */
+		nb_q_per_grp = bp->rx_cp_nr_rings / conf->nb_tcs;
+		start_grp_id = 1;
+		end_grp_id = start_grp_id + nb_q_per_grp - 1;
+
+		ring_idx = 0;
+		for (i = 0; i < conf->nb_tcs; i++) {
+			/* Allocate & configure VNIC */
+			vnic = bnxt_alloc_vnic(bp);
+			if (!vnic) {
+				RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+				rc = -ENOMEM;
+				goto err_out;
+			}
+			STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+			bp->nr_vnics++;
+
+			for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+				rxq = bp->eth_dev->data->rx_queues[ring_idx];
+				rxq->vnic = vnic;
+			}
+			if (i == 0)
+				vnic->func_default = true;
+			vnic->ff_pool_idx = 0;
+			vnic->start_grp_id = start_grp_id;
+			vnic->end_grp_id = end_grp_id;
+
+			filter = bnxt_alloc_filter(bp);
+			if (!filter) {
+				RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+				rc = -ENOMEM;
+				goto err_out;
+			}
+			/* TODO: Configure & associate CFA rule for
+			   each VNIC for each TC */
+			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+			start_grp_id = end_grp_id + 1;
+			end_grp_id += nb_q_per_grp;
+			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+				vnic->hash_type =
+					HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+					HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+		}
+	} else {
+		/* Init default VNIC for RSS or DCB only */
+		vnic = bnxt_alloc_vnic(bp);
+		if (!vnic) {
+			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+			return -ENOMEM;
+		}
+		/* Partition the rx queues for the single pool */
+		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+			rxq = bp->eth_dev->data->rx_queues[i];
+			rxq->vnic = vnic;
+		}
+		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+		bp->nr_vnics++;
+
+		vnic->func_default = true;
+		vnic->ff_pool_idx = 0;
+		vnic->start_grp_id = 1;
+		vnic->end_grp_id = vnic->start_grp_id +
+				   bp->rx_cp_nr_rings - 1;
+		filter = bnxt_alloc_filter(bp);
+		if (!filter) {
+			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+			return -ENOMEM;
+		}
+		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB)
+			bnxt_hwrm_vnic_set_dcb(bp, 0);
+		else if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+			vnic->hash_type =
+				HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+				HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+	}
+
+out:
+	return rc;
+
+err_out:
+	/* Free allocated vnic/filters */
+	/* NOTE(review): cleanup not implemented yet -- see function header. */
+
+	return rc;
+}
+
+/* Return every mbuf still held by the queue's RX software ring. */
+static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+{
+	struct bnxt_sw_rx_bd *bd_ring;
+	uint16_t idx;
+
+	if (!rxq)
+		return;
+	bd_ring = rxq->rx_ring.rx_buf_ring;
+	if (!bd_ring)
+		return;
+
+	for (idx = 0; idx < rxq->nb_rx_desc; idx++) {
+		if (bd_ring[idx].mbuf == NULL)
+			continue;
+		rte_pktmbuf_free_seg(bd_ring[idx].mbuf);
+		bd_ring[idx].mbuf = NULL;
+	}
+}
+
+/* Drop the mbufs of every RX queue on the device. */
+void bnxt_free_rx_mbufs(struct bnxt *bp)
+{
+	unsigned int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		bnxt_rx_queue_release_mbufs(bp->rx_queues[i]);
+}
+
+/* eth_dev rx_queue_release callback: tear down one RX queue. */
+void bnxt_rx_queue_release_op(void *rx_queue)
+{
+	struct bnxt_rx_queue *rxq = rx_queue;
+
+	if (rxq == NULL)
+		return;
+
+	bnxt_rx_queue_release_mbufs(rxq);
+
+	/* Free RX ring hardware descriptors */
+	bnxt_free_ring(&rxq->rx_ring.rx_ring_struct);
+
+	/* Free RX completion ring hardware descriptors */
+	bnxt_free_ring(&rxq->cp_ring.cp_ring_struct);
+
+	bnxt_free_rxq_stats(rxq);
+
+	rte_free(rxq);
+}
+
+/*
+ * eth_dev rx_queue_setup callback: allocate and initialize one RX queue.
+ *
+ * Returns 0 on success, -EINVAL for a bad descriptor count, -ENOMEM when
+ * the queue structure or its rings cannot be allocated.
+ */
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+			       uint16_t queue_idx,
+			       uint16_t nb_desc,
+			       unsigned int socket_id,
+			       const struct rte_eth_rxconf *rx_conf,
+			       struct rte_mempool *mp)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_rx_queue *rxq;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+
+	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+		RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	/* Release any queue previously configured at this index. */
+	if (eth_dev->data->rx_queues) {
+		rxq = eth_dev->data->rx_queues[queue_idx];
+		if (rxq)
+			bnxt_rx_queue_release_op(rxq);
+	}
+	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (rxq == NULL) {
+		RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+		return -ENOMEM;
+	}
+	rxq->bp = bp;
+	rxq->mb_pool = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+
+	bnxt_init_rx_ring_struct(rxq);
+
+	rxq->queue_id = queue_idx;
+	rxq->port_id = eth_dev->data->port_id;
+	rxq->crc_len =
+	    (uint8_t) ((eth_dev->data->dev_conf.
+			rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
+	rxr = &rxq->rx_ring;
+	cpr = &rxq->cp_ring;
+
+	/* Allocate RX ring hardware descriptors */
+	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxr, cpr, "bnxt_rx_ring")) {
+		RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+		bnxt_rx_queue_release_op(rxq);
+		return -ENOMEM;
+	}
+	/*
+	 * Publish the queue only after allocation succeeds.  The original
+	 * code stored rxq in rx_queues[] before bnxt_alloc_rings() and left
+	 * a dangling pointer there when allocation failed and rxq was freed.
+	 */
+	eth_dev->data->rx_queues[queue_idx] = rxq;
+
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
new file mode 100644
index 0000000..44e142d
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -0,0 +1,77 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_RQX_H_
+#define _BNXT_RQX_H_
+
+// TODO make bnxt_rx_queue.cp_ring and bnxt_rx_queue.rx_ring pointers
+#include "bnxt_rxr.h"
+
+struct bnxt;
+struct bnxt_rx_queue {
+	struct rte_mempool	*mb_pool; /* mbuf pool for RX ring */
+	struct rte_mbuf		*pkt_first_seg; /* 1st seg of pkt */
+	struct rte_mbuf		*pkt_last_seg; /* Last seg of pkt */
+	uint64_t		mbuf_initializer; /* val to init mbuf */
+	uint16_t		nb_rx_desc; /* num of RX desc */
+	uint16_t		rx_tail; /* cur val of RDT register */
+	uint16_t		nb_rx_hold; /* num held free RX desc */
+	uint16_t		rx_free_thresh; /* max free RX desc to hold */
+	uint16_t		queue_id; /* RX queue index */
+	uint16_t		reg_idx; /* RX queue register index */
+	uint8_t			port_id; /* Device port identifier */
+	uint8_t			crc_len; /* 0 if CRC stripped, 4 otherwise */
+	/* uint8_t		drop_en; */ /* If not 0, set SRRCTL.Drop_En */
+	/* uint8_t		rx_deferred_start; */ /* not in global dev start */
+
+	struct bnxt		*bp; /* back-pointer to the owning device */
+	struct bnxt_vnic_info	*vnic; /* VNIC this queue is assigned to */
+
+	uint32_t			rx_buf_size; /* buffer size -- TODO confirm whether headroom is included */
+	uint32_t			rx_buf_use_size;  /* useable size */
+	struct bnxt_rx_ring_info	rx_ring; /* RX descriptor ring state */
+	struct bnxt_cp_ring_info	cp_ring; /* RX completion ring state */
+};
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
+int bnxt_mq_rx_configure(struct bnxt *bp);
+void bnxt_rx_queue_release_op(void *rx_queue);
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+			       uint16_t queue_idx,
+			       uint16_t nb_desc,
+			       unsigned int socket_id,
+			       const struct rte_eth_rxconf *rx_conf,
+			       struct rte_mempool *mp);
+void bnxt_free_rx_mbufs(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
new file mode 100644
index 0000000..9f76ae1
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -0,0 +1,370 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+
+#include "bnxt.h"
+#include "bnxt_rxr.h"
+#include "bnxt_rxq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Ring handling
+ */
+
+/* Pull one raw mbuf from the pool; returns NULL if the pool is empty. */
+static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
+{
+	struct rte_mbuf *mbuf = __rte_mbuf_raw_alloc(mb);
+
+	__rte_mbuf_sanity_check(mbuf, 0);
+	return mbuf;
+}
+
+/*
+ * Allocate an mbuf for producer slot 'prod' and program the slot's BD
+ * with the buffer's DMA address.  Returns 0 on success, -ENOMEM when
+ * the pool is exhausted.
+ */
+static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
+				     struct bnxt_rx_ring_info *rxr,
+				     uint16_t prod)
+{
+	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
+	struct rte_mbuf *mbuf;
+	uint64_t dma;
+
+	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+	if (mbuf == NULL)
+		return -ENOMEM;
+
+	rx_buf->mbuf = mbuf;
+
+	dma = RTE_MBUF_DATA_DMA_ADDR(mbuf);
+	rxbd->addr_lo = rte_cpu_to_le_32(U64_TO_U32_LO(dma));
+	rxbd->addr_hi = rte_cpu_to_le_32(U64_TO_U32_HI(dma));
+
+	return 0;
+}
+
+/*
+ * Recycle 'mbuf' onto the producer ring: install it at the current
+ * producer slot and copy the DMA address over from the consumer-side BD
+ * (the buffer itself has not moved, so the old address is still valid).
+ */
+static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+			       struct rte_mbuf *mbuf)
+{
+	struct rx_prod_pkt_bd *prod_bd;
+	struct rx_prod_pkt_bd *cons_bd;
+	uint16_t prod = rxr->rx_prod;
+
+	rxr->rx_buf_ring[prod].mbuf = mbuf;
+
+	prod_bd = &rxr->rx_desc_ring[prod];
+	cons_bd = &rxr->rx_desc_ring[cons];
+
+	prod_bd->addr_hi = cons_bd->addr_hi;
+	prod_bd->addr_lo = cons_bd->addr_lo;
+}
+
+/*
+ * Process one RX completion (a rx_pkt_cmpl / rx_pkt_cmpl_hi pair on the
+ * completion ring) and hand the received mbuf back through 'rx_pkt'.
+ *
+ * Returns 0 on success, -EBUSY if only a partial completion is present
+ * (nothing is consumed), -EIO on an L2 error (mbuf is recycled onto the
+ * producer ring), -ENOMEM if no replacement mbuf could be allocated.
+ *
+ * The return type must be signed: the previous uint16_t return type
+ * truncated the negative errno values, so the caller's "rc == -EBUSY"
+ * test could never match and partial completions were consumed early.
+ */
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+		       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
+{
+	struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &rxq->rx_ring;
+	struct rx_pkt_cmpl *rxcmp;
+	struct rx_pkt_cmpl_hi *rxcmp1;
+	uint32_t tmp_raw_cons = *raw_cons;
+	uint16_t cons, prod, cp_cons =
+	    RING_CMP(&cpr->cp_ring_struct, tmp_raw_cons);
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct rte_mbuf *mbuf;
+	int rc = 0;
+
+	rxcmp = (struct rx_pkt_cmpl *)
+	    &cpr->cp_desc_ring[cp_cons];
+
+	/* The second (high) half of the completion must also be valid. */
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(&cpr->cp_ring_struct, tmp_raw_cons);
+	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
+
+	if (!CMP_VALID(rxcmp1, tmp_raw_cons, &cpr->cp_ring_struct))
+		return -EBUSY;
+
+	prod = rxr->rx_prod;
+
+	/* EW - GRO deferred to phase 3 */
+	cons = rxcmp->opaque;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	mbuf = rx_buf->mbuf;
+	rte_prefetch0(mbuf);
+
+	mbuf->nb_segs = 1;
+	mbuf->next = NULL;
+	mbuf->pkt_len = rxcmp->len;
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->port = rxq->port_id;
+	mbuf->ol_flags = 0;
+	if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
+		mbuf->hash.rss = rxcmp->rss_hash;
+		mbuf->ol_flags |= PKT_RX_RSS_HASH;
+	} else {
+		mbuf->hash.fdir.id = rxcmp1->cfa_code;
+		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+	}
+	if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
+		mbuf->vlan_tci = rxcmp1->metadata &
+			(RX_PKT_CMPL_METADATA_VID_MASK |
+			RX_PKT_CMPL_METADATA_DE |
+			RX_PKT_CMPL_METADATA_PRI_MASK);
+		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+	}
+
+	rx_buf->mbuf = NULL;
+	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
+		/* Re-install the mbuf back to the rx ring */
+		bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
+
+		rc = -EIO;
+		goto next_rx;
+	}
+	/*
+	 * TODO: Redesign this....
+	 * If the allocation fails, the packet does not get received.
+	 * Simply returning this will result in slowly falling behind
+	 * on the producer ring buffers.
+	 * Instead, "filling up" the producer just before ringing the
+	 * doorbell could be a better solution since it will let the
+	 * producer ring starve until memory is available again pushing
+	 * the drops into hardware and getting them out of the driver
+	 * allowing recovery to a full producer ring.
+	 *
+	 * This could also help with cache usage by preventing per-packet
+	 * calls in favour of a tight loop with the same function being
+	 * called in it.
+	 */
+	if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
+		RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+		rc = -ENOMEM;
+		goto next_rx;
+	}
+
+	/* All MBUFs are allocated with the same size under DPDK,
+	   no optimization for rx_copy_thresh */
+
+	/* AGG buf operation is deferred */
+
+	/* EW - VLAN reception.  Must compare against the ol_flags */
+
+	*rx_pkt = mbuf;
+next_rx:
+	rxr->rx_prod = RING_NEXT(&rxr->rx_ring_struct, prod);
+
+	*raw_cons = tmp_raw_cons;
+
+	return rc;
+}
+
+/*
+ * eth_dev RX burst handler.
+ *
+ * Consumes completions from the queue's completion ring until either
+ * 'nb_pkts' packets have been received or no further valid completion is
+ * available.  If anything was consumed, the completion-ring doorbell is
+ * updated, and the RX producer doorbell is rung when at least one RX
+ * completion (vs. other event types) was seen.
+ *
+ * Returns the number of packets stored in 'rx_pkts'.
+ */
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
+{
+	struct bnxt_rx_queue *rxq = rx_queue;
+	struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &rxq->rx_ring;
+	uint32_t raw_cons = cpr->cp_raw_cons;
+	uint32_t cons;
+	int nb_rx_pkts = 0;
+	bool rx_event = false;
+	struct rx_pkt_cmpl *rxcmp;
+
+	/* Handle RX burst request */
+	while (1) {
+		int rc;
+
+		cons = RING_CMP(&cpr->cp_ring_struct, raw_cons);
+		rte_prefetch0(&cpr->cp_desc_ring[cons]);
+		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+		if (!CMP_VALID(rxcmp, raw_cons, &cpr->cp_ring_struct))
+			break;
+
+		/*
+		 * TODO: avoid magic numbers.  0x30/0x10 appear to select
+		 * the RX L2 completion type -- confirm against the HSI
+		 * CMPL_BASE_TYPE_* definitions and use those names.
+		 */
+		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
+			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
+			if (likely(!rc))
+				nb_rx_pkts++;
+			else if (rc == -EBUSY)	/* partial completion */
+				break;
+			rx_event = true;
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+		if (nb_rx_pkts == nb_pkts)
+			break;
+	}
+	if (raw_cons == cpr->cp_raw_cons) {
+		/* For PMD, there is no need to keep on pushing to REARM
+		   the doorbell if there are no new completions */
+		return nb_rx_pkts;
+	}
+	cpr->cp_raw_cons = raw_cons;
+
+	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+	if (rx_event)
+		B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+	return nb_rx_pkts;
+}
+
+/*
+ * Tear down every RX queue: release the TPA info array, the RX
+ * descriptor ring, the completion ring, and finally the queue structure
+ * itself.  NULL queue slots are skipped.
+ */
+void bnxt_free_rx_rings(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+		if (rxq == NULL)
+			continue;
+
+		rte_free(rxq->rx_ring.rx_tpa);
+		rxq->rx_ring.rx_tpa = NULL;
+
+		bnxt_free_ring(&rxq->rx_ring.rx_ring_struct);
+		bnxt_free_ring(&rxq->cp_ring.cp_ring_struct);
+
+		rte_free(rxq);
+		bp->rx_queues[i] = NULL;
+	}
+}
+
+/*
+ * Fill in the software ring descriptors for one RX queue: compute the
+ * RX buffer sizes from the device MTU, and wire the generic ring
+ * structures to the queue's RX descriptor ring and completion ring.
+ * The completion ring is sized at twice the RX ring because each packet
+ * consumes two completion entries (cmpl + cmpl_hi).
+ */
+void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq)
+{
+	struct bnxt *bp = rxq->bp;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+
+	/* Usable buffer space: MTU plus L2 header, CRC and one VLAN tag. */
+	rxq->rx_buf_use_size = bp->eth_dev->data->mtu +
+			       ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
+	rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
+
+	rxr = &rxq->rx_ring;
+	ring = &rxr->rx_ring_struct;
+	/* Hardware requires a power-of-two ring size. */
+	ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
+	ring->ring_mask = ring->ring_size - 1;
+	ring->bd = (void *)rxr->rx_desc_ring;
+	ring->bd_dma = rxr->rx_desc_mapping;
+	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+	ring->vmem = (void **)&rxr->rx_buf_ring;
+
+	cpr = &rxq->cp_ring;
+	ring = &cpr->cp_ring_struct;
+	ring->ring_size = rxr->rx_ring_struct.ring_size * 2;
+	ring->ring_mask = ring->ring_size - 1;
+	ring->bd = (void *)cpr->cp_desc_ring;
+	ring->bd_dma = cpr->cp_desc_mapping;
+	ring->vmem_size = 0;
+	ring->vmem = NULL;
+}
+
+/*
+ * Stamp every BD on the RX producer ring with the given type/flags and
+ * buffer length.  'opaque' records the slot index so completions can be
+ * matched back to their software ring entry.
+ */
+static void bnxt_init_rxbds(struct bnxt_ring_struct *ring, uint32_t type,
+			    uint16_t len)
+{
+	struct rx_prod_pkt_bd *bds = (struct rx_prod_pkt_bd *)ring->bd;
+	uint32_t i;
+
+	if (bds == NULL)
+		return;
+
+	for (i = 0; i < ring->ring_size; i++) {
+		bds[i].flags_type = type;
+		bds[i].len = len;
+		bds[i].opaque = i;
+	}
+}
+
+/*
+ * Populate one RX ring: write the BD template to every slot, then fill
+ * the producer ring with mbufs and, when TPA is enabled, allocate the
+ * TPA buffers.
+ *
+ * Returns 0 on success, -ENOMEM if a TPA buffer cannot be allocated.
+ * A partially filled producer ring is only logged (the queue runs with
+ * however many mbufs were obtained).
+ */
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
+{
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+	uint32_t prod, type;
+	unsigned int i;
+
+	type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
+
+	/* XXX: if NET_IP_ALIGN == 2, RX_BD_FLAGS_SOP should be added here. */
+
+	rxr = &rxq->rx_ring;
+	ring = &rxr->rx_ring_struct;
+	bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);
+
+	prod = rxr->rx_prod;
+	for (i = 0; i < ring->ring_size; i++) {
+		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+			/* %u matches the unsigned index and ring size. */
+			RTE_LOG(WARNING, PMD,
+				"init'ed rx ring %d with %u/%u mbufs only\n",
+				rxq->queue_id, i, ring->ring_size);
+			break;
+		}
+		rxr->rx_prod = prod;
+		prod = RING_NEXT(&rxr->rx_ring_struct, prod);
+	}
+
+	if (rxr->rx_tpa) {
+		struct rte_mbuf *data;
+
+		for (i = 0; i < MAX_TPA; i++) {
+			data = __bnxt_alloc_rx_data(rxq->mb_pool);
+			if (!data)
+				return -ENOMEM;
+
+			rxr->rx_tpa[i].data = data;
+			rxr->rx_tpa[i].mapping =
+			    rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(data));
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
new file mode 100644
index 0000000..ba25736
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -0,0 +1,73 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_RXR_H_
+#define _BNXT_RXR_H_
+
+/*
+ * Ring the RX doorbell: publish the new producer index to hardware.
+ * Both the expansion and the arguments are parenthesized so that
+ * arbitrary expressions can be passed safely.
+ */
+#define B_RX_DB(db, prod)						\
+		(*(uint32_t *)(db) = (DB_KEY_RX | (prod)))
+
+/* Per-aggregation (TPA) buffer state. */
+struct bnxt_tpa_info {
+	struct rte_mbuf		*data;
+	phys_addr_t		mapping;
+	uint16_t		len;
+	uint32_t		flags2;
+	uint32_t		metadata;
+	uint32_t		rss_hash;
+};
+
+struct bnxt_sw_rx_bd {
+	struct rte_mbuf		*mbuf; /* data associated with RX descriptor */
+};
+
+/* Software state of one RX descriptor ring. */
+struct bnxt_rx_ring_info {
+	uint16_t		rx_prod;
+	void			*rx_doorbell;
+
+	struct rx_prod_pkt_bd	*rx_desc_ring;
+	struct bnxt_sw_rx_bd	*rx_buf_ring; /* sw ring */
+
+	phys_addr_t		rx_desc_mapping;
+
+	struct bnxt_tpa_info	*rx_tpa;
+
+	struct bnxt_ring_struct	rx_ring_struct;
+};
+
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts);
+void bnxt_free_rx_rings(struct bnxt *bp);
+void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq);
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
new file mode 100644
index 0000000..2db763a
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -0,0 +1,222 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Statistics functions
+ */
+
+/*
+ * Release per-queue statistics state for every TX and RX queue.
+ * NULL queue slots are skipped, matching bnxt_free_rx_rings; the
+ * per-queue helpers dereference the queue unconditionally.
+ */
+void bnxt_free_stats(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+		if (txq)
+			bnxt_free_txq_stats(txq);
+	}
+	for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+		if (rxq)
+			bnxt_free_rxq_stats(rxq);
+	}
+}
+
+/*
+ * eth_dev stats_get callback.
+ *
+ * Aggregates counters in three layers, each overriding the previous as
+ * firmware support allows: (1) per-queue HW stat contexts, (2) the
+ * HWRM function-level query, (3) the HWRM port-level query.  The later
+ * layers overwrite the device-wide totals computed from the per-queue
+ * contexts; the *_QSTATS_BROKEN macros disable layers on firmware where
+ * those HWRM commands do not work.
+ */
+void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+			   struct rte_eth_stats *bnxt_stats)
+{
+	unsigned i;
+	struct bnxt *bp = eth_dev->data->dev_private;
+#ifndef FUNC_QSTATS_BROKEN
+	struct hwrm_func_qstats_output fstats;
+#endif
+#ifndef PORT_QSTATS_BROKEN
+	struct hwrm_port_qstats_output pstats;
+#endif
+
+	memset(bnxt_stats, 0, sizeof(*bnxt_stats));
+
+	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+		struct bnxt_cp_ring_info *cpr = &rxq->cp_ring;
+		struct ctx_hw_stats64 *hw_stats =
+		    (struct ctx_hw_stats64 *)cpr->hw_stats;
+
+		/* Per-queue input packets = ucast + mcast + bcast. */
+		bnxt_stats->q_ipackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_ucast_pkts);
+		bnxt_stats->q_ipackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
+		bnxt_stats->q_ipackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_bcast_pkts);
+
+		bnxt_stats->q_ibytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_ucast_bytes);
+		bnxt_stats->q_ibytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_mcast_bytes);
+		bnxt_stats->q_ibytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->rx_bcast_bytes);
+
+		/*
+		 * TBD: No clear mapping to this... we don't seem
+		 * to have a stat specifically for dropped due to
+		 * insufficient mbufs.
+		 */
+		bnxt_stats->q_errors[i] = 0;
+
+		/* These get replaced once the *_QSTATS commands work. */
+		bnxt_stats->ipackets += bnxt_stats->q_ipackets[i];
+		bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
+		bnxt_stats->imissed += bnxt_stats->q_errors[i];
+		bnxt_stats->ierrors += rte_le_to_cpu_64(hw_stats->rx_err_pkts);
+		bnxt_stats->imcasts += rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
+	}
+
+	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[i];
+		struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+		struct ctx_hw_stats64 *hw_stats =
+		    (struct ctx_hw_stats64 *)cpr->hw_stats;
+
+		/* Per-queue output packets = ucast + mcast + bcast. */
+		bnxt_stats->q_opackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_ucast_pkts);
+		bnxt_stats->q_opackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_mcast_pkts);
+		bnxt_stats->q_opackets[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_bcast_pkts);
+
+		bnxt_stats->q_obytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_ucast_bytes);
+		bnxt_stats->q_obytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_mcast_bytes);
+		bnxt_stats->q_obytes[i] +=
+		    rte_le_to_cpu_64(hw_stats->tx_bcast_bytes);
+
+		/* These get replaced once the *_QSTATS commands work. */
+		bnxt_stats->opackets += bnxt_stats->q_opackets[i];
+		bnxt_stats->obytes +=  bnxt_stats->q_obytes[i];
+		bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
+		bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_err_pkts);
+	}
+
+#ifndef FUNC_QSTATS_BROKEN
+	/* Function-level totals override the per-queue aggregation. */
+	if (bnxt_hwrm_func_qstats(bp, &fstats) == 0) {
+		bnxt_stats->ipackets = fstats.rx_ucast_pkts + \
+				fstats.rx_mcast_pkts + fstats.rx_bcast_pkts + \
+				fstats.rx_err_pkts;
+		bnxt_stats->opackets = fstats.tx_ucast_pkts + \
+				fstats.tx_mcast_pkts + fstats.tx_bcast_pkts + \
+				fstats.tx_err_pkts;
+		bnxt_stats->ibytes = fstats.rx_ucast_bytes + \
+				fstats.rx_mcast_bytes + fstats.rx_bcast_bytes;
+		bnxt_stats->obytes = fstats.tx_ucast_bytes + \
+				fstats.tx_mcast_bytes + fstats.tx_bcast_bytes;
+		bnxt_stats->ierrors = fstats.rx_err_pkts;
+		bnxt_stats->oerrors = fstats.tx_err_pkts;
+		bnxt_stats->imcasts = fstats.rx_mcast_pkts;
+		/*
+		 * TBD: No clear mapping to this... we don't seem
+		 * to have a stat specifically for dropped due to
+		 * insufficient mbufs.
+		 */
+		bnxt_stats->imissed = 0;
+		/* Fields with no firmware counterpart are left at zero:
+		 * rx_nombuf, fdirmatch, fdirmiss; XON/XOFF pause counts
+		 * (no separate XON/XOFF stats available); and the VF-only
+		 * loopback packet/byte counters.
+		 */
+	}
+#endif
+
+#ifndef PORT_QSTATS_BROKEN
+	/* Port-level totals override everything above when available. */
+	if (bnxt_hwrm_port_qstats(bp, &pstats) != 0)
+		return;
+
+	bnxt_stats->ipackets = pstats.rx_total_pkts;
+	bnxt_stats->opackets = pstats.tx_good_pkts;
+	bnxt_stats->ibytes = pstats.rx_bytes;
+	bnxt_stats->obytes = pstats.tx_bytes;
+	bnxt_stats->ibadcrc = pstats.rx_fcs_err_pkts;
+	bnxt_stats->ibadlen = pstats.rx_ovrsz_pkts;
+	bnxt_stats->ierrors = pstats.rx_total_pkts - pstats.rx_good_pkts;
+	bnxt_stats->oerrors = pstats.tx_err_pkts;
+	bnxt_stats->imcasts = pstats.rx_mcast_pkts;
+	/* Fields with no firmware counterpart are left at zero:
+	 * rx_nombuf, fdirmatch, fdirmiss; XON/XOFF pause counts
+	 * (no separate XON/XOFF stats available); and the VF-only
+	 * loopback packet/byte counters.
+	 */
+#endif
+}
+
+/*
+ * eth_dev stats_reset callback: clear the per-queue HW stat contexts,
+ * then the function-wide and port-wide counters via HWRM.
+ */
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
+{
+	/* dev_private is void *, so no cast is required in C. */
+	struct bnxt *bp = eth_dev->data->dev_private;
+
+	bnxt_clear_all_hwrm_stat_ctxs(bp);
+	bnxt_hwrm_func_clr_stats(bp);
+	bnxt_hwrm_port_clr_stats(bp);
+}
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
new file mode 100644
index 0000000..65408a4
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -0,0 +1,44 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_STATS_H_
+#define _BNXT_STATS_H_
+
+#include <rte_ethdev.h>
+
+/* Release per-queue statistics state for all TX and RX queues. */
+void bnxt_free_stats(struct bnxt *bp);
+/* eth_dev stats_get callback: aggregate queue/function/port counters. */
+void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+			   struct rte_eth_stats *bnxt_stats);
+/* eth_dev stats_reset callback: clear HW statistics via HWRM. */
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
new file mode 100644
index 0000000..a1048b8
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -0,0 +1,165 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_txq.h"
+
+/*
+ * TX Queues
+ */
+
+/*
+ * Release the TX queue's statistics context.  The stats memory lives in
+ * the ring memzone (freed with the ring), so only the pointer is
+ * cleared here; the previous NULL check before a plain assignment was
+ * redundant.
+ */
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
+{
+	struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+
+	/* 'Unreserve' rte_memzone */
+	/* N/A */
+
+	cpr->hw_stats = NULL;
+}
+
+/* Free every mbuf still attached to the TX software ring. */
+static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
+{
+	struct bnxt_sw_tx_bd *sw_ring = txq->tx_ring.tx_buf_ring;
+	uint16_t i;
+
+	if (sw_ring == NULL)
+		return;
+
+	for (i = 0; i < txq->tx_ring.tx_ring_struct.ring_size; i++) {
+		if (sw_ring[i].mbuf == NULL)
+			continue;
+		rte_pktmbuf_free(sw_ring[i].mbuf);
+		sw_ring[i].mbuf = NULL;
+	}
+}
+
+/*
+ * Free the mbufs of every TX queue.  NULL queue slots are skipped for
+ * consistency with the RX teardown path (bnxt_free_rx_rings), since
+ * bnxt_tx_queue_release_mbufs dereferences the queue unconditionally.
+ */
+void bnxt_free_tx_mbufs(struct bnxt *bp)
+{
+	struct bnxt_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
+		txq = bp->tx_queues[i];
+		if (txq)
+			bnxt_tx_queue_release_mbufs(txq);
+	}
+}
+
+/*
+ * eth_dev tx_queue_release callback: free the queued mbufs, the TX and
+ * completion rings, the stats context, and the queue structure itself.
+ */
+void bnxt_tx_queue_release_op(void *tx_queue)
+{
+	struct bnxt_tx_queue *txq = tx_queue;
+
+	if (txq == NULL)
+		return;
+
+	/* Free TX ring hardware descriptors */
+	bnxt_tx_queue_release_mbufs(txq);
+	bnxt_free_ring(&txq->tx_ring.tx_ring_struct);
+
+	/* Free TX completion ring hardware descriptors */
+	bnxt_free_ring(&txq->cp_ring.cp_ring_struct);
+
+	bnxt_free_txq_stats(txq);
+
+	rte_free(txq);
+}
+
+/*
+ * eth_dev tx_queue_setup callback.
+ *
+ * Validates 'nb_desc', releases any queue previously installed at
+ * 'queue_idx', then allocates and initializes a new TX queue with its
+ * TX and completion rings.  Returns 0 on success, -EINVAL for a bad
+ * descriptor count, -ENOMEM on allocation failure.
+ *
+ * RTE_LOG does not append newlines, so every message ends in '\n'.
+ */
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+			       uint16_t queue_idx,
+			       uint16_t nb_desc,
+			       unsigned int socket_id,
+			       const struct rte_eth_txconf *tx_conf)
+{
+	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	struct bnxt_tx_queue *txq;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_cp_ring_info *cpr;
+
+	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
+		RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+		return -EINVAL;
+	}
+
+	/* Release any queue previously configured at this index. */
+	if (eth_dev->data->tx_queues) {
+		txq = eth_dev->data->tx_queues[queue_idx];
+		if (txq) {
+			bnxt_tx_queue_release_op(txq);
+			txq = NULL;
+		}
+	}
+	txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!\n");
+		return -ENOMEM;
+	}
+	txq->bp = bp;
+	txq->nb_tx_desc = nb_desc;
+	txq->tx_free_thresh = tx_conf->tx_free_thresh;
+
+	bnxt_init_tx_ring_struct(txq);
+
+	txq->queue_id = queue_idx;
+	txq->port_id = eth_dev->data->port_id;
+
+	txr = &txq->tx_ring;
+	cpr = &txq->cp_ring;
+
+	/* Allocate TX ring hardware descriptors */
+	if (bnxt_alloc_rings(bp, queue_idx, txr, NULL, cpr, "bnxt_tx_ring")) {
+		RTE_LOG(ERR, PMD,
+			"ring_dma_zone_reserve for tx_ring failed!\n");
+		bnxt_tx_queue_release_op(txq);
+		return -ENOMEM;
+	}
+
+	if (bnxt_init_one_tx_ring(txq)) {
+		RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!\n");
+		bnxt_tx_queue_release_op(txq);
+		return -ENOMEM;
+	}
+
+	eth_dev->data->tx_queues[queue_idx] = txq;
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
new file mode 100644
index 0000000..0f499ac
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -0,0 +1,81 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_TXQ_H_
+#define _BNXT_TXQ_H_
+
+/* TODO: make bnxt_tx_queue.cp_ring and bnxt_tx_queue.tx_ring pointers */
+#include "bnxt_txr.h"
+
+/*
+ * Per-TX-queue state: descriptor bookkeeping, threshold configuration,
+ * and the TX/completion ring structures backing the queue.  Several
+ * fields are placeholders carried over from other PMDs and are not yet
+ * used by this driver.
+ */
+struct bnxt_tx_queue {
+	uint16_t		nb_tx_desc;    /* number of TX descriptors */
+	/* uint16_t		tx_tail;	current value of TDT reg */
+	uint16_t		tx_free_thresh;/* minimum TX before freeing */
+	/* Number of TX descriptors to use before RS bit is set:
+	 * uint16_t		tx_rs_thresh;
+	 * Number of TX descriptors used since RS bit was set:
+	 * uint16_t		nb_tx_used;
+	 */
+	/* Index to last TX descriptor to have been cleaned. */
+	uint16_t		last_desc_cleaned;
+	/* Total number of TX descriptors ready to be allocated. */
+	uint16_t		tx_next_dd; /* next desc to scan for DD bit */
+	uint16_t		tx_next_rs; /* next desc to set RS bit */
+	uint16_t		queue_id; /* TX queue index */
+	uint16_t		reg_idx; /* TX queue register index */
+	uint8_t			port_id; /* Device port identifier */
+	uint8_t			pthresh; /* Prefetch threshold register */
+	uint8_t			hthresh; /* Host threshold register */
+	uint8_t			wthresh; /* Write-back threshold reg */
+	uint32_t		txq_flags; /* Holds flags for this TXq */
+	uint32_t		ctx_curr; /* Hardware context states */
+	uint8_t			tx_deferred_start; /* not in global dev start */
+
+	struct bnxt		*bp;
+	int			index;
+	int			tx_wake_thresh;
+	struct bnxt_tx_ring_info	tx_ring;
+
+	unsigned		cp_nr_rings;
+	struct bnxt_cp_ring_info	cp_ring;
+};
+
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
+void bnxt_free_tx_mbufs(struct bnxt *bp);
+void bnxt_tx_queue_release_op(void *tx_queue);
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+			       uint16_t queue_idx,
+			       uint16_t nb_desc,
+			       unsigned int socket_id,
+			       const struct rte_eth_txconf *tx_conf);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
new file mode 100644
index 0000000..b6edda8
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -0,0 +1,316 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "hsi_struct_def_dpdk.h"
+#include <stdbool.h>
+
+/*
+ * TX Ring handling
+ */
+
+/*
+ * Tear down every TX queue of the device: free the BD ring, the
+ * completion ring, then the queue structure itself.
+ */
+void bnxt_free_tx_rings(struct bnxt *bp)
+{
+	int idx;
+
+	for (idx = 0; idx < (int)bp->tx_nr_rings; idx++) {
+		struct bnxt_tx_queue *txq = bp->tx_queues[idx];
+
+		if (txq == NULL)
+			continue;
+
+		bnxt_free_ring(&txq->tx_ring.tx_ring_struct);
+		bnxt_free_ring(&txq->cp_ring.cp_ring_struct);
+
+		rte_free(txq);
+		bp->tx_queues[idx] = NULL;
+	}
+}
+
+/*
+ * Reset one TX ring to its pre-allocation state: wake threshold at half
+ * the ring, firmware ring id unset until HWRM allocates it.
+ */
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
+{
+	struct bnxt_ring_struct *ring = &txq->tx_ring.tx_ring_struct;
+
+	txq->tx_wake_thresh = ring->ring_size / 2;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	return 0;
+}
+
+/*
+ * Fill in the generic ring descriptors for the TX BD ring and its
+ * completion ring.  The BD ring is sized to the next power of two
+ * above nb_tx_desc (+1 because one slot always stays empty); the
+ * completion ring mirrors that size.
+ */
+void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq)
+{
+	struct bnxt_tx_ring_info *txr = &txq->tx_ring;
+	struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+	struct bnxt_ring_struct *tring = &txr->tx_ring_struct;
+	struct bnxt_ring_struct *cring = &cpr->cp_ring_struct;
+
+	tring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+	tring->ring_mask = tring->ring_size - 1;
+	tring->bd = (void *)txr->tx_desc_ring;
+	tring->bd_dma = txr->tx_desc_mapping;
+	tring->vmem_size = tring->ring_size * sizeof(struct bnxt_sw_tx_bd);
+	tring->vmem = (void **)&txr->tx_buf_ring;
+
+	/* The completion ring carries no software vmem of its own. */
+	cring->ring_size = tring->ring_size;
+	cring->ring_mask = cring->ring_size - 1;
+	cring->bd = (void *)cpr->cp_desc_ring;
+	cring->bd_dma = cpr->cp_desc_mapping;
+	cring->vmem_size = 0;
+	cring->vmem = NULL;
+}
+
+/* Number of free BD slots on the TX ring (one slot is always kept
+ * empty so a full ring can be told apart from an empty one).
+ */
+static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
+{
+	uint32_t used;
+
+	/* Tell compiler to fetch tx indices from memory. */
+	rte_compiler_barrier();
+
+	used = (txr->tx_prod - txr->tx_cons) & txr->tx_ring_struct.ring_mask;
+	return txr->tx_ring_struct.ring_size - used - 1;
+}
+
+/*
+ * Enqueue one mbuf chain onto the TX BD ring.
+ *
+ * A "long" BD pair (tx_bd_long + tx_bd_long_hi) is used when any
+ * offload is requested (TSO, TCP/UDP/IP checksum, VLAN insertion);
+ * otherwise a single short BD.  Each additional mbuf segment consumes
+ * one further short BD.  Returns 0 on success; on a full ring it
+ * returns -ENOMEM, which truncates to a non-zero uint16_t -- the
+ * caller only tests for non-zero.
+ */
+static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
+				struct bnxt_tx_queue *txq)
+{
+	struct bnxt_tx_ring_info *txr = &txq->tx_ring;
+	struct tx_bd_long *txbd;
+	struct tx_bd_long_hi *txbd1;
+	uint32_t vlan_tag_flags, cfa_action;
+	bool long_bd = false;
+	uint16_t last_prod = 0;
+	struct rte_mbuf *m_seg;
+	struct bnxt_sw_tx_bd *tx_buf;
+	/* Length-hint flag indexed by data_len >> 9 (512-byte buckets). */
+	static const uint32_t lhint_arr[4] = {
+		TX_BD_LONG_FLAGS_LHINT_LT512,
+		TX_BD_LONG_FLAGS_LHINT_LT1K,
+		TX_BD_LONG_FLAGS_LHINT_LT2K,
+		TX_BD_LONG_FLAGS_LHINT_LT2K
+	};
+
+	/* Any offload flag forces the long BD format. */
+	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
+				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | PKT_TX_VLAN_PKT))
+		long_bd = true;
+
+	// Setup tx_buf: one BD per segment, plus one extra for the long BD
+	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+	tx_buf->mbuf = tx_pkt;
+	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
+	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) & txr->tx_ring_struct.ring_mask;
+
+	/* NOTE(review): this check runs after tx_buf->mbuf was written,
+	 * but tx_prod has not advanced yet, so the slot is simply
+	 * overwritten on the next attempt -- harmless but fragile.
+	 */
+	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
+		return -ENOMEM;
+
+	// Setup txbd
+	txbd = &txr->tx_desc_ring[txr->tx_prod];
+	txbd->opaque = txr->tx_prod;
+	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+	txbd->len = tx_pkt->data_len;
+	/* NOTE(review): 2014 looks like a typo for 2048 (the GTE2K
+	 * boundary); lengths 2014..2047 get the GTE2K hint.  Confirm
+	 * against the HW spec -- hints are advisory so this is benign
+	 * at worst.
+	 */
+	if (txbd->len >= 2014)
+		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
+	else
+		txbd->flags_type |= lhint_arr[txbd->len >> 9];
+	txbd->addr_hi =
+	    rte_cpu_to_le_32(U64_TO_U32_HI
+			     (RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf)));
+	txbd->addr_lo =
+	    rte_cpu_to_le_32(U64_TO_U32_LO
+			     (RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf)));
+
+	if (long_bd) {
+		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
+		vlan_tag_flags = cfa_action = 0;
+		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+			// shurd: Should this mask at TX_BD_LONG_CFA_META_VLAN_VID_MASK?
+			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
+				tx_buf->mbuf->vlan_tci;
+			/* Currently supports 8021Q, 8021AD vlan offloads
+			 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+			 */
+			/* DPDK only supports 802.11q VLAN packets */
+			vlan_tag_flags |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
+		}
+
+		/* The long-format high half occupies the next ring slot. */
+		txr->tx_prod = RING_NEXT(&txr->tx_ring_struct, txr->tx_prod);
+
+		txbd1 = (struct tx_bd_long_hi *)&txr->tx_desc_ring[txr->tx_prod];
+		txbd1->lflags = 0;
+		txbd1->cfa_meta = vlan_tag_flags;
+		txbd1->cfa_action = cfa_action;
+
+		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
+			/* TSO */
+			/* NOTE(review): hdr_size is written in bytes here;
+			 * verify the unit the HW expects (some NICs take
+			 * header length in 16-bit words).
+			 */
+			txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
+			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
+					tx_pkt->l4_len;
+			txbd1->mss = tx_pkt->tso_segsz;
+
+		} else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+			/* TCP/UDP CSO */
+			txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+			txbd1->mss = 0;
+
+		} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+			/* IP CSO */
+			txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
+			txbd1->mss = 0;
+		}
+	}
+	else {
+		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+	}
+
+	/* Emit one short BD for each remaining mbuf segment.
+	 * NOTE(review): the `<` comparison does not handle ring
+	 * wrap-around -- after masking, last_prod can be below tx_prod
+	 * for a multi-segment packet straddling the ring end, so the
+	 * loop would not run.  Verify against the upstream driver.
+	 */
+	m_seg = tx_pkt->next;
+	while (txr->tx_prod < last_prod) {
+		txr->tx_prod = RING_NEXT(&txr->tx_ring_struct, txr->tx_prod);
+		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+
+		txbd = &txr->tx_desc_ring[txr->tx_prod];
+		txbd->addr_hi =
+		    rte_cpu_to_le_32(U64_TO_U32_HI
+				     (RTE_MBUF_DATA_DMA_ADDR(m_seg)));
+		txbd->addr_lo =
+		    rte_cpu_to_le_32(U64_TO_U32_LO
+				     (RTE_MBUF_DATA_DMA_ADDR(m_seg)));
+		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
+		txbd->len = m_seg->data_len;
+
+		m_seg = m_seg->next;
+	}
+
+	/* Mark the last BD of the packet. */
+	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+
+	txr->tx_prod = RING_NEXT(&txr->tx_ring_struct, txr->tx_prod);
+
+	return 0;
+}
+
+/*
+ * Reclaim nr_pkts completed packets from the TX ring: free each head
+ * mbuf (which frees the whole chain) and advance the consumer index
+ * past every BD the packet consumed.
+ */
+static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = &txq->tx_ring;
+	uint16_t raw = txr->tx_cons;
+	int pkt;
+
+	for (pkt = 0; pkt < nr_pkts; pkt++) {
+		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[raw];
+		struct rte_mbuf *mbuf = tx_buf->mbuf;
+		unsigned short bd;
+
+		tx_buf->mbuf = NULL;
+
+		/* EW - no need to unmap DMA memory? */
+
+		/* Step over all BDs belonging to this packet. */
+		for (bd = 0; bd < tx_buf->nr_bds; bd++)
+			raw = RING_NEXT(&txr->tx_ring_struct, raw);
+
+		rte_pktmbuf_free(mbuf);
+	}
+
+	txr->tx_cons = raw;
+}
+
+/*
+ * Poll the TX completion ring and reclaim descriptors of finished
+ * packets.  Returns the number of TX_L2 completions processed (0 when
+ * usage is below the free threshold or nothing was pending).
+ */
+static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
+{
+	struct bnxt_cp_ring_info *cpr = &txq->cp_ring;
+	uint32_t raw_cons = cpr->cp_raw_cons;
+	uint32_t cons;
+	int nb_tx_pkts = 0;
+	struct tx_cmpl *txcmp;
+
+	/* Only scan once enough descriptors are in flight to be worth it. */
+	if ((txq->tx_ring.tx_ring_struct.ring_size - (bnxt_tx_avail(&txq->tx_ring))) >
+	    txq->tx_free_thresh) {
+		while (1) {
+			cons = RING_CMP(&cpr->cp_ring_struct, raw_cons);
+			txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
+
+			/* Stop at the first entry HW has not written yet. */
+			if (!CMP_VALID(txcmp, raw_cons, &cpr->cp_ring_struct))
+				break;
+
+			if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+				nb_tx_pkts++;
+			else
+				RTE_LOG(DEBUG, PMD, "Unhandled CMP type %02x\n", CMP_TYPE(txcmp));
+			raw_cons = NEXT_RAW_CMP(raw_cons);
+		}
+		if (nb_tx_pkts)
+			bnxt_tx_cmp(txq, nb_tx_pkts);
+		/* Publish the new consumer index to the NIC. */
+		cpr->cp_raw_cons = raw_cons;
+		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+	}
+	return nb_tx_pkts;
+}
+
+/*
+ * Burst-transmit entry point: reclaim completed descriptors, then
+ * enqueue packets until the burst is done or the ring fills.  The
+ * doorbell is rung periodically (every quarter ring) so the NIC can
+ * start transmitting while the burst is still being queued, and once
+ * more at the end.  Returns the number of packets queued.
+ */
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts)
+{
+	struct bnxt_tx_queue *txq = tx_queue;
+	uint16_t db_mask = txq->tx_ring.tx_ring_struct.ring_size >> 2;
+	uint16_t last_db_mask = 0;
+	uint16_t sent;
+
+	/* Handle TX completions */
+	bnxt_handle_tx_cp(txq);
+
+	/* Handle TX burst request */
+	for (sent = 0; sent < nb_pkts; sent++) {
+		if (bnxt_start_xmit(tx_pkts[sent], txq))
+			break;
+		if ((sent & db_mask) != last_db_mask) {
+			B_TX_DB(txq->tx_ring.tx_doorbell, txq->tx_ring.tx_prod);
+			last_db_mask = sent & db_mask;
+		}
+	}
+
+	if (sent)
+		B_TX_DB(txq->tx_ring.tx_doorbell, txq->tx_ring.tx_prod);
+
+	return sent;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
new file mode 100644
index 0000000..1533b23
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -0,0 +1,71 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_TXR_H_
+#define _BNXT_TXR_H_
+
+#define MAX_TX_RINGS	16
+#define BNXT_TX_PUSH_THRESH 92
+
+/*
+ * Ring the TX doorbell: publish the new producer index (tagged with
+ * the TX doorbell key) to the queue's doorbell register.  The pointer
+ * is cast to volatile so the compiler cannot elide or reorder the
+ * MMIO store; arguments and the expansion are parenthesized.
+ */
+#define B_TX_DB(db, prod)						\
+		(*(volatile uint32_t *)(db) = (DB_KEY_TX | (prod)))
+
+/*
+ * Per-queue TX ring state: SW producer/consumer indices, the HW BD
+ * ring, a parallel per-BD mbuf bookkeeping ring, and the doorbell
+ * address used to publish the producer index.
+ */
+struct bnxt_tx_ring_info {
+	uint16_t		tx_prod; /* next BD slot to fill */
+	uint16_t		tx_cons; /* next BD slot to reclaim */
+	void 			*tx_doorbell; /* mapped TX doorbell register */
+
+	struct tx_bd_long		*tx_desc_ring; /* HW BD ring */
+	struct bnxt_sw_tx_bd	*tx_buf_ring; /* SW shadow: one entry per BD */
+
+	phys_addr_t		tx_desc_mapping; /* DMA address of tx_desc_ring */
+
+#define BNXT_DEV_STATE_CLOSING	0x1
+	uint32_t		dev_state;
+
+	struct bnxt_ring_struct	tx_ring_struct; /* generic ring geometry */
+};
+
+/* Software context kept for the first BD of each packet on the ring. */
+struct bnxt_sw_tx_bd {
+	struct rte_mbuf		*mbuf; /* mbuf associated with TX descriptor */
+	uint8_t			is_gso; /* NOTE(review): never written in this file -- confirm use */
+	unsigned short		nr_bds; /* total BDs consumed by this packet */
+};
+
+void bnxt_free_tx_rings(struct bnxt *bp);
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
+void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq);
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
new file mode 100644
index 0000000..46b0f87
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -0,0 +1,284 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * VNIC Functions
+ */
+
+/*
+ * Fill a buffer with pseudo-random bytes, eight at a time from
+ * rte_rand().  Used to seed per-VNIC RSS hash keys; not
+ * cryptographically strong.
+ */
+static void prandom_bytes(void *dest_ptr, size_t len)
+{
+	char *out = (char *)dest_ptr;
+	size_t remaining = len;
+
+	while (remaining > 0) {
+		uint64_t word = rte_rand();
+		size_t chunk = remaining < 8 ? remaining : 8;
+
+		memcpy(out, &word, chunk);
+		out += chunk;
+		remaining -= chunk;
+	}
+}
+
+/*
+ * Reset every VNIC to the "unallocated" state and park all of them on
+ * the free list; forwarding pools start out empty.
+ */
+void bnxt_init_vnics(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic;
+	uint16_t max_vnics;
+	int i, j;
+
+	/* The VNIC limit comes from the PF or VF resource block. */
+	max_vnics = BNXT_PF(bp) ? bp->pf.max_vnics : bp->vf.max_vnics;
+
+	STAILQ_INIT(&bp->free_vnic_list);
+	for (i = 0; i < max_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+		vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+
+		for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
+			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+
+		/* Random RSS key so flows spread differently per VNIC. */
+		prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+		STAILQ_INIT(&vnic->filter);
+		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+	}
+	for (i = 0; i < MAX_FF_POOLS; i++)
+		STAILQ_INIT(&bp->ff_pool[i]);
+}
+
+/*
+ * Move a VNIC from the given forwarding pool back onto the free list.
+ * Returns 0 on success, -EINVAL if the VNIC is not in that pool.
+ */
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			  int pool)
+{
+	struct bnxt_vnic_info *cur;
+
+	STAILQ_FOREACH(cur, &bp->ff_pool[pool], next) {
+		if (cur != vnic)
+			continue;
+		STAILQ_REMOVE(&bp->ff_pool[pool], vnic,
+			      bnxt_vnic_info, next);
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+		STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+		return 0;
+	}
+
+	RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+	return -EINVAL;
+}
+
+/*
+ * Take the first unused VNIC off the free list.  Returns NULL (with an
+ * error log) when the pool is exhausted.
+ */
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->free_vnic_list);
+
+	if (vnic == NULL) {
+		RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+		return NULL;
+	}
+
+	STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
+	return vnic;
+}
+
+/* Drain every forwarding pool back onto the free list. */
+void bnxt_free_all_vnics(struct bnxt *bp)
+{
+	int pool;
+
+	for (pool = 0; pool < MAX_FF_POOLS; pool++) {
+		struct bnxt_vnic_info *vnic;
+
+		while ((vnic = STAILQ_FIRST(&bp->ff_pool[pool])) != NULL) {
+			STAILQ_REMOVE(&bp->ff_pool[pool], vnic,
+				      bnxt_vnic_info, next);
+			STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+		}
+	}
+}
+
+/*
+ * Drop the per-VNIC RSS table and hash-key pointers.  The backing
+ * memory lives in a shared memzone that is never unreserved, so there
+ * is nothing to release beyond clearing the pointers.
+ */
+void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic;
+
+	STAILQ_FOREACH(vnic, &bp->free_vnic_list, next) {
+		vnic->rss_table = NULL;
+		vnic->rss_hash_key = NULL;
+	}
+}
+
+/*
+ * Carve an RSS indirection table plus hash key for every VNIC out of a
+ * single memzone (reused across restarts if it already exists).
+ * Returns 0 on success, -ENOMEM if the memzone cannot be reserved.
+ */
+int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic;
+	struct rte_pci_device *pdev = bp->pdev;
+	const struct rte_memzone *mz;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	int entry_length = RTE_CACHE_LINE_ROUNDUP(
+				HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
+				HW_HASH_KEY_SIZE);
+	uint16_t max_vnics;
+	int i;
+
+	if (BNXT_PF(bp))
+		max_vnics = bp->pf.max_vnics;
+	else
+		max_vnics = bp->vf.max_vnics;
+
+	/* The memzone name is unique per PCI address; reuse it if it
+	 * already exists.
+	 */
+	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+		 "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
+		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+	mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL) {
+		mz = rte_memzone_reserve(mz_name,
+					 entry_length * max_vnics,
+					 SOCKET_ID_ANY,
+					 RTE_MEMZONE_2MB |
+					 RTE_MEMZONE_SIZE_HINT_ONLY);
+		if (mz == NULL)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < max_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+
+		/* RSS indirection table, initialized to "invalid" (-1).
+		 * (The old NULL check on rss_table after the memset was
+		 * dead: the pointer is derived from mz->addr and was
+		 * already dereferenced.)
+		 */
+		vnic->rss_table =
+			(void *)((char *)mz->addr + (entry_length * i));
+		memset(vnic->rss_table, -1, entry_length);
+		vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+
+		/* The hash key immediately follows the table. */
+		vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
+			     HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
+		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
+			     HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+	}
+
+	return 0;
+}
+
+/*
+ * Release the VNIC info array.  Logs an error for any VNIC that still
+ * holds a HW VNIC id (it should have been freed through HWRM first).
+ * Safe to call when nothing was allocated.
+ */
+void bnxt_free_vnic_mem(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic;
+	uint16_t max_vnics, i;
+
+	/* Nothing allocated (or already freed): avoid NULL indexing. */
+	if (bp->vnic_info == NULL)
+		return;
+
+	if (BNXT_PF(bp))
+		max_vnics = bp->pf.max_vnics;
+	else
+		max_vnics = bp->vf.max_vnics;
+	for (i = 0; i < max_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+		if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+			RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+			/* Call HWRM to free VNIC */
+		}
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+	}
+
+	rte_free(bp->vnic_info);
+	bp->vnic_info = NULL;
+}
+
+/*
+ * Allocate the zeroed array of bnxt_vnic_info structures, one per
+ * possible VNIC.  Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int bnxt_alloc_vnic_mem(struct bnxt *bp)
+{
+	struct bnxt_vnic_info *vnic_mem;
+	uint16_t max_vnics;
+
+	max_vnics = BNXT_PF(bp) ? bp->pf.max_vnics : bp->vf.max_vnics;
+
+	/* Allocate memory for VNIC pool and filter pool */
+	vnic_mem = rte_zmalloc("bnxt_vnic_info",
+			       max_vnics * sizeof(struct bnxt_vnic_info), 0);
+	if (vnic_mem == NULL) {
+		RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+			max_vnics);
+		return -ENOMEM;
+	}
+	bp->vnic_info = vnic_mem;
+	return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
new file mode 100644
index 0000000..d989d96
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -0,0 +1,79 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_VNIC_H_
+#define _BNXT_VNIC_H_
+
+#include <sys/queue.h>
+#include <stdbool.h>
+
+/*
+ * Per-VNIC state: HW ids returned by firmware, the ring-group id
+ * table, RSS configuration, and the list of filters attached to the
+ * VNIC.  VNICs move between the device free list and forwarding pools.
+ */
+struct bnxt_vnic_info {
+	STAILQ_ENTRY(bnxt_vnic_info)	next;
+	uint8_t		ff_pool_idx; /* index of the owning forwarding pool */
+
+	uint16_t	fw_vnic_id; /* returned by Chimp during alloc */
+	uint16_t	fw_rss_cos_lb_ctx; /* HW RSS/CoS/LB context id */
+	uint16_t	ctx_is_rss_cos_lb; /* HW_CONTEXT_NONE until a ctx is allocated */
+#define MAX_NUM_TRAFFIC_CLASSES		8
+#define MAX_NUM_RSS_QUEUES_PER_VNIC	16
+#define MAX_QUEUES_PER_VNIC	(MAX_NUM_RSS_QUEUES_PER_VNIC + MAX_NUM_TRAFFIC_CLASSES)
+	uint16_t	start_grp_id;
+	uint16_t	end_grp_id;
+	uint16_t	fw_grp_ids[MAX_QUEUES_PER_VNIC]; /* HW ring-group ids */
+	uint16_t	hash_type; /* RSS hash type selection */
+	phys_addr_t 	rss_table_dma_addr; /* DMA address of rss_table */
+	uint16_t	*rss_table; /* RSS indirection table (HW_HASH_INDEX_SIZE entries) */
+	phys_addr_t 	rss_hash_key_dma_addr; /* DMA address of rss_hash_key */
+	void		*rss_hash_key; /* HW_HASH_KEY_SIZE bytes, randomly seeded */
+	uint32_t	flags;
+#define BNXT_VNIC_INFO_PROMISC			(1 << 0)
+#define BNXT_VNIC_INFO_ALLMULTI			(1 << 1)
+
+	bool		vlan_strip; /* VLAN stripping enabled for this VNIC */
+	bool		func_default; /* this is the function's default VNIC */
+
+	STAILQ_HEAD(, bnxt_filter_info)	filter; /* attached filters */
+};
+
+struct bnxt;
+void bnxt_init_vnics(struct bnxt *bp);
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			  int pool);
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp);
+void bnxt_free_all_vnics(struct bnxt *bp);
+void bnxt_free_vnic_attributes(struct bnxt *bp);
+int bnxt_alloc_vnic_attributes(struct bnxt *bp);
+void bnxt_free_vnic_mem(struct bnxt *bp);
+int bnxt_alloc_vnic_mem(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
new file mode 100644
index 0000000..df14be5
--- /dev/null
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -0,0 +1,1869 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2014-2015 Broadcom Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Broadcom Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HSI_STRUCT_DEF_H_
+#define _HSI_STRUCT_DEF_H_
+
+/* Per-context HW statistics block, 64-bit counters. */
+typedef struct ctx_hw_stats64 {
+    uint64_t rx_ucast_pkts;
+    uint64_t rx_mcast_pkts;
+    uint64_t rx_bcast_pkts;
+    uint64_t rx_drop_pkts;
+    uint64_t rx_err_pkts;
+    uint64_t rx_ucast_bytes;
+    uint64_t rx_mcast_bytes;
+    uint64_t rx_bcast_bytes;
+    uint64_t tx_ucast_pkts;
+    uint64_t tx_mcast_pkts;
+    uint64_t tx_bcast_pkts;
+    uint64_t tx_drop_pkts;
+    uint64_t tx_err_pkts;
+    uint64_t tx_ucast_bytes;
+    uint64_t tx_mcast_bytes;
+    uint64_t tx_bcast_bytes;
+    uint64_t tpa_pkts;
+    uint64_t tpa_bytes;
+    uint64_t tpa_events;
+    uint64_t tpa_aborts;
+} ctx_hw_stats64_t;
+/*
+ * Per-context HW statistics block, split into 32-bit lo/hi halves.
+ * NOTE: the original declaration was missing the `typedef` keyword,
+ * which made `ctx_hw_stats_t` and `pctx_hw_stats_t` global *variable*
+ * definitions in every translation unit that includes this header;
+ * the naming (and the sibling ctx_hw_stats64_t typedef) shows a
+ * typedef was intended.
+ */
+typedef struct ctx_hw_stats
+{
+    uint32_t rx_ucast_pkts_lo;
+    uint32_t rx_ucast_pkts_hi;
+    uint32_t rx_mcast_pkts_lo;
+    uint32_t rx_mcast_pkts_hi;
+    uint32_t rx_bcast_pkts_lo;
+    uint32_t rx_bcast_pkts_hi;
+    uint32_t rx_discard_pkts_lo;
+    uint32_t rx_discard_pkts_hi;
+    uint32_t rx_drop_pkts_lo;
+    uint32_t rx_drop_pkts_hi;
+    uint32_t rx_ucast_bytes_lo;
+    uint32_t rx_ucast_bytes_hi;
+    uint32_t rx_mcast_bytes_lo;
+    uint32_t rx_mcast_bytes_hi;
+    uint32_t rx_bcast_bytes_lo;
+    uint32_t rx_bcast_bytes_hi;
+    uint32_t tx_ucast_pkts_lo;
+    uint32_t tx_ucast_pkts_hi;
+    uint32_t tx_mcast_pkts_lo;
+    uint32_t tx_mcast_pkts_hi;
+    uint32_t tx_bcast_pkts_lo;
+    uint32_t tx_bcast_pkts_hi;
+    uint32_t tx_discard_pkts_lo;
+    uint32_t tx_discard_pkts_hi;
+    uint32_t tx_drop_pkts_lo;
+    uint32_t tx_drop_pkts_hi;
+    uint32_t tx_ucast_bytes_lo;
+    uint32_t tx_ucast_bytes_hi;
+    uint32_t tx_mcast_bytes_lo;
+    uint32_t tx_mcast_bytes_hi;
+    uint32_t tx_bcast_bytes_lo;
+    uint32_t tx_bcast_bytes_hi;
+    uint32_t tpa_pkts_lo;
+    uint32_t tpa_pkts_hi;
+    uint32_t tpa_bytes_lo;
+    uint32_t tpa_bytes_hi;
+    uint32_t tpa_events_lo;
+    uint32_t tpa_events_hi;
+    uint32_t tpa_aborts_lo;
+    uint32_t tpa_aborts_hi;
+} __attribute__((packed)) ctx_hw_stats_t, *pctx_hw_stats_t;
+#define TX_BD_SHORT_TYPE_TX_BD_SHORT (UINT32_C(0x0) << 0)
+
+/*
+ * TX buffer descriptor, long format (low half): type/flags, buffer
+ * length hint, and the 64-bit buffer address split into lo/hi words.
+ * In the ring it is followed by a tx_bd_long_hi with offload fields.
+ */
+typedef struct tx_bd_long
+{
+    uint16_t flags_type;
+    #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+    #define TX_BD_LONG_TYPE_SFT 0
+    #define TX_BD_LONG_TYPE_TX_BD_LONG (UINT32_C(0x10) << 0)
+    #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+    #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+    #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+    #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+    #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+    #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+    #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+    #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+    #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+    #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+    #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+    #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+    #define TX_BD_LONG_FLAGS_SFT 6
+    uint16_t len;
+    uint32_t opaque;
+    uint32_t addr_lo;
+    uint32_t addr_hi;
+} __attribute__((packed)) tx_bd_long_t, *ptx_bd_long_t;
+
+/*
+ * TX buffer descriptor, long format (high half): per-packet offload
+ * flags (checksum, LSO/TSO), header size and MSS for TSO, and CFA
+ * metadata such as the VLAN tag to insert.
+ */
+typedef struct tx_bd_long_hi
+{
+    uint16_t lflags;
+    #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+    #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+    #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+    #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+    #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+    #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+    #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+    #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+    #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+    #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+    uint16_t hdr_size;
+    #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+    #define TX_BD_LONG_HDR_SIZE_SFT 0
+    uint32_t mss;
+    #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+    #define TX_BD_LONG_MSS_SFT 0
+    uint16_t unused_2;
+    uint16_t cfa_action;
+    uint32_t cfa_meta;
+    #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+    #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+    #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+    #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+    #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+    #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+    #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+    #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+    #define TX_BD_LONG_CFA_META_KEY_SFT 28
+    #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+    #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+} __attribute__((packed)) tx_bd_long_hi_t, *ptx_bd_long_hi_t;
+
+/* RX producer packet BD: posts one packet buffer to the RX ring. */
+typedef struct rx_prod_pkt_bd
+{
+    uint16_t flags_type;
+    #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+    #define RX_PROD_PKT_BD_TYPE_SFT 0
+    #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT (UINT32_C(0x4) << 0)
+    #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+    #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+    #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+    #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
+    #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+    #define RX_PROD_PKT_BD_FLAGS_SFT 6
+    uint16_t len;
+    uint32_t opaque;
+    uint32_t addr_lo;
+    uint32_t addr_hi;
+} __attribute__((packed)) rx_prod_pkt_bd_t, *prx_prod_pkt_bd_t;
+
+/*
+ * Generic completion record: the 'type' field selects the specific
+ * layout, and bit 0 of info3_v (CMPL_BASE_V) is the valid bit.
+ */
+typedef struct cmpl_base
+{
+    uint16_t type;
+    #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+    #define CMPL_BASE_TYPE_SFT 0
+    #define CMPL_BASE_TYPE_TX_L2 (UINT32_C(0x0) << 0)
+    #define CMPL_BASE_TYPE_RX_L2 (UINT32_C(0x11) << 0)
+    #define CMPL_BASE_TYPE_RX_AGG (UINT32_C(0x12) << 0)
+    #define CMPL_BASE_TYPE_RX_TPA_START (UINT32_C(0x13) << 0)
+    #define CMPL_BASE_TYPE_RX_TPA_END (UINT32_C(0x15) << 0)
+    #define CMPL_BASE_TYPE_STAT_EJECT (UINT32_C(0x1a) << 0)
+    #define CMPL_BASE_TYPE_HWRM_DONE (UINT32_C(0x20) << 0)
+    #define CMPL_BASE_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
+    #define CMPL_BASE_TYPE_HWRM_FWD_RESP (UINT32_C(0x24) << 0)
+    #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (UINT32_C(0x2e) << 0)
+    #define CMPL_BASE_TYPE_CQ_NOTIFICATION (UINT32_C(0x30) << 0)
+    #define CMPL_BASE_TYPE_SRQ_EVENT (UINT32_C(0x32) << 0)
+    #define CMPL_BASE_TYPE_DBQ_EVENT (UINT32_C(0x34) << 0)
+    #define CMPL_BASE_TYPE_QP_EVENT (UINT32_C(0x38) << 0)
+    #define CMPL_BASE_TYPE_FUNC_EVENT (UINT32_C(0x3a) << 0)
+    uint16_t info1;
+    uint32_t info2;
+    uint32_t info3_v;
+    #define CMPL_BASE_V UINT32_C(0x1)
+    #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+    #define CMPL_BASE_INFO3_SFT 1
+    uint32_t info4;
+} __attribute__((packed)) cmpl_base_t, *pcmpl_base_t;
+
+/*
+ * TX completion record (type TX_L2).  'opaque' echoes the cookie the
+ * driver placed in the corresponding TX BD; errors_v carries the
+ * valid bit (bit 0) plus per-packet error flags.
+ */
+typedef struct tx_cmpl
+{
+    uint16_t flags_type;
+    #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+    #define TX_CMPL_TYPE_SFT 0
+    #define TX_CMPL_TYPE_TX_L2 (UINT32_C(0x0) << 0)
+    #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+    #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
+    #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+    #define TX_CMPL_FLAGS_SFT 6
+    uint16_t unused_0;
+    /* Cookie copied from the originating TX BD. */
+    uint32_t opaque;
+    /* Bit 0: valid; bits 1..15: error details (see masks below). */
+    uint16_t errors_v;
+    #define TX_CMPL_V UINT32_C(0x1)
+    #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+    #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+    #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
+    #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
+    #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+    #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
+    #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+    #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+    #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
+    #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+    #define TX_CMPL_ERRORS_SFT 1
+    uint16_t unused_1;
+    uint32_t unused_2;
+} __attribute__((packed)) tx_cmpl_t, *ptx_cmpl_t;
+
+/*
+ * RX packet completion, first 16 bytes (type RX_L2).  Continued by
+ * struct rx_pkt_cmpl_hi.  Carries packet length, placement, RSS hash
+ * and the item-type (ITYPE) classification of the received frame.
+ */
+typedef struct rx_pkt_cmpl
+{
+    uint16_t flags_type;
+    #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+    #define RX_PKT_CMPL_TYPE_SFT 0
+    #define RX_PKT_CMPL_TYPE_RX_L2 (UINT32_C(0x11) << 0)
+    #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+    #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+    #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+    #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
+    #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+    #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+    #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
+    #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12)
+    #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
+    #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+    #define RX_PKT_CMPL_FLAGS_SFT 6
+    /* Received packet length in bytes. */
+    uint16_t len;
+    /* Cookie copied from the RX producer BD. */
+    uint32_t opaque;
+    /* Bit 0: valid (V1); bits 1..5: number of aggregation buffers. */
+    uint8_t agg_bufs_v1;
+    #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+    #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+    #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+    uint8_t rss_hash_type;
+    uint8_t payload_offset;
+    uint8_t unused_1;
+    /* RSS hash value; meaningful only when FLAGS_RSS_VALID is set. */
+    uint32_t rss_hash;
+} __attribute__((packed)) rx_pkt_cmpl_t, *prx_pkt_cmpl_t;
+
+/*
+ * RX packet completion, second 16 bytes: checksum-calculation flags,
+ * VLAN metadata, and detailed error classification.  Bit 0 of
+ * errors_v2 is the valid bit for this half (V2).
+ */
+typedef struct rx_pkt_cmpl_hi
+{
+    uint32_t flags2;
+    #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+    #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+    #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+    #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+    #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+    #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+    #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+    #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+    #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+    /* When META_FORMAT_VLAN: VID/DE/PRI in low 16 bits, TPID in high. */
+    uint32_t metadata;
+    #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+    #define RX_PKT_CMPL_METADATA_VID_SFT 0
+    #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+    #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+    #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+    #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+    #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+    /* Bit 0: valid (V2); remaining bits classify receive errors. */
+    uint16_t errors_v2;
+    #define RX_PKT_CMPL_V2 UINT32_C(0x1)
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1)
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (UINT32_C(0x1) << 1)
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (UINT32_C(0x2) << 1)
+    #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (UINT32_C(0x3) << 1)
+    #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
+    #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+    #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+    #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+    #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (UINT32_C(0x3) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x4) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x5) << 9)
+    #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x6) << 9)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (UINT32_C(0x7) << 12)
+    #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (UINT32_C(0x8) << 12)
+    #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+    #define RX_PKT_CMPL_ERRORS_SFT 1
+    uint16_t cfa_code;
+    /* Only the low 24 bits carry the reorder value. */
+    uint32_t reorder;
+    #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+    #define RX_PKT_CMPL_REORDER_SFT 0
+} __attribute__((packed)) rx_pkt_cmpl_hi_t, *prx_pkt_cmpl_hi_t;
+
+/*
+ * Completion delivered when an HWRM request is forwarded to this
+ * function (type HWRM_FWD_REQ).  req_buf_addr_v packs the valid bit
+ * (bit 0) together with the forwarded-request buffer address.
+ */
+typedef struct hwrm_fwd_req_cmpl
+{
+    /* Low 6 bits: type; bits 6..15: forwarded request length. */
+    uint16_t req_len_type;
+    #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
+    #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+    #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
+    #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+    #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+    uint16_t source_id;
+    uint32_t unused_0;
+    /* Bit 0: valid; remaining bits: request buffer address.
+     * NOTE(review): REQ_BUF_ADDR_MASK is only 32 bits wide while the
+     * field is 64 bits -- verify against the HWRM spec whether the
+     * mask should be 0xfffffffffffffffe.
+     */
+    uint64_t req_buf_addr_v;
+    #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
+    #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+    #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+} __attribute__((packed)) hwrm_fwd_req_cmpl_t, *phwrm_fwd_req_cmpl_t;
+
+/*
+ * Asynchronous event completion (type HWRM_ASYNC_EVENT): unsolicited
+ * firmware notifications such as link status/speed changes and driver
+ * load/unload events, identified by event_id.
+ */
+typedef struct hwrm_async_event_cmpl
+{
+    uint16_t type;
+    #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+    #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+    #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (UINT32_C(0x2e) << 0)
+    uint16_t event_id;
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (UINT32_C(0x0) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (UINT32_C(0x1) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (UINT32_C(0x2) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (UINT32_C(0x3) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (UINT32_C(0x4) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (UINT32_C(0x5) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (UINT32_C(0x10) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (UINT32_C(0x11) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (UINT32_C(0x20) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (UINT32_C(0x21) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (UINT32_C(0x30) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (UINT32_C(0x31) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (UINT32_C(0x32) << 0)
+    #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (UINT32_C(0xff) << 0)
+    /* Event-specific payload, second word. */
+    uint32_t event_data2;
+    /* Bit 0: valid; bits 1..7: opaque. */
+    uint8_t opaque_v;
+    #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+    #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+    #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+    uint8_t timestamp_lo;
+    uint16_t timestamp_hi;
+    /* Event-specific payload, first word. */
+    uint32_t event_data1;
+} __attribute__((packed)) hwrm_async_event_cmpl_t, *phwrm_async_event_cmpl_t;
+/* HWRM interface version implemented by these definitions. */
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 0
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_STR "1.0.0"
+/* All-ones sentinel for "not applicable" HWRM fields. */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* Maximum HWRM request / response sizes in bytes. */
+#define HWRM_MAX_REQ_LEN (128)
+#define HWRM_MAX_RESP_LEN (176)
+/* RSS hash indirection table entries and hash key size in bytes. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* Expected value of the trailing 'valid' byte of an HWRM response. */
+#define HWRM_RESP_VALID_KEY 1
+
+/*
+ * Common HWRM request header: every hwrm_*_input structure begins
+ * with these five fields (command, completion ring, sequence number,
+ * target function, and DMA address for the response).
+ */
+typedef struct input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+} __attribute__((packed)) input_t, *pinput_t;
+
+/*
+ * Common HWRM response header: every hwrm_*_output structure begins
+ * with these four fields echoing the request and giving its status.
+ */
+typedef struct output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+} __attribute__((packed)) output_t, *poutput_t;
+
+/*
+ * HWRM command opcode values for the req_type field of struct input.
+ * Grouped by subsystem: VER/FUNC, PORT, QUEUE, VNIC, RING, CFA
+ * (flow/filter), TUNNEL, STAT, FW, forwarding, and NVM commands.
+ */
+typedef struct cmd_nums
+{
+    uint16_t req_type;
+    #define HWRM_VER_GET (UINT32_C(0x0))
+    #define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe))
+    #define HWRM_FUNC_VF_CFG (UINT32_C(0xf))
+    #define RESERVED1 (UINT32_C(0x10))
+    #define HWRM_FUNC_RESET (UINT32_C(0x11))
+    #define HWRM_FUNC_GETFID (UINT32_C(0x12))
+    #define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13))
+    #define HWRM_FUNC_VF_FREE (UINT32_C(0x14))
+    #define HWRM_FUNC_QCAPS (UINT32_C(0x15))
+    #define HWRM_FUNC_QCFG (UINT32_C(0x16))
+    #define HWRM_FUNC_CFG (UINT32_C(0x17))
+    #define HWRM_FUNC_QSTATS (UINT32_C(0x18))
+    #define HWRM_FUNC_CLR_STATS (UINT32_C(0x19))
+    #define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
+    #define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b))
+    #define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c))
+    #define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
+    #define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e))
+    #define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f))
+    #define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
+    #define HWRM_PORT_MAC_CFG (UINT32_C(0x21))
+    #define RESERVED2 (UINT32_C(0x22))
+    #define HWRM_PORT_QSTATS (UINT32_C(0x23))
+    #define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24))
+    #define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
+    #define HWRM_PORT_LPBK_CLR_STATS (UINT32_C(0x26))
+    #define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
+    #define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
+    #define HWRM_PORT_BLINK_LED (UINT32_C(0x29))
+    #define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
+    #define HWRM_QUEUE_QCFG (UINT32_C(0x31))
+    #define HWRM_QUEUE_CFG (UINT32_C(0x32))
+    #define HWRM_QUEUE_BUFFERS_QCFG (UINT32_C(0x33))
+    #define HWRM_QUEUE_BUFFERS_CFG (UINT32_C(0x34))
+    #define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35))
+    #define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36))
+    #define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37))
+    #define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38))
+    #define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39))
+    #define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a))
+    #define HWRM_VNIC_ALLOC (UINT32_C(0x40))
+    #define HWRM_VNIC_FREE (UINT32_C(0x41))
+    #define HWRM_VNIC_CFG (UINT32_C(0x42))
+    #define HWRM_VNIC_QCFG (UINT32_C(0x43))
+    #define HWRM_VNIC_TPA_CFG (UINT32_C(0x44))
+    #define HWRM_VNIC_TPA_QCFG (UINT32_C(0x45))
+    #define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
+    #define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47))
+    #define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48))
+    #define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49))
+    #define HWRM_RING_ALLOC (UINT32_C(0x50))
+    #define HWRM_RING_FREE (UINT32_C(0x51))
+    #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52))
+    #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53))
+    #define HWRM_RING_RESET (UINT32_C(0x5e))
+    #define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
+    #define HWRM_RING_GRP_FREE (UINT32_C(0x61))
+    #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
+    #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71))
+    #define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90))
+    #define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
+    #define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
+    #define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
+    #define RESERVED3 (UINT32_C(0x94))
+    #define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95))
+    #define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96))
+    #define HWRM_CFA_ENCAP_RECORD_ALLOC (UINT32_C(0x97))
+    #define HWRM_CFA_ENCAP_RECORD_FREE (UINT32_C(0x98))
+    #define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
+    #define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
+    #define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
+    #define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c))
+    #define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d))
+    #define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e))
+    #define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
+    #define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
+    #define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
+    #define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
+    #define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
+    #define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2))
+    #define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
+    #define HWRM_FW_RESET (UINT32_C(0xc0))
+    #define HWRM_FW_QSTATUS (UINT32_C(0xc1))
+    #define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
+    #define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1))
+    #define HWRM_FWD_RESP (UINT32_C(0xd2))
+    #define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3))
+    #define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0))
+    #define HWRM_DBG_DUMP (UINT32_C(0xff14))
+    #define HWRM_NVM_MODIFY (UINT32_C(0xfff4))
+    #define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5))
+    #define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6))
+    #define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7))
+    #define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8))
+    #define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9))
+    #define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa))
+    #define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb))
+    #define HWRM_NVM_READ (UINT32_C(0xfffd))
+    #define HWRM_NVM_WRITE (UINT32_C(0xfffe))
+    /* Padding to 8 bytes. */
+    uint16_t unused_0[3];
+} __attribute__((packed)) cmd_nums_t, *pcmd_nums_t;
+
+/*
+ * HWRM response error codes for the error_code field of struct
+ * output; 0 (HWRM_ERR_CODE_SUCCESS) means the command succeeded.
+ */
+typedef struct ret_codes
+{
+    uint16_t error_code;
+    #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0))
+    #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1))
+    #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+    #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+    #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4))
+    #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5))
+    #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6))
+    #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf))
+    #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe))
+    #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff))
+    /* Padding to 8 bytes. */
+    uint16_t unused_0[3];
+} __attribute__((packed)) ret_codes_t, *pret_codes_t;
+
+/*
+ * HWRM_VER_GET request: the driver reports the HWRM interface
+ * version it implements (maj/min/upd).  First five fields are the
+ * standard HWRM request header (struct input).
+ */
+typedef struct hwrm_ver_get_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint8_t hwrm_intf_maj;
+    uint8_t hwrm_intf_min;
+    uint8_t hwrm_intf_upd;
+    uint8_t unused_0[5];
+} __attribute__((packed)) hwrm_ver_get_input_t, *phwrm_ver_get_input_t;
+
+/*
+ * HWRM_VER_GET response: firmware interface/firmware versions and
+ * names (HWRM, management, network-control, RoCE), chip
+ * identification, and HWRM transaction limits.
+ */
+typedef struct hwrm_ver_get_output
+{
+    /* Standard HWRM response header (struct output). */
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    /* HWRM interface version supported by firmware. */
+    uint8_t hwrm_intf_maj;
+    uint8_t hwrm_intf_min;
+    uint8_t hwrm_intf_upd;
+    uint8_t hwrm_intf_rsvd;
+    /* HWRM firmware version. */
+    uint8_t hwrm_fw_maj;
+    uint8_t hwrm_fw_min;
+    uint8_t hwrm_fw_bld;
+    uint8_t hwrm_fw_rsvd;
+    /* Management firmware version. */
+    uint8_t mgmt_fw_maj;
+    uint8_t mgmt_fw_min;
+    uint8_t mgmt_fw_bld;
+    uint8_t mgmt_fw_rsvd;
+    /* Network-control firmware version. */
+    uint8_t netctrl_fw_maj;
+    uint8_t netctrl_fw_min;
+    uint8_t netctrl_fw_bld;
+    uint8_t netctrl_fw_rsvd;
+    uint32_t reserved1;
+    /* RoCE firmware version. */
+    uint8_t roce_fw_maj;
+    uint8_t roce_fw_min;
+    uint8_t roce_fw_bld;
+    uint8_t roce_fw_rsvd;
+    char hwrm_fw_name[16];
+    char mgmt_fw_name[16];
+    char netctrl_fw_name[16];
+    uint32_t reserved2[4];
+    char roce_fw_name[16];
+    /* Chip identification. */
+    uint16_t chip_num;
+    uint8_t chip_rev;
+    uint8_t chip_metal;
+    uint8_t chip_bond_id;
+    uint8_t chip_platform_type;
+    #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC (UINT32_C(0x0) << 0)
+    #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA (UINT32_C(0x1) << 0)
+    #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM (UINT32_C(0x2) << 0)
+    /* HWRM transaction limits advertised by firmware. */
+    uint16_t max_req_win_len;
+    uint16_t max_resp_len;
+    uint16_t def_req_timeout;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_ver_get_output_t, *phwrm_ver_get_output_t;
+
+/*
+ * HWRM_FUNC_RESET request: resets a function (self, children, or a
+ * specific VF when ENABLES_VF_ID_VALID is set) at the requested
+ * func_reset_level.
+ */
+typedef struct hwrm_func_reset_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t enables;
+    #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+    /* Target VF; only meaningful when VF_ID_VALID is enabled. */
+    uint16_t vf_id;
+    uint8_t func_reset_level;
+    #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL (UINT32_C(0x0) << 0)
+    #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME (UINT32_C(0x1) << 0)
+    #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN (UINT32_C(0x2) << 0)
+    #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF (UINT32_C(0x3) << 0)
+    uint8_t unused_0;
+} __attribute__((packed)) hwrm_func_reset_input_t, *phwrm_func_reset_input_t;
+
+/* HWRM_FUNC_RESET response: header + response-valid byte only. */
+typedef struct hwrm_func_reset_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_reset_output_t, *phwrm_func_reset_output_t;
+
+/*
+ * HWRM_FUNC_VF_ALLOC request: allocate num_vfs virtual functions,
+ * optionally starting at first_vf_id (when ENABLES_FIRST_VF_ID set).
+ */
+typedef struct hwrm_func_vf_alloc_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t enables;
+    #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
+    uint16_t first_vf_id;
+    uint16_t num_vfs;
+} __attribute__((packed)) hwrm_func_vf_alloc_input_t, *phwrm_func_vf_alloc_input_t;
+
+/* HWRM_FUNC_VF_ALLOC response: ID of the first VF allocated. */
+typedef struct hwrm_func_vf_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint16_t first_vf_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t unused_4;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_vf_alloc_output_t, *phwrm_func_vf_alloc_output_t;
+
+/*
+ * HWRM_FUNC_VF_FREE request: free num_vfs virtual functions,
+ * optionally starting at first_vf_id (when ENABLES_FIRST_VF_ID set).
+ */
+typedef struct hwrm_func_vf_free_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t enables;
+    #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
+    uint16_t first_vf_id;
+    uint16_t num_vfs;
+} __attribute__((packed)) hwrm_func_vf_free_input_t, *phwrm_func_vf_free_input_t;
+
+/* HWRM_FUNC_VF_FREE response: header + response-valid byte only. */
+typedef struct hwrm_func_vf_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_vf_free_output_t, *phwrm_func_vf_free_output_t;
+
+/* HWRM_FUNC_QCAPS request: query capabilities of function 'fid'. */
+typedef struct hwrm_func_qcaps_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t fid;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_func_qcaps_input_t, *phwrm_func_qcaps_input_t;
+
+/*
+ * HWRM_FUNC_QCAPS response: permanent MAC address plus the maximum
+ * resources (rings, VNICs, contexts, filters, flows) available to
+ * the queried function.
+ */
+typedef struct hwrm_func_qcaps_output
+{
+    /* Standard HWRM response header (struct output). */
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint16_t fid;
+    uint16_t port_id;
+    uint32_t flags;
+    #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+    #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING UINT32_C(0x2)
+    /* Factory-programmed MAC address of this function. */
+    uint8_t perm_mac_address[6];
+    /* Maximum resource counts for this function. */
+    uint16_t max_rsscos_ctx;
+    uint16_t max_cmpl_rings;
+    uint16_t max_tx_rings;
+    uint16_t max_rx_rings;
+    uint16_t max_l2_ctxs;
+    uint16_t max_vnics;
+    uint16_t first_vf_id;
+    uint16_t max_vfs;
+    uint16_t max_stat_ctx;
+    uint32_t max_encap_records;
+    uint32_t max_decap_records;
+    uint32_t max_tx_em_flows;
+    uint32_t max_tx_wm_flows;
+    uint32_t max_rx_em_flows;
+    uint32_t max_rx_wm_flows;
+    uint32_t max_mcast_filters;
+    uint32_t max_flow_id;
+    uint32_t max_hw_ring_grps;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_qcaps_output_t, *phwrm_func_qcaps_output_t;
+
+/* HWRM_FUNC_QSTATS request: query statistics of function 'fid'. */
+typedef struct hwrm_func_qstats_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t fid;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_func_qstats_input_t, *phwrm_func_qstats_input_t;
+
+/*
+ * HWRM_FUNC_QSTATS response: per-function TX/RX counters broken down
+ * by unicast/multicast/broadcast packets and bytes, plus error, drop
+ * and aggregation counters.
+ */
+typedef struct hwrm_func_qstats_output
+{
+    /* Standard HWRM response header (struct output). */
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint64_t tx_ucast_pkts;
+    uint64_t tx_mcast_pkts;
+    uint64_t tx_bcast_pkts;
+    uint64_t tx_err_pkts;
+    uint64_t tx_drop_pkts;
+    uint64_t tx_ucast_bytes;
+    uint64_t tx_mcast_bytes;
+    uint64_t tx_bcast_bytes;
+    uint64_t rx_ucast_pkts;
+    uint64_t rx_mcast_pkts;
+    uint64_t rx_bcast_pkts;
+    uint64_t rx_err_pkts;
+    uint64_t rx_drop_pkts;
+    uint64_t rx_ucast_bytes;
+    uint64_t rx_mcast_bytes;
+    uint64_t rx_bcast_bytes;
+    uint64_t rx_agg_pkts;
+    uint64_t rx_agg_bytes;
+    uint64_t rx_agg_events;
+    uint64_t rx_agg_aborts;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_qstats_output_t, *phwrm_func_qstats_output_t;
+
+/* HWRM_FUNC_CLR_STATS request: clear statistics of function 'fid'. */
+typedef struct hwrm_func_clr_stats_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t fid;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_func_clr_stats_input_t, *phwrm_func_clr_stats_input_t;
+
+/* HWRM_FUNC_CLR_STATS response: header + response-valid byte only. */
+typedef struct hwrm_func_clr_stats_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_clr_stats_output_t, *phwrm_func_clr_stats_output_t;
+
+/*
+ * HWRM_FUNC_DRV_RGTR request: registers the driver with firmware,
+ * reporting OS type, driver version, and which forwarded VF requests
+ * and async events the driver wants to receive.  'enables' selects
+ * which optional fields below are meaningful.
+ */
+typedef struct hwrm_func_drv_rgtr_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
+    uint32_t enables;
+    #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD UINT32_C(0x8)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
+    uint16_t os_type;
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN (UINT32_C(0x0) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER (UINT32_C(0x1) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS (UINT32_C(0xe) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS (UINT32_C(0x1d) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX (UINT32_C(0x24) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD (UINT32_C(0x2a) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI (UINT32_C(0x68) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 (UINT32_C(0x73) << 0)
+    #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 (UINT32_C(0x74) << 0)
+    /* Driver version (valid when ENABLES_VER is set). */
+    uint8_t ver_maj;
+    uint8_t ver_min;
+    uint8_t ver_upd;
+    uint8_t unused_0;
+    uint16_t unused_1;
+    uint32_t timestamp;
+    uint32_t unused_2;
+    /* 8x32-bit words -- presumably bitmaps selecting which forwarded
+     * VF request types / async event IDs to deliver; verify against
+     * the HWRM spec.
+     */
+    uint32_t vf_req_fwd[8];
+    uint32_t async_event_fwd[8];
+} __attribute__((packed)) hwrm_func_drv_rgtr_input_t, *phwrm_func_drv_rgtr_input_t;
+
+/* HWRM_FUNC_DRV_RGTR response: header + response-valid byte only. */
+typedef struct hwrm_func_drv_rgtr_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_drv_rgtr_output_t, *phwrm_func_drv_rgtr_output_t;
+
+/*
+ * HWRM_FUNC_DRV_UNRGTR request: deregisters the driver; the
+ * PREPARE_FOR_SHUTDOWN flag indicates an impending system shutdown.
+ */
+typedef struct hwrm_func_drv_unrgtr_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN UINT32_C(0x1)
+    uint32_t unused_0;
+} __attribute__((packed)) hwrm_func_drv_unrgtr_input_t, *phwrm_func_drv_unrgtr_input_t;
+
+/* HWRM_FUNC_DRV_UNRGTR response: header + response-valid byte only. */
+typedef struct hwrm_func_drv_unrgtr_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_drv_unrgtr_output_t, *phwrm_func_drv_unrgtr_output_t;
+
+/*
+ * HWRM_FUNC_BUF_RGTR request: registers up to ten request-buffer
+ * pages (plus optional error and response buffers) with firmware,
+ * e.g. for handling forwarded VF requests.  Page size values are
+ * log2 encodings (0x4 = 16B ... 0x1e = 1GB).
+ */
+typedef struct hwrm_func_buf_rgtr_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t enables;
+    #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
+    uint16_t vf_id;
+    uint16_t req_buf_num_pages;
+    uint16_t req_buf_page_size;
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B (UINT32_C(0x4) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K (UINT32_C(0xc) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K (UINT32_C(0xd) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K (UINT32_C(0x10) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M (UINT32_C(0x16) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M (UINT32_C(0x17) << 0)
+    #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G (UINT32_C(0x1e) << 0)
+    uint16_t req_buf_len;
+    uint16_t resp_buf_len;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    /* DMA addresses of the registered request-buffer pages. */
+    uint64_t req_buf_page_addr0;
+    uint64_t req_buf_page_addr1;
+    uint64_t req_buf_page_addr2;
+    uint64_t req_buf_page_addr3;
+    uint64_t req_buf_page_addr4;
+    uint64_t req_buf_page_addr5;
+    uint64_t req_buf_page_addr6;
+    uint64_t req_buf_page_addr7;
+    uint64_t req_buf_page_addr8;
+    uint64_t req_buf_page_addr9;
+    /* Valid only when ENABLES_ERR_BUF_ADDR is set. */
+    uint64_t error_buf_addr;
+    uint64_t resp_buf_addr;
+} __attribute__((packed)) hwrm_func_buf_rgtr_input_t, *phwrm_func_buf_rgtr_input_t;
+
+/* HWRM_FUNC_BUF_RGTR response: header + response-valid byte only. */
+typedef struct hwrm_func_buf_rgtr_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    /* Response-valid byte (cf. HWRM_RESP_VALID_KEY). */
+    uint8_t valid;
+} __attribute__((packed)) hwrm_func_buf_rgtr_output_t, *phwrm_func_buf_rgtr_output_t;
+
+/*
+ * HWRM_PORT_PHY_CFG request: configures PHY/link settings for
+ * port_id -- forced or autonegotiated speed, duplex, pause,
+ * wirespeed, loopback, and pre-emphasis.  'enables' selects which
+ * optional fields firmware should honor.  Link speed values encode
+ * the rate in 100 Mb/s units (0x1 = 100Mb ... 0x1f4 = 50Gb).
+ */
+typedef struct hwrm_port_phy_cfg_input
+{
+    /* Standard HWRM request header (struct input). */
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
+    #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
+    #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+    #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+    uint32_t enables;
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK UINT32_C(0x10)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED UINT32_C(0x20)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80)
+    #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100)
+    uint16_t port_id;
+    /* Forced speed; used with FLAGS_FORCE. */
+    uint16_t force_link_speed;
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB (UINT32_C(0xa) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB (UINT32_C(0x14) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB (UINT32_C(0x19) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB (UINT32_C(0x64) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB (UINT32_C(0xc8) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB (UINT32_C(0xfa) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB (UINT32_C(0x190) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB (UINT32_C(0x1f4) << 0)
+    uint8_t auto_mode;
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED (UINT32_C(0x2) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW (UINT32_C(0x3) << 0)
+    /* NOTE(review): value 0x4 follows the 0..3 enumerators above, so
+     * this looks like a "one past last" marker rather than a bit
+     * mask despite the _MASK suffix -- verify against the HWRM spec.
+     */
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_MASK (UINT32_C(0x4) << 0)
+    uint8_t auto_duplex;
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH (UINT32_C(0x2) << 0)
+    uint8_t auto_pause;
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+    uint8_t unused_0;
+    /* Single advertised speed; used with AUTO_MODE_ONE_SPEED. */
+    uint16_t auto_link_speed;
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB (UINT32_C(0xa) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB (UINT32_C(0x14) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB (UINT32_C(0x19) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB (UINT32_C(0x64) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB (UINT32_C(0xc8) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB (UINT32_C(0xfa) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB (UINT32_C(0x190) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB (UINT32_C(0x1f4) << 0)
+    /* Bitmap of speeds to advertise during autonegotiation. */
+    uint16_t auto_link_speed_mask;
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200)
+    #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400)
+    uint8_t wirespeed;
+    #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+    uint8_t lpbk;
+    #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+    uint8_t force_pause;
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+    uint8_t unused_1;
+    uint32_t preemphasis;
+    uint32_t unused_2;
+} __attribute__((packed)) hwrm_port_phy_cfg_input_t, *phwrm_port_phy_cfg_input_t;
+
+/*
+ * HWRM_PORT_PHY_CFG response.  Standard HWRM response header
+ * (error_code, req_type, seq_id, resp_len) followed by pad bytes.
+ * NOTE(review): 'valid' is presumably the firmware-written
+ * response-complete marker (non-zero once the response is fully
+ * DMAed) -- confirm against the HWRM spec.
+ */
+typedef struct hwrm_port_phy_cfg_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_port_phy_cfg_output_t, *phwrm_port_phy_cfg_output_t;
+
+/*
+ * HWRM_PORT_PHY_QCFG request: query the PHY configuration and link
+ * state of the port identified by port_id.  Standard HWRM request
+ * header (req_type, cmpl_ring, seq_id, target_id, resp_addr) first.
+ */
+typedef struct hwrm_port_phy_qcfg_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t port_id;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_port_phy_qcfg_input_t, *phwrm_port_phy_qcfg_input_t;
+
+/*
+ * HWRM_PORT_PHY_QCFG response: current link state, negotiated speed,
+ * duplex and pause, supported and auto-negotiation speeds, loopback
+ * and wirespeed settings, PHY firmware version, media/transceiver
+ * info, and link-partner advertisements.
+ *
+ * Scalar *_LINK_SPEED_* values encode the rate in 100 Mb/s units
+ * (0x1 = 100Mb, 0xa = 1Gb, ... 0x1f4 = 50Gb); by contrast the
+ * *_SPEEDS_* / *_SPEED_MASK_* fields are bit-masks with one bit per
+ * supported rate.
+ */
+typedef struct hwrm_port_phy_qcfg_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint8_t link;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK (UINT32_C(0x2) << 0)
+    uint8_t unused_0;
+    uint16_t link_speed;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB (UINT32_C(0xa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB (UINT32_C(0x14) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB (UINT32_C(0x19) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB (UINT32_C(0x64) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB (UINT32_C(0xc8) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB (UINT32_C(0xfa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB (UINT32_C(0x190) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB (UINT32_C(0x1f4) << 0)
+    uint8_t duplex;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL (UINT32_C(0x1) << 0)
+    uint8_t pause;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
+    uint16_t support_speeds;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
+    uint16_t force_link_speed;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB (UINT32_C(0xa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB (UINT32_C(0x14) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB (UINT32_C(0x19) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB (UINT32_C(0x64) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB (UINT32_C(0xc8) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB (UINT32_C(0xfa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB (UINT32_C(0x190) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB (UINT32_C(0x1f4) << 0)
+    uint8_t auto_mode;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED (UINT32_C(0x2) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW (UINT32_C(0x3) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_MASK (UINT32_C(0x4) << 0)
+    uint8_t auto_pause;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+    uint16_t auto_link_speed;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB (UINT32_C(0xa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB (UINT32_C(0x14) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB (UINT32_C(0x19) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB (UINT32_C(0x64) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB (UINT32_C(0xc8) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB (UINT32_C(0xfa) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB (UINT32_C(0x190) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB (UINT32_C(0x1f4) << 0)
+    uint16_t auto_link_speed_mask;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400)
+    uint8_t wirespeed;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+    uint8_t lpbk;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+    uint8_t force_pause;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+    uint8_t reserved1;
+    uint32_t preemphasis;
+    /* PHY firmware version: major.minor.build. */
+    uint8_t phy_maj;
+    uint8_t phy_min;
+    uint8_t phy_bld;
+    uint8_t phy_type;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR4 (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 (UINT32_C(0x2) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR4 (UINT32_C(0x3) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR4 (UINT32_C(0x4) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 (UINT32_C(0x5) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX4 (UINT32_C(0x6) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR (UINT32_C(0x7) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET (UINT32_C(0x8) << 0)
+    uint8_t media_type;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC (UINT32_C(0x2) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE (UINT32_C(0x3) << 0)
+    uint8_t transceiver_type;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_TRANSCEIVER_TYPE_XCVR_INTERNAL (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_TRANSCEIVER_TYPE_XCVR_EXTERNAL (UINT32_C(0x2) << 0)
+    /* Only the low 5 bits carry the PHY address (see MASK/SFT below). */
+    uint8_t phy_addr;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
+    uint8_t unused_2;
+    uint16_t link_partner_adv_speeds;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB UINT32_C(0x2)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD UINT32_C(0x4)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB UINT32_C(0x8)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB UINT32_C(0x10)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB UINT32_C(0x20)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB UINT32_C(0x40)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB UINT32_C(0x80)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB UINT32_C(0x100)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB UINT32_C(0x200)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB UINT32_C(0x400)
+    uint8_t link_partner_adv_auto_mode;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE (UINT32_C(0x0) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (UINT32_C(0x1) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (UINT32_C(0x2) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (UINT32_C(0x3) << 0)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_MASK (UINT32_C(0x4) << 0)
+    uint8_t link_partner_adv_pause;
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX UINT32_C(0x1)
+    #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX UINT32_C(0x2)
+    uint8_t unused_3;
+    uint8_t unused_4;
+    uint8_t unused_5;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_port_phy_qcfg_output_t, *phwrm_port_phy_qcfg_output_t;
+
+/*
+ * HWRM_PORT_QSTATS request: ask firmware to DMA the port's TX and RX
+ * statistics blocks to the given host addresses for port_id.
+ */
+typedef struct hwrm_port_qstats_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t port_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2[3];
+    uint8_t unused_3;
+    /* Host DMA destinations for the TX and RX statistics blocks. */
+    uint64_t tx_stat_host_addr;
+    uint64_t rx_stat_host_addr;
+} __attribute__((packed)) hwrm_port_qstats_input_t, *phwrm_port_qstats_input_t;
+
+/*
+ * HWRM_PORT_QSTATS response: sizes of the TX/RX statistics blocks
+ * that firmware wrote to the host addresses supplied in the request.
+ * NOTE(review): 'valid' is presumably the response-complete marker.
+ */
+typedef struct hwrm_port_qstats_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint16_t tx_stat_size;
+    uint16_t rx_stat_size;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_port_qstats_output_t, *phwrm_port_qstats_output_t;
+
+/*
+ * HWRM_PORT_CLR_STATS request: clear the statistics counters of the
+ * port identified by port_id.
+ */
+typedef struct hwrm_port_clr_stats_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t port_id;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_port_clr_stats_input_t, *phwrm_port_clr_stats_input_t;
+
+/* HWRM_PORT_CLR_STATS response: header plus padding; no payload. */
+typedef struct hwrm_port_clr_stats_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_port_clr_stats_output_t, *phwrm_port_clr_stats_output_t;
+
+/*
+ * HWRM_QUEUE_QPORTCFG request: query the queue configuration of a
+ * port.  flags bit 0 selects the direction (TX = 0, RX = 1).
+ */
+typedef struct hwrm_queue_qportcfg_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+    #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
+    uint16_t port_id;
+    uint16_t unused_0;
+} __attribute__((packed)) hwrm_queue_qportcfg_input_t, *phwrm_queue_qportcfg_input_t;
+
+/*
+ * HWRM_QUEUE_QPORTCFG response: per-port queue capabilities followed
+ * by eight (queue_id, service_profile) pairs.  Each service profile
+ * is LOSSY (0), LOSSLESS (1) or UNKNOWN (0xff).
+ */
+typedef struct hwrm_queue_qportcfg_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint8_t max_configurable_queues;
+    uint8_t max_configurable_lossless_queues;
+    uint8_t queue_cfg_allowed;
+    uint8_t queue_buffers_cfg_allowed;
+    uint8_t queue_pfcenable_cfg_allowed;
+    uint8_t queue_pri2cos_cfg_allowed;
+    uint8_t queue_cos2bw_cfg_allowed;
+    uint8_t queue_id0;
+    uint8_t queue_id0_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id1;
+    uint8_t queue_id1_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id2;
+    uint8_t queue_id2_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id3;
+    uint8_t queue_id3_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id4;
+    uint8_t queue_id4_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id5;
+    uint8_t queue_id5_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id6;
+    uint8_t queue_id6_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t queue_id7;
+    uint8_t queue_id7_service_profile;
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY (UINT32_C(0x0) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (UINT32_C(0x1) << 0)
+    #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (UINT32_C(0xff) << 0)
+    uint8_t valid;
+} __attribute__((packed)) hwrm_queue_qportcfg_output_t, *phwrm_queue_qportcfg_output_t;
+
+/*
+ * HWRM_VNIC_ALLOC request: allocate a VNIC.  FLAGS_DEFAULT requests
+ * allocation as the function's default VNIC.
+ */
+typedef struct hwrm_vnic_alloc_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+    uint32_t unused_0;
+} __attribute__((packed)) hwrm_vnic_alloc_input_t, *phwrm_vnic_alloc_input_t;
+
+/* HWRM_VNIC_ALLOC response: firmware-assigned vnic_id. */
+typedef struct hwrm_vnic_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t vnic_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_alloc_output_t, *phwrm_vnic_alloc_output_t;
+
+/*
+ * HWRM_VNIC_FREE request: release the VNIC identified by vnic_id
+ * (as returned by HWRM_VNIC_ALLOC).
+ */
+typedef struct hwrm_vnic_free_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t vnic_id;
+    uint32_t unused_0;
+} __attribute__((packed)) hwrm_vnic_free_input_t, *phwrm_vnic_free_input_t;
+
+/* HWRM_VNIC_FREE response: header plus padding; no payload. */
+typedef struct hwrm_vnic_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_free_output_t, *phwrm_vnic_free_output_t;
+
+/*
+ * HWRM_VNIC_CFG request: configure a VNIC.  'enables' bits declare
+ * which of the optional fields below (dflt_ring_grp, rss_rule,
+ * cos_rule, lb_rule, mru) carry meaningful values; 'flags' toggles
+ * default/VLAN-strip/BD-stall modes.
+ */
+typedef struct hwrm_vnic_cfg_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+    #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+    #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+    uint32_t enables;
+    #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
+    #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
+    #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
+    #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
+    #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
+    uint16_t vnic_id;
+    uint16_t dflt_ring_grp;
+    uint16_t rss_rule;
+    uint16_t cos_rule;
+    uint16_t lb_rule;
+    uint16_t mru;
+    uint32_t unused_0;
+} __attribute__((packed)) hwrm_vnic_cfg_input_t, *phwrm_vnic_cfg_input_t;
+
+/* HWRM_VNIC_CFG response: header plus padding; no payload. */
+typedef struct hwrm_vnic_cfg_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_cfg_output_t, *phwrm_vnic_cfg_output_t;
+
+/*
+ * HWRM_VNIC_RSS_CFG request: program RSS for the context identified
+ * by rss_ctx_idx.  hash_type selects which packet headers feed the
+ * hash; the ring-group table and hash key are passed indirectly via
+ * host DMA addresses.
+ */
+typedef struct hwrm_vnic_rss_cfg_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t hash_type;
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+    #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+    uint32_t unused_0;
+    uint64_t ring_grp_tbl_addr;
+    uint64_t hash_key_tbl_addr;
+    uint16_t rss_ctx_idx;
+    uint16_t unused_1[3];
+} __attribute__((packed)) hwrm_vnic_rss_cfg_input_t, *phwrm_vnic_rss_cfg_input_t;
+
+/* HWRM_VNIC_RSS_CFG response: header plus padding; no payload. */
+typedef struct hwrm_vnic_rss_cfg_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_rss_cfg_output_t, *phwrm_vnic_rss_cfg_output_t;
+
+/*
+ * HWRM_VNIC_RSS_COS_LB_CTX_ALLOC request: allocate an RSS/CoS/LB
+ * context.  Request header only -- no parameters.
+ */
+typedef struct hwrm_vnic_rss_cos_lb_ctx_alloc_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+} __attribute__((packed)) hwrm_vnic_rss_cos_lb_ctx_alloc_input_t, *phwrm_vnic_rss_cos_lb_ctx_alloc_input_t;
+
+/* HWRM_VNIC_RSS_COS_LB_CTX_ALLOC response: allocated context id. */
+typedef struct hwrm_vnic_rss_cos_lb_ctx_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint16_t rss_cos_lb_ctx_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t unused_4;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_rss_cos_lb_ctx_alloc_output_t, *phwrm_vnic_rss_cos_lb_ctx_alloc_output_t;
+
+/*
+ * HWRM_VNIC_RSS_COS_LB_CTX_FREE request: release the RSS/CoS/LB
+ * context identified by rss_cos_lb_ctx_id.
+ */
+typedef struct hwrm_vnic_rss_cos_lb_ctx_free_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t rss_cos_lb_ctx_id;
+    uint16_t unused_0[3];
+} __attribute__((packed)) hwrm_vnic_rss_cos_lb_ctx_free_input_t, *phwrm_vnic_rss_cos_lb_ctx_free_input_t;
+
+/* HWRM_VNIC_RSS_COS_LB_CTX_FREE response: header plus padding. */
+typedef struct hwrm_vnic_rss_cos_lb_ctx_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_vnic_rss_cos_lb_ctx_free_output_t, *phwrm_vnic_rss_cos_lb_ctx_free_output_t;
+
+/*
+ * HWRM_RING_ALLOC request: allocate a completion, TX, or RX ring
+ * (ring_type).  The ring's page table is described by page_tbl_addr,
+ * page_size, page_tbl_depth and fbo; 'enables' bits declare which
+ * optional fields (stat_ctx_id, max_bw) are meaningful.
+ */
+typedef struct hwrm_ring_alloc_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t enables;
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED2 UINT32_C(0x2)
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8)
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
+    #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
+    uint8_t ring_type;
+    #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
+    #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
+    #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
+    uint8_t unused_0;
+    uint16_t unused_1;
+    uint64_t page_tbl_addr;
+    uint32_t fbo;
+    uint8_t page_size;
+    uint8_t page_tbl_depth;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint32_t length;
+    uint16_t logical_id;
+    uint16_t cmpl_ring_id;
+    uint16_t queue_id;
+    uint8_t unused_4;
+    uint8_t unused_5;
+    uint32_t reserved1;
+    uint16_t reserved2;
+    uint8_t unused_6;
+    uint8_t unused_7;
+    uint32_t reserved3;
+    uint32_t stat_ctx_id;
+    uint32_t reserved4;
+    uint32_t max_bw;
+    uint8_t int_mode;
+    #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY (UINT32_C(0x0) << 0)
+    #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD (UINT32_C(0x1) << 0)
+    #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX (UINT32_C(0x2) << 0)
+    #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL (UINT32_C(0x3) << 0)
+    uint8_t unused_8[3];
+} __attribute__((packed)) hwrm_ring_alloc_input_t, *phwrm_ring_alloc_input_t;
+
+/* HWRM_RING_ALLOC response: hardware ring_id and logical_ring_id. */
+typedef struct hwrm_ring_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint16_t ring_id;
+    uint16_t logical_ring_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_ring_alloc_output_t, *phwrm_ring_alloc_output_t;
+
+/*
+ * HWRM_RING_FREE request: release the ring identified by ring_id.
+ * ring_type must match the type used at allocation (CMPL/TX/RX).
+ */
+typedef struct hwrm_ring_free_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint8_t ring_type;
+    #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
+    #define HWRM_RING_FREE_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
+    #define HWRM_RING_FREE_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
+    uint8_t unused_0;
+    uint16_t ring_id;
+    uint32_t unused_1;
+} __attribute__((packed)) hwrm_ring_free_input_t, *phwrm_ring_free_input_t;
+
+/* HWRM_RING_FREE response: header plus padding; no payload. */
+typedef struct hwrm_ring_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_ring_free_output_t, *phwrm_ring_free_output_t;
+
+/*
+ * HWRM_RING_GRP_ALLOC request: allocate a ring group from four ring
+ * identifiers.
+ * NOTE(review): cr/rr/ar/sc presumably name the completion ring, RX
+ * ring, aggregation ring and statistics context ids respectively --
+ * confirm against the HWRM spec.
+ */
+typedef struct hwrm_ring_grp_alloc_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint16_t cr;
+    uint16_t rr;
+    uint16_t ar;
+    uint16_t sc;
+} __attribute__((packed)) hwrm_ring_grp_alloc_input_t, *phwrm_ring_grp_alloc_input_t;
+
+/* HWRM_RING_GRP_ALLOC response: firmware-assigned ring_group_id. */
+typedef struct hwrm_ring_grp_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t ring_group_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_ring_grp_alloc_output_t, *phwrm_ring_grp_alloc_output_t;
+
+/*
+ * HWRM_RING_GRP_FREE request: release the ring group identified by
+ * ring_group_id (as returned by HWRM_RING_GRP_ALLOC).
+ */
+typedef struct hwrm_ring_grp_free_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t ring_group_id;
+    uint32_t unused_0;
+} __attribute__((packed)) hwrm_ring_grp_free_input_t, *phwrm_ring_grp_free_input_t;
+
+/* HWRM_RING_GRP_FREE response: header plus padding; no payload. */
+typedef struct hwrm_ring_grp_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_ring_grp_free_output_t, *phwrm_ring_grp_free_output_t;
+
+/*
+ * HWRM_CFA_L2_FILTER_ALLOC request: create an L2 (MAC/VLAN) filter.
+ * flags bit 0 selects the path (TX = 0, RX = 1); each 'enables' bit
+ * declares that the correspondingly named field below is meaningful.
+ * Fields prefixed t_ apply to the inner (tunnel) headers.
+ */
+typedef struct hwrm_cfa_l2_filter_alloc_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t flags;
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
+    uint32_t enables;
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK UINT32_C(0x2)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK UINT32_C(0x8)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK UINT32_C(0x20)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK UINT32_C(0x80)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN UINT32_C(0x100)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK UINT32_C(0x200)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN UINT32_C(0x400)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK UINT32_C(0x800)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x4000)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x10000)
+    uint8_t l2_addr[6];
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t l2_addr_mask[6];
+    uint16_t l2_ovlan;
+    uint16_t l2_ovlan_mask;
+    uint16_t l2_ivlan;
+    uint16_t l2_ivlan_mask;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t t_l2_addr[6];
+    uint8_t unused_4;
+    uint8_t unused_5;
+    uint8_t t_l2_addr_mask[6];
+    uint16_t t_l2_ovlan;
+    uint16_t t_l2_ovlan_mask;
+    uint16_t t_l2_ivlan;
+    uint16_t t_l2_ivlan_mask;
+    uint8_t src_type;
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT (UINT32_C(0x0) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF (UINT32_C(0x1) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF (UINT32_C(0x2) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC (UINT32_C(0x3) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG (UINT32_C(0x4) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE (UINT32_C(0x5) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO (UINT32_C(0x6) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG (UINT32_C(0x7) << 0)
+    uint8_t unused_6;
+    uint32_t src_id;
+    uint8_t tunnel_type;
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL (UINT32_C(0x0) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN (UINT32_C(0x1) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE (UINT32_C(0x2) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE (UINT32_C(0x3) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP (UINT32_C(0x4) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE (UINT32_C(0x5) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS (UINT32_C(0x6) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT (UINT32_C(0x7) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE (UINT32_C(0x8) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL (UINT32_C(0xff) << 0)
+    uint8_t unused_7;
+    uint16_t dst_id;
+    uint16_t mirror_vnic_id;
+    /* Placement hint relative to an existing filter (see hint id below). */
+    uint8_t pri_hint;
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER (UINT32_C(0x0) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER (UINT32_C(0x1) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER (UINT32_C(0x2) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX (UINT32_C(0x3) << 0)
+    #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN (UINT32_C(0x4) << 0)
+    uint8_t unused_8;
+    uint32_t unused_9;
+    uint64_t l2_filter_id_hint;
+} __attribute__((packed)) hwrm_cfa_l2_filter_alloc_input_t, *phwrm_cfa_l2_filter_alloc_input_t;
+
+/*
+ * HWRM_CFA_L2_FILTER_ALLOC response: opaque 64-bit l2_filter_id used
+ * to reference or free the filter, plus a flow_id.
+ */
+typedef struct hwrm_cfa_l2_filter_alloc_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint64_t l2_filter_id;
+    uint32_t flow_id;
+    uint8_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_cfa_l2_filter_alloc_output_t, *phwrm_cfa_l2_filter_alloc_output_t;
+
+/*
+ * HWRM_CFA_L2_FILTER_FREE request: release the L2 filter identified
+ * by l2_filter_id (as returned by HWRM_CFA_L2_FILTER_ALLOC).
+ */
+typedef struct hwrm_cfa_l2_filter_free_input
+{
+    uint16_t req_type;
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint64_t l2_filter_id;
+} __attribute__((packed)) hwrm_cfa_l2_filter_free_input_t, *phwrm_cfa_l2_filter_free_input_t;
+
+/* HWRM_CFA_L2_FILTER_FREE response: header plus padding. */
+typedef struct hwrm_cfa_l2_filter_free_output
+{
+    uint16_t error_code;
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;
+} __attribute__((packed)) hwrm_cfa_l2_filter_free_output_t, *phwrm_cfa_l2_filter_free_output_t;
+
+typedef struct hwrm_cfa_l2_set_rx_mask_input /* HWRM_CFA_L2_SET_RX_MASK request: configure RX filtering for a VNIC */
+{
+    uint16_t req_type;     /* common *_input header (see hwrm_cfa_l2_filter_free_input) */
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t vnic_id;      /* VNIC whose RX mask is being set */
+    uint32_t mask;         /* bitwise OR of the MASK_* flags below */
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
+    #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
+    uint64_t mc_tbl_addr;  /* address of the multicast table (from name; confirm vs. HWRM spec) */
+    uint32_t num_mc_entries; /* number of entries at mc_tbl_addr */
+    uint32_t unused_0;     /* explicit padding */
+} __attribute__((packed)) hwrm_cfa_l2_set_rx_mask_input_t, *phwrm_cfa_l2_set_rx_mask_input_t;
+
+typedef struct hwrm_cfa_l2_set_rx_mask_output /* HWRM_CFA_L2_SET_RX_MASK response: status only, no payload */
+{
+    uint16_t error_code;   /* common *_output header */
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;     /* explicit padding */
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;         /* NOTE(review): presumably firmware's completion marker -- confirm vs. HWRM spec */
+} __attribute__((packed)) hwrm_cfa_l2_set_rx_mask_output_t, *phwrm_cfa_l2_set_rx_mask_output_t;
+
+typedef struct hwrm_stat_ctx_alloc_input /* HWRM_STAT_CTX_ALLOC request: allocate a statistics context */
+{
+    uint16_t req_type;     /* common *_input header */
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint64_t stats_dma_addr;   /* host address firmware writes stats to (from name; confirm vs. HWRM spec) */
+    uint32_t update_period_ms; /* stats update interval in milliseconds */
+    uint32_t unused_0;         /* explicit padding */
+} __attribute__((packed)) hwrm_stat_ctx_alloc_input_t, *phwrm_stat_ctx_alloc_input_t;
+
+typedef struct hwrm_stat_ctx_alloc_output /* HWRM_STAT_CTX_ALLOC response */
+{
+    uint16_t error_code;   /* common *_output header */
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t stat_ctx_id;  /* id of the new context; used by hwrm_stat_ctx_free/clr_stats requests */
+    uint8_t unused_0;      /* explicit padding */
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;         /* NOTE(review): presumably firmware's completion marker -- confirm vs. HWRM spec */
+} __attribute__((packed)) hwrm_stat_ctx_alloc_output_t, *phwrm_stat_ctx_alloc_output_t;
+
+typedef struct hwrm_stat_ctx_free_input /* HWRM_STAT_CTX_FREE request: release a statistics context */
+{
+    uint16_t req_type;     /* common *_input header */
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t stat_ctx_id;  /* id previously returned by hwrm_stat_ctx_alloc_output */
+    uint32_t unused_0;     /* explicit padding */
+} __attribute__((packed)) hwrm_stat_ctx_free_input_t, *phwrm_stat_ctx_free_input_t;
+
+typedef struct hwrm_stat_ctx_free_output /* HWRM_STAT_CTX_FREE response */
+{
+    uint16_t error_code;   /* common *_output header */
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t stat_ctx_id;  /* NOTE(review): presumably echoes the freed context id -- confirm vs. HWRM spec */
+    uint8_t unused_0;      /* explicit padding */
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t valid;         /* NOTE(review): presumably firmware's completion marker -- confirm vs. HWRM spec */
+} __attribute__((packed)) hwrm_stat_ctx_free_output_t, *phwrm_stat_ctx_free_output_t;
+
+typedef struct hwrm_stat_ctx_clr_stats_input /* HWRM_STAT_CTX_CLR_STATS request: zero the counters of a stats context */
+{
+    uint16_t req_type;     /* common *_input header */
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t stat_ctx_id;  /* id previously returned by hwrm_stat_ctx_alloc_output */
+    uint32_t unused_0;     /* explicit padding */
+} __attribute__((packed)) hwrm_stat_ctx_clr_stats_input_t, *phwrm_stat_ctx_clr_stats_input_t;
+
+typedef struct hwrm_stat_ctx_clr_stats_output /* HWRM_STAT_CTX_CLR_STATS response: status only, no payload */
+{
+    uint16_t error_code;   /* common *_output header */
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;     /* explicit padding */
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;         /* NOTE(review): presumably firmware's completion marker -- confirm vs. HWRM spec */
+} __attribute__((packed)) hwrm_stat_ctx_clr_stats_output_t, *phwrm_stat_ctx_clr_stats_output_t;
+
+typedef struct hwrm_exec_fwd_resp_input /* HWRM_EXEC_FWD_RESP request: carries an encapsulated HWRM command */
+{
+    uint16_t req_type;     /* common *_input header */
+    uint16_t cmpl_ring;
+    uint16_t seq_id;
+    uint16_t target_id;
+    uint64_t resp_addr;
+    uint32_t encap_request[26];    /* 104 bytes holding the encapsulated request message */
+    uint16_t encap_resp_target_id; /* target to receive the encapsulated response (from name; confirm vs. HWRM spec) */
+    uint16_t unused_0[3];          /* explicit padding up to an 8-byte multiple */
+} __attribute__((packed)) hwrm_exec_fwd_resp_input_t, *phwrm_exec_fwd_resp_input_t;
+
+typedef struct hwrm_exec_fwd_resp_output /* HWRM_EXEC_FWD_RESP response: status only, no payload */
+{
+    uint16_t error_code;   /* common *_output header */
+    uint16_t req_type;
+    uint16_t seq_id;
+    uint16_t resp_len;
+    uint32_t unused_0;     /* explicit padding */
+    uint8_t unused_1;
+    uint8_t unused_2;
+    uint8_t unused_3;
+    uint8_t valid;         /* NOTE(review): presumably firmware's completion marker -- confirm vs. HWRM spec */
+} __attribute__((packed)) hwrm_exec_fwd_resp_output_t, *phwrm_exec_fwd_resp_output_t;
+#endif
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
new file mode 100644
index 0000000..ef35398
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+	local: *;
+};
diff --git a/lib/librte_eal/common/include/rte_pci_dev_ids.h b/lib/librte_eal/common/include/rte_pci_dev_ids.h
index d088191..9a8f254 100644
--- a/lib/librte_eal/common/include/rte_pci_dev_ids.h
+++ b/lib/librte_eal/common/include/rte_pci_dev_ids.h
@@ -63,11 +63,11 @@
  * This file contains a list of the PCI device IDs recognised by DPDK, which
  * can be used to fill out an array of structures describing the devices.
  *
- * Currently four families of devices are recognised: those supported by the
- * IGB driver, by EM driver, those supported by the IXGBE driver, and by virtio
- * driver which is a para virtualization driver running in guest virtual machine.
- * The inclusion of these in an array built using this file depends on the
- * definition of
+ * Currently five families of devices are recognised: those supported by the
+ * IGB driver, by EM driver, those supported by the IXGBE driver, by BNXT
+ * driver, and by the virtio driver, which is a para-virtualization driver
+ * running in a guest virtual machine.  The inclusion of these in an array
+ * this file depends on the definition of
  * RTE_PCI_DEV_ID_DECL_EM
  * RTE_PCI_DEV_ID_DECL_IGB
  * RTE_PCI_DEV_ID_DECL_IGBVF
@@ -76,6 +76,7 @@
  * RTE_PCI_DEV_ID_DECL_I40E
  * RTE_PCI_DEV_ID_DECL_I40EVF
  * RTE_PCI_DEV_ID_DECL_VIRTIO
+ * RTE_PCI_DEV_ID_DECL_BNXT
  * at the time when this file is included.
  *
  * In order to populate an array, the user of this file must define this macro:
@@ -167,6 +168,15 @@
 #define PCI_VENDOR_ID_VMWARE 0x15AD
 #endif
 
+#ifndef PCI_VENDOR_ID_BROADCOM
+/** Vendor ID used by Broadcom devices */
+#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#endif
+
+#ifndef RTE_PCI_DEV_ID_DECL_BNXT
+#define RTE_PCI_DEV_ID_DECL_BNXT(vendor, dev)
+#endif
+
 #ifndef PCI_VENDOR_ID_CISCO
 /** Vendor ID used by Cisco VIC devices */
 #define PCI_VENDOR_ID_CISCO 0x1137
@@ -592,6 +602,30 @@ RTE_PCI_DEV_ID_DECL_VIRTIO(PCI_VENDOR_ID_QUMRANET, QUMRANET_DEV_ID_VIRTIO)
 
 RTE_PCI_DEV_ID_DECL_VMXNET3(PCI_VENDOR_ID_VMWARE, VMWARE_DEV_ID_VMXNET3)
 
+/****************** Broadcom BNXT devices ******************/
+
+#define BROADCOM_DEV_ID_57301			0x16c8
+#define BROADCOM_DEV_ID_57302			0x16c9
+#define BROADCOM_DEV_ID_57304_PF		0x16ca
+#define BROADCOM_DEV_ID_57304_VF		0x16cb
+#define BROADCOM_DEV_ID_57304_MF		0x16cc
+#define BROADCOM_DEV_ID_57402			0x16d0
+#define BROADCOM_DEV_ID_57404			0x16d1
+#define BROADCOM_DEV_ID_57406_PF		0x16d2
+#define BROADCOM_DEV_ID_57406_VF		0x16d3
+#define BROADCOM_DEV_ID_57406_MF		0x16d4
+
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_MF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF)
+
 /*************** Virtual FM10K devices from fm10k_type.h ***************/
 
 #define FM10K_DEV_ID_VF                   0x15A5
@@ -665,5 +699,6 @@ RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_MF)
 #undef RTE_PCI_DEV_ID_DECL_I40EVF
 #undef RTE_PCI_DEV_ID_DECL_VIRTIO
 #undef RTE_PCI_DEV_ID_DECL_VMXNET3
+#undef RTE_PCI_DEV_ID_DECL_BNXT
 #undef RTE_PCI_DEV_ID_DECL_FM10K
 #undef RTE_PCI_DEV_ID_DECL_FM10KVF
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 16da821..cb40bbb 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -254,10 +254,14 @@ struct rte_eth_link {
 #define ETH_LINK_SPEED_10       10      /**< 10 megabits/second. */
 #define ETH_LINK_SPEED_100      100     /**< 100 megabits/second. */
 #define ETH_LINK_SPEED_1000     1000    /**< 1 gigabits/second. */
+#define ETH_LINK_SPEED_2000     2000    /**< 2 gigabits/second. */
+#define ETH_LINK_SPEED_2500     2500    /**< 2.5 gigabits/second. */
 #define ETH_LINK_SPEED_10000    10000   /**< 10 gigabits/second. */
 #define ETH_LINK_SPEED_10G      10000   /**< alias of 10 gigabits/second. */
 #define ETH_LINK_SPEED_20G      20000   /**< 20 gigabits/second. */
+#define ETH_LINK_SPEED_25G      25000   /**< 25 gigabits/second. */
 #define ETH_LINK_SPEED_40G      40000   /**< 40 gigabits/second. */
+#define ETH_LINK_SPEED_50G      50000   /**< 50 gigabits/second. */
 
 #define ETH_LINK_AUTONEG_DUPLEX 0       /**< Auto-negotiate duplex. */
 #define ETH_LINK_HALF_DUPLEX    1       /**< Half-duplex connection. */
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 8ecab41..2b5153e 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -144,6 +144,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD)       += -lrte_pmd_i40e
 _LDLIBS-$(CONFIG_RTE_LIBRTE_FM10K_PMD)      += -lrte_pmd_fm10k
 _LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)      += -lrte_pmd_ixgbe
 _LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD)      += -lrte_pmd_e1000
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD)       += -lrte_pmd_bnxt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -lrte_pmd_mlx4
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5
 _LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD)        += -lrte_pmd_nfp
-- 
1.9.1



More information about the dev mailing list