[dpdk-dev] [PATCH 10/56] net/sfc: import libefx SFN7xxx family support

Andrew Rybchenko arybchenko at solarflare.com
Mon Nov 21 16:00:24 CET 2016


SFN7xxx is the first family based on EF10 architecture.

EFSYS_OPT_HUNTINGTON should be enabled to use it.

From Solarflare Communications Inc.

Signed-off-by: Andrew Rybchenko <arybchenko at solarflare.com>
---
 drivers/net/sfc/efx/base/ef10_ev.c         | 1226 +++++++++++++++++++
 drivers/net/sfc/efx/base/ef10_filter.c     | 1469 +++++++++++++++++++++++
 drivers/net/sfc/efx/base/ef10_impl.h       |  718 +++++++++++
 drivers/net/sfc/efx/base/ef10_intr.c       |  197 ++++
 drivers/net/sfc/efx/base/ef10_mac.c        |  446 +++++++
 drivers/net/sfc/efx/base/ef10_mcdi.c       |  342 ++++++
 drivers/net/sfc/efx/base/ef10_nic.c        | 1769 ++++++++++++++++++++++++++++
 drivers/net/sfc/efx/base/ef10_phy.c        |  393 ++++++
 drivers/net/sfc/efx/base/ef10_rx.c         |  397 +++++++
 drivers/net/sfc/efx/base/ef10_tlv_layout.h |  941 +++++++++++++++
 drivers/net/sfc/efx/base/ef10_tx.c         |  683 +++++++++++
 drivers/net/sfc/efx/base/efx.h             |   10 +
 drivers/net/sfc/efx/base/efx_check.h       |    6 +
 drivers/net/sfc/efx/base/efx_ev.c          |   18 +
 drivers/net/sfc/efx/base/efx_filter.c      |   18 +
 drivers/net/sfc/efx/base/efx_impl.h        |   36 +
 drivers/net/sfc/efx/base/efx_intr.c        |   20 +
 drivers/net/sfc/efx/base/efx_mac.c         |   22 +
 drivers/net/sfc/efx/base/efx_mcdi.c        |  122 ++
 drivers/net/sfc/efx/base/efx_nic.c         |   53 +
 drivers/net/sfc/efx/base/efx_phy.c         |   15 +
 drivers/net/sfc/efx/base/efx_regs_ef10.h   |  571 +++++++++
 drivers/net/sfc/efx/base/efx_rx.c          |   20 +
 drivers/net/sfc/efx/base/efx_sram.c        |   32 +
 drivers/net/sfc/efx/base/efx_tx.c          |   29 +
 drivers/net/sfc/efx/base/hunt_impl.h       |   74 ++
 drivers/net/sfc/efx/base/hunt_nic.c        |  395 +++++++
 27 files changed, 10022 insertions(+)
 create mode 100644 drivers/net/sfc/efx/base/ef10_ev.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_filter.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_impl.h
 create mode 100644 drivers/net/sfc/efx/base/ef10_intr.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_mac.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_mcdi.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_nic.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_phy.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_rx.c
 create mode 100644 drivers/net/sfc/efx/base/ef10_tlv_layout.h
 create mode 100644 drivers/net/sfc/efx/base/ef10_tx.c
 create mode 100644 drivers/net/sfc/efx/base/efx_regs_ef10.h
 create mode 100644 drivers/net/sfc/efx/base/hunt_impl.h
 create mode 100644 drivers/net/sfc/efx/base/hunt_nic.c

diff --git a/drivers/net/sfc/efx/base/ef10_ev.c b/drivers/net/sfc/efx/base/ef10_ev.c
new file mode 100644
index 0000000..46ecd42
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_ev.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#define	EFX_EV_QSTAT_INCR(_eep, _stat)
+
+/*
+ * Non-interrupting event queue requires interrupting event queue to
+ * refer to for wake-up events even if wake ups are never used.
+ * It could be even non-allocated event queue.
+ */
+#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)
+
+static	__checkReturn	boolean_t
+ef10_ev_rx(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg);
+
+static	__checkReturn	boolean_t
+ef10_ev_tx(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg);
+
+static	__checkReturn	boolean_t
+ef10_ev_driver(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg);
+
+static	__checkReturn	boolean_t
+ef10_ev_drv_gen(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg);
+
+static	__checkReturn	boolean_t
+ef10_ev_mcdi(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg);
+
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_set_evq_tmr(
+	__in		efx_nic_t *enp,
+	__in		uint32_t instance,
+	__in		uint32_t mode,
+	__in		uint32_t timer_ns)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
+			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
+	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
+	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
+	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_init_evq(
+	__in		efx_nic_t *enp,
+	__in		unsigned int instance,
+	__in		efsys_mem_t *esmp,
+	__in		size_t nevs,
+	__in		uint32_t irq,
+	__in		uint32_t us,
+	__in		uint32_t flags,
+	__in		boolean_t low_latency)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[
+	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+		MC_CMD_INIT_EVQ_OUT_LEN)];
+	efx_qword_t *dma_addr;
+	uint64_t addr;
+	int npages;
+	int i;
+	boolean_t interrupting;
+	int ev_cut_through;
+	efx_rc_t rc;
+
+	npages = EFX_EVQ_NBUFS(nevs);
+	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_INIT_EVQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+	/*
+	 * On Huntington RX and TX event batching can only be requested together
+	 * (even if the datapath firmware doesn't actually support RX
+	 * batching). If event cut through is enabled no RX batching will occur.
+	 *
+	 * So always enable RX and TX event batching, and enable event cut
+	 * through if we want low latency operation.
+	 */
+	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+	case EFX_EVQ_FLAGS_TYPE_AUTO:
+		ev_cut_through = low_latency ? 1 : 0;
+		break;
+	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+		ev_cut_through = 0;
+		break;
+	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+		ev_cut_through = 1;
+		break;
+	default:
+		rc = EINVAL;
+		goto fail2;
+	}
+	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
+	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
+	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+	/* If the value is zero then disable the timer */
+	if (us == 0) {
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+	} else {
+		unsigned int ticks;
+
+		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+			goto fail3;
+
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
+	}
+
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+	addr = EFSYS_MEM_ADDR(esmp);
+
+	for (i = 0; i < npages; i++) {
+		EFX_POPULATE_QWORD_2(*dma_addr,
+		    EFX_DWORD_1, (uint32_t)(addr >> 32),
+		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+		dma_addr++;
+		addr += EFX_BUF_SIZE;
+	}
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail4;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail5;
+	}
+
+	/* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_init_evq_v2(
+	__in		efx_nic_t *enp,
+	__in		unsigned int instance,
+	__in		efsys_mem_t *esmp,
+	__in		size_t nevs,
+	__in		uint32_t irq,
+	__in		uint32_t us,
+	__in		uint32_t flags)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[
+		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+	boolean_t interrupting;
+	unsigned int evq_type;
+	efx_qword_t *dma_addr;
+	uint64_t addr;
+	int npages;
+	int i;
+	efx_rc_t rc;
+
+	npages = EFX_EVQ_NBUFS(nevs);
+	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_INIT_EVQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
+
+	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+	case EFX_EVQ_FLAGS_TYPE_AUTO:
+		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+		break;
+	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+		break;
+	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+		break;
+	default:
+		rc = EINVAL;
+		goto fail2;
+	}
+	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
+	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
+	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
+	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
+	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
+
+	/* If the value is zero then disable the timer */
+	if (us == 0) {
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
+	} else {
+		unsigned int ticks;
+
+		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+			goto fail3;
+
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
+		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+	}
+
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
+	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
+	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+
+	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+	addr = EFSYS_MEM_ADDR(esmp);
+
+	for (i = 0; i < npages; i++) {
+		EFX_POPULATE_QWORD_2(*dma_addr,
+		    EFX_DWORD_1, (uint32_t)(addr >> 32),
+		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+		dma_addr++;
+		addr += EFX_BUF_SIZE;
+	}
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail4;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail5;
+	}
+
+	/* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_fini_evq(
+	__in		efx_nic_t *enp,
+	__in		uint32_t instance)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+			    MC_CMD_FINI_EVQ_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FINI_EVQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+
+	__checkReturn	efx_rc_t
+ef10_ev_init(
+	__in		efx_nic_t *enp)
+{
+	_NOTE(ARGUNUSED(enp))
+	return (0);
+}
+
+			void
+ef10_ev_fini(
+	__in		efx_nic_t *enp)
+{
+	_NOTE(ARGUNUSED(enp))
+}
+
+	__checkReturn	efx_rc_t
+ef10_ev_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		uint32_t us,
+	__in		uint32_t flags,
+	__in		efx_evq_t *eep)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	uint32_t irq;
+	efx_rc_t rc;
+
+	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
+	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	if (index >= encp->enc_evq_limit) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	if (us > encp->enc_evq_timer_max_us) {
+		rc = EINVAL;
+		goto fail3;
+	}
+
+	/* Set up the handler table */
+	eep->ee_rx	= ef10_ev_rx;
+	eep->ee_tx	= ef10_ev_tx;
+	eep->ee_driver	= ef10_ev_driver;
+	eep->ee_drv_gen	= ef10_ev_drv_gen;
+	eep->ee_mcdi	= ef10_ev_mcdi;
+
+	/* Set up the event queue */
+	/* INIT_EVQ expects function-relative vector number */
+	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+		irq = index;
+	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+		irq = index;
+		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+	} else {
+		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
+	}
+
+	/*
+	 * Interrupts may be raised for events immediately after the queue is
+	 * created. See bug58606.
+	 */
+
+	if (encp->enc_init_evq_v2_supported) {
+		/*
+		 * On Medford the low latency license is required to enable RX
+		 * and event cut through and to disable RX batching.  If event
+		 * queue type in flags is auto, we let the firmware decide the
+		 * settings to use. If the adapter has a low latency license,
+		 * it will choose the best settings for low latency, otherwise
+		 * it will choose the best settings for throughput.
+		 */
+		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+		if (rc != 0)
+			goto fail4;
+	} else {
+		/*
+		 * On Huntington we need to specify the settings to use.
+		 * If event queue type in flags is auto, we favour throughput
+		 * if the adapter is running virtualization supporting firmware
+		 * (i.e. the full featured firmware variant)
+		 * and latency otherwise. The Ethernet Virtual Bridging
+		 * capability is used to make this decision. (Note though that
+		 * the low latency firmware variant is also best for
+		 * throughput and corresponding type should be specified
+		 * to choose it.)
+		 */
+		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
+		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+		    low_latency);
+		if (rc != 0)
+			goto fail5;
+	}
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_ev_qdestroy(
+	__in		efx_evq_t *eep)
+{
+	efx_nic_t *enp = eep->ee_enp;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+	    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+}
+
+	__checkReturn	efx_rc_t
+ef10_ev_qprime(
+	__in		efx_evq_t *eep,
+	__in		unsigned int count)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	uint32_t rptr;
+	efx_dword_t dword;
+
+	rptr = count & eep->ee_mask;
+
+	if (enp->en_nic_cfg.enc_bug35388_workaround) {
+		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+		EFX_POPULATE_DWORD_2(dword,
+		    ERF_DD_EVQ_IND_RPTR_FLAGS,
+		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+		    ERF_DD_EVQ_IND_RPTR,
+		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+		    &dword, B_FALSE);
+
+		EFX_POPULATE_DWORD_2(dword,
+		    ERF_DD_EVQ_IND_RPTR_FLAGS,
+		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+		    ERF_DD_EVQ_IND_RPTR,
+		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+		    &dword, B_FALSE);
+	} else {
+		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+		    &dword, B_FALSE);
+	}
+
+	return (0);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_driver_event(
+	__in		efx_nic_t *enp,
+	__in		uint32_t evq,
+	__in		efx_qword_t data)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
+	efx_rc_t rc;
+
+	req.emr_cmd = MC_CMD_DRIVER_EVENT;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
+	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+	    EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_ev_qpost(
+	__in	efx_evq_t *eep,
+	__in	uint16_t data)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	efx_qword_t event;
+
+	EFX_POPULATE_QWORD_3(event,
+	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+	    ESF_DZ_DRV_SUB_CODE, 0,
+	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+	__checkReturn	efx_rc_t
+ef10_ev_qmoderate(
+	__in		efx_evq_t *eep,
+	__in		unsigned int us)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_dword_t dword;
+	uint32_t mode;
+	efx_rc_t rc;
+
+	/* Check that hardware and MCDI use the same timer MODE values */
+	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
+	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
+	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
+	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
+	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
+	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
+	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
+	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+
+	if (us > encp->enc_evq_timer_max_us) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	/* If the value is zero then disable the timer */
+	if (us == 0) {
+		mode = FFE_CZ_TIMER_MODE_DIS;
+	} else {
+		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+	}
+
+	if (encp->enc_bug61265_workaround) {
+		uint32_t ns = us * 1000;
+
+		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
+		if (rc != 0)
+			goto fail2;
+	} else {
+		unsigned int ticks;
+
+		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+			goto fail3;
+
+		if (encp->enc_bug35388_workaround) {
+			EFX_POPULATE_DWORD_3(dword,
+			    ERF_DD_EVQ_IND_TIMER_FLAGS,
+			    EFE_DD_EVQ_IND_TIMER_FLAGS,
+			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
+			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
+			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+			    eep->ee_index, &dword, 0);
+		} else {
+			EFX_POPULATE_DWORD_2(dword,
+			    ERF_DZ_TC_TIMER_MODE, mode,
+			    ERF_DZ_TC_TIMER_VAL, ticks);
+			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+			    eep->ee_index, &dword, 0);
+		}
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+static	__checkReturn	boolean_t
+ef10_ev_rx(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	uint32_t size;
+	uint32_t label;
+	uint32_t mac_class;
+	uint32_t eth_tag_class;
+	uint32_t l3_class;
+	uint32_t l4_class;
+	uint32_t next_read_lbits;
+	uint16_t flags;
+	boolean_t cont;
+	boolean_t should_abort;
+	efx_evq_rxq_state_t *eersp;
+	unsigned int desc_count;
+	unsigned int last_used_id;
+
+	EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+	/* Discard events after RXQ/TXQ errors */
+	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+		return (B_FALSE);
+
+	/* Basic packet information */
+	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+	eersp = &eep->ee_rxq_state[label];
+
+	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
+	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+		/* Drop this event */
+		return (B_FALSE);
+	}
+	flags = 0;
+
+	if (cont != 0) {
+		/*
+		 * This may be part of a scattered frame, or it may be a
+		 * truncated frame if scatter is disabled on this RXQ.
+		 * Overlength frames can be received if e.g. a VF is configured
+		 * for 1500 MTU but connected to a port set to 9000 MTU
+		 * (see bug56567).
+		 * FIXME: There is not yet any driver that supports scatter on
+		 * Huntington.  Scatter support is required for OSX.
+		 */
+		flags |= EFX_PKT_CONT;
+	}
+
+	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+		flags |= EFX_PKT_UNICAST;
+
+	/* Increment the count of descriptors read */
+	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+	eersp->eers_rx_read_ptr += desc_count;
+
+	/*
+	 * FIXME: add error checking to make sure this is a batched event.
+	 * This could also be an aborted scatter, see Bug36629.
+	 */
+	if (desc_count > 1) {
+		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+		flags |= EFX_PKT_PREFIX_LEN;
+	}
+
+	/* Calculate the index of the last descriptor consumed */
+	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+	/* Check for errors that invalidate checksum and L3/L4 fields */
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+		/* RX frame truncated (error flag is misnamed) */
+		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+		flags |= EFX_DISCARD;
+		goto deliver;
+	}
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+		/* Bad Ethernet frame CRC */
+		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+		flags |= EFX_DISCARD;
+		goto deliver;
+	}
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+		/*
+		 * Hardware parse failed, due to malformed headers
+		 * or headers that are too long for the parser.
+		 * Headers and checksums must be validated by the host.
+		 */
+		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
+		goto deliver;
+	}
+
+	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+		flags |= EFX_PKT_VLAN_TAGGED;
+	}
+
+	switch (l3_class) {
+	case ESE_DZ_L3_CLASS_IP4:
+	case ESE_DZ_L3_CLASS_IP4_FRAG:
+		flags |= EFX_PKT_IPV4;
+		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+		} else {
+			flags |= EFX_CKSUM_IPV4;
+		}
+
+		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+			flags |= EFX_PKT_TCP;
+		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+			flags |= EFX_PKT_UDP;
+		} else {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+		}
+		break;
+
+	case ESE_DZ_L3_CLASS_IP6:
+	case ESE_DZ_L3_CLASS_IP6_FRAG:
+		flags |= EFX_PKT_IPV6;
+
+		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+			flags |= EFX_PKT_TCP;
+		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+			flags |= EFX_PKT_UDP;
+		} else {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+		}
+		break;
+
+	default:
+		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+		break;
+	}
+
+	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+		} else {
+			flags |= EFX_CKSUM_TCPUDP;
+		}
+	}
+
+deliver:
+	/* If we're not discarding the packet then it is ok */
+	if (~flags & EFX_DISCARD)
+		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+	EFSYS_ASSERT(eecp->eec_rx != NULL);
+	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+	return (should_abort);
+}
+
+static	__checkReturn	boolean_t
+ef10_ev_tx(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	uint32_t id;
+	uint32_t label;
+	boolean_t should_abort;
+
+	EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+	/* Discard events after RXQ/TXQ errors */
+	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+		return (B_FALSE);
+
+	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+		/* Drop this event */
+		return (B_FALSE);
+	}
+
+	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+	EFSYS_ASSERT(eecp->eec_tx != NULL);
+	should_abort = eecp->eec_tx(arg, label, id);
+
+	return (should_abort);
+}
+
+static	__checkReturn	boolean_t
+ef10_ev_driver(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg)
+{
+	unsigned int code;
+	boolean_t should_abort;
+
+	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+	should_abort = B_FALSE;
+
+	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+	switch (code) {
+	case ESE_DZ_DRV_TIMER_EV: {
+		uint32_t id;
+
+		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+		EFSYS_ASSERT(eecp->eec_timer != NULL);
+		should_abort = eecp->eec_timer(arg, id);
+		break;
+	}
+
+	case ESE_DZ_DRV_WAKE_UP_EV: {
+		uint32_t id;
+
+		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+		should_abort = eecp->eec_wake_up(arg, id);
+		break;
+	}
+
+	case ESE_DZ_DRV_START_UP_EV:
+		EFSYS_ASSERT(eecp->eec_initialized != NULL);
+		should_abort = eecp->eec_initialized(arg);
+		break;
+
+	default:
+		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+		break;
+	}
+
+	return (should_abort);
+}
+
+static	__checkReturn	boolean_t
+ef10_ev_drv_gen(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg)
+{
+	uint32_t data;
+	boolean_t should_abort;
+
+	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+	should_abort = B_FALSE;
+
+	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+	if (data >= ((uint32_t)1 << 16)) {
+		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+		return (B_TRUE);
+	}
+
+	EFSYS_ASSERT(eecp->eec_software != NULL);
+	should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+	return (should_abort);
+}
+
+static	__checkReturn	boolean_t
+ef10_ev_mcdi(
+	__in		efx_evq_t *eep,
+	__in		efx_qword_t *eqp,
+	__in		const efx_ev_callbacks_t *eecp,
+	__in_opt	void *arg)
+{
+	efx_nic_t *enp = eep->ee_enp;
+	unsigned int code;
+	boolean_t should_abort = B_FALSE;
+
+	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+	switch (code) {
+	case MCDI_EVENT_CODE_BADSSERT:
+		efx_mcdi_ev_death(enp, EINTR);
+		break;
+
+	case MCDI_EVENT_CODE_CMDDONE:
+		efx_mcdi_ev_cpl(enp,
+		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+		break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+	case MCDI_EVENT_CODE_PROXY_RESPONSE:
+		/*
+		 * This event notifies a function that an authorization request
+		 * has been processed. If the request was authorized then the
+		 * function can now re-send the original MCDI request.
+		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+		 */
+		efx_mcdi_ev_proxy_response(enp,
+		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+		break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+	case MCDI_EVENT_CODE_LINKCHANGE: {
+		efx_link_mode_t link_mode;
+
+		ef10_phy_link_ev(enp, eqp, &link_mode);
+		should_abort = eecp->eec_link_change(arg, link_mode);
+		break;
+	}
+
+	case MCDI_EVENT_CODE_SENSOREVT: {
+		break;
+	}
+
+	case MCDI_EVENT_CODE_SCHEDERR:
+		/* Informational only */
+		break;
+
+	case MCDI_EVENT_CODE_REBOOT:
+		/* Falcon/Siena only (should not be seen with Huntington). */
+		efx_mcdi_ev_death(enp, EIO);
+		break;
+
+	case MCDI_EVENT_CODE_MC_REBOOT:
+		/* MC_REBOOT event is used for Huntington (EF10) and later. */
+		efx_mcdi_ev_death(enp, EIO);
+		break;
+
+	case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		break;
+
+	case MCDI_EVENT_CODE_FWALERT: {
+		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+			should_abort = eecp->eec_exception(arg,
+				EFX_EXCEPTION_FWALERT_SRAM,
+				MCDI_EV_FIELD(eqp, FWALERT_DATA));
+		else
+			should_abort = eecp->eec_exception(arg,
+				EFX_EXCEPTION_UNKNOWN_FWALERT,
+				MCDI_EV_FIELD(eqp, DATA));
+		break;
+	}
+
+	case MCDI_EVENT_CODE_TX_ERR: {
+		/*
+		 * After a TXQ error is detected, firmware sends a TX_ERR event.
+		 * This may be followed by TX completions (which we discard),
+		 * and then finally by a TX_FLUSH event. Firmware destroys the
+		 * TXQ automatically after sending the TX_FLUSH event.
+		 */
+		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+		EFSYS_PROBE2(tx_descq_err,
+			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+		/* Inform the driver that a reset is required. */
+		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+		break;
+	}
+
+	case MCDI_EVENT_CODE_TX_FLUSH: {
+		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+		/*
+		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
+		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+		 * We want to wait for all completions, so ignore the events
+		 * with TX_FLUSH_TO_DRIVER.
+		 */
+		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+			should_abort = B_FALSE;
+			break;
+		}
+
+		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+		break;
+	}
+
+	case MCDI_EVENT_CODE_RX_ERR: {
+		/*
+		 * After an RXQ error is detected, firmware sends an RX_ERR
+		 * event. This may be followed by RX events (which we discard),
+		 * and then finally by an RX_FLUSH event. Firmware destroys the
+		 * RXQ automatically after sending the RX_FLUSH event.
+		 */
+		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+		EFSYS_PROBE2(rx_descq_err,
+			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+		/* Inform the driver that a reset is required. */
+		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+		break;
+	}
+
+	case MCDI_EVENT_CODE_RX_FLUSH: {
+		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+		/*
+		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+		 * We want to wait for all completions, so ignore the events
+		 * with RX_FLUSH_TO_DRIVER.
+		 */
+		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+			should_abort = B_FALSE;
+			break;
+		}
+
+		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+		break;
+	}
+
+	default:
+		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+		break;
+	}
+
+	return (should_abort);
+}
+
+		void
+ef10_ev_rxlabel_init(
+	__in		efx_evq_t *eep,
+	__in		efx_rxq_t *erp,
+	__in		unsigned int label,
+	__in		boolean_t packed_stream)
+{
+	efx_evq_rxq_state_t *eersp;
+
+	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+	eersp = &eep->ee_rxq_state[label];
+
+	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+	eersp->eers_rx_read_ptr = 0;
+	eersp->eers_rx_mask = erp->er_mask;
+	EFSYS_ASSERT(!packed_stream);
+}
+
+		void
+ef10_ev_rxlabel_fini(
+	__in		efx_evq_t *eep,
+	__in		unsigned int label)
+{
+	efx_evq_rxq_state_t *eersp;
+
+	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+	eersp = &eep->ee_rxq_state[label];
+
+	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+	eersp->eers_rx_read_ptr = 0;
+	eersp->eers_rx_mask = 0;
+}
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_filter.c b/drivers/net/sfc/efx/base/ef10_filter.c
new file mode 100644
index 0000000..608a058
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_filter.c
@@ -0,0 +1,1469 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_FILTER
+
+#define	EFE_SPEC(eftp, index)	((eftp)->eft_entry[(index)].efe_spec)
+
+static			efx_filter_spec_t *
+ef10_filter_entry_spec(
+	__in		const ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	uintptr_t entry = EFE_SPEC(eftp, index);
+
+	/* Strip the software flag bits kept in the pointer's low bits. */
+	return ((efx_filter_spec_t *)(entry & ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static			boolean_t
+ef10_filter_entry_is_busy(
+	__in		const ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* Busy entries have a pending operation; callers must not touch them. */
+	return ((EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY) != 0 ?
+	    B_TRUE : B_FALSE);
+}
+
+static			boolean_t
+ef10_filter_entry_is_auto_old(
+	__in		const ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* AUTO_OLD marks automatic filters awaiting refresh or removal. */
+	return ((EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD) != 0 ?
+	    B_TRUE : B_FALSE);
+}
+
+static			void
+ef10_filter_set_entry(
+	__inout		ef10_filter_table_t *eftp,
+	__in		unsigned int index,
+	__in_opt	const efx_filter_spec_t *efsp)
+{
+	/*
+	 * Store the spec pointer for a table slot; the pointer's low bits
+	 * double as flag storage, so a freshly set entry has all flags
+	 * clear.  A NULL spec marks the slot as free.
+	 */
+	EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static			void
+ef10_filter_set_entry_busy(
+	__inout		ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* Flag the entry as having a firmware operation in flight. */
+	EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static			void
+ef10_filter_set_entry_not_busy(
+	__inout		ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* Clear the BUSY flag once the firmware operation has completed. */
+	EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static			void
+ef10_filter_set_entry_auto_old(
+	__inout		ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* Only occupied slots may be marked; the flag has no meaning alone. */
+	EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+	/* Mark an automatic filter as old (removal candidate, see remove_old). */
+	EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static			void
+ef10_filter_set_entry_not_auto_old(
+	__inout		ef10_filter_table_t *eftp,
+	__in		unsigned int index)
+{
+	/* Clear the AUTO_OLD mark, e.g. when a filter has been refreshed. */
+	EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+	/* The slot must still hold a spec after clearing the flag. */
+	EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
+
+	__checkReturn	efx_rc_t
+ef10_filter_init(
+	__in		efx_nic_t *enp)
+{
+	efx_rc_t rc;
+	ef10_filter_table_t *eftp;
+
+	/* Allocate the software filter table for an EF10-family NIC. */
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/*
+	 * Verify at compile time that each EFX_FILTER_MATCH_* flag value is
+	 * identical to the corresponding MCDI FILTER_OP match field mask,
+	 * so efs_match_flags can be passed to the firmware unmodified (see
+	 * efx_mcdi_filter_op_add()).
+	 */
+#define	MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+	EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+	    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+#undef MATCH_MASK
+
+	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+	if (!eftp) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+
+	enp->en_filter.ef_ef10_filter_table = eftp;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_filter_fini(
+	__in		efx_nic_t *enp)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+
+	/* Release the software filter table allocated by ef10_filter_init(). */
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	if (eftp != NULL) {
+		EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+		    eftp);
+	}
+}
+
+/*
+ * Issue an MCDI FILTER_OP request to insert a hardware filter described
+ * by *spec: INSERT for an exclusive filter, SUBSCRIBE for a shared one,
+ * or REPLACE to redirect an existing filter (whose current handle must
+ * be supplied in *handle).  On success the firmware-assigned handle is
+ * written back to *handle.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_filter_op_add(
+	__in		efx_nic_t *enp,
+	__in		efx_filter_spec_t *spec,
+	__in		unsigned int filter_op,
+	__inout		ef10_filter_handle_t *handle)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+			    MC_CMD_FILTER_OP_OUT_LEN)];
+	uint32_t match_fields = 0;
+	efx_rc_t rc;
+
+	memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FILTER_OP;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+	switch (filter_op) {
+	case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+		/* REPLACE additionally identifies the filter to redirect. */
+		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+		    handle->efh_lo);
+		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+		    handle->efh_hi);
+		/* Fall through */
+	case MC_CMD_FILTER_OP_IN_OP_INSERT:
+	case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+		break;
+	default:
+		EFSYS_ASSERT(0);
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	if (spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+		/*
+		 * The LOC_MAC_IG match flag can represent unknown unicast
+		 *  or multicast filters - use the MAC address to distinguish
+		 *  them.
+		 */
+		if (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+			match_fields |= 1U <<
+				MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+		else
+			match_fields |= 1U <<
+				MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+	}
+
+	/*
+	 * The remaining match flags map directly onto the MCDI match field
+	 * masks (verified by the static asserts in ef10_filter_init()).
+	 */
+	match_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG);
+
+	/* Destination: always deliver to the host, to spec's RX queue. */
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+	    EVB_PORT_ID_ASSIGNED);
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+	    match_fields);
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
+	    MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+	    spec->efs_dmaq_id);
+	if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
+		    spec->efs_rss_context);
+	}
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+	    spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+	    MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+	    MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
+	    MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+	/* Match fields are only sent for INSERT/SUBSCRIBE, not REPLACE. */
+	if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+		/*
+		 * NOTE: Unlike most MCDI requests, the filter fields
+		 * are presented in network (big endian) byte order.
+		 */
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+		    spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+		    spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+		MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+		    __CPU_TO_BE_16(spec->efs_rem_port));
+		MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+		    __CPU_TO_BE_16(spec->efs_loc_port));
+
+		MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+		    __CPU_TO_BE_16(spec->efs_ether_type));
+
+		MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+		    __CPU_TO_BE_16(spec->efs_inner_vid));
+		MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+		    __CPU_TO_BE_16(spec->efs_outer_vid));
+
+		/* IP protocol (in low byte, high byte is zero) */
+		MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+		    spec->efs_ip_proto);
+
+		EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+		    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+		EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+		    MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+		    &spec->efs_rem_host.eo_byte[0],
+		    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+		    &spec->efs_loc_host.eo_byte[0],
+		    MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+	}
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	/* Return the handle the firmware assigned to this filter. */
+	handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
+	handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+
+}
+
+/*
+ * Issue an MCDI FILTER_OP request to remove a hardware filter: REMOVE
+ * for an exclusive filter, UNSUBSCRIBE for a shared one.  The filter is
+ * identified by the handle returned at insertion time.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_filter_op_delete(
+	__in		efx_nic_t *enp,
+	__in		unsigned int filter_op,
+	__inout		ef10_filter_handle_t *handle)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+			    MC_CMD_FILTER_OP_OUT_LEN)];
+	efx_rc_t rc;
+
+	memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FILTER_OP;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+	/* Only the two removal operations are valid here. */
+	switch (filter_op) {
+	case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+	case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+		MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+		break;
+	default:
+		EFSYS_ASSERT(0);
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
+	MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+
+	/* Quiet: failure is expected after an MC reboot drops filters. */
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	boolean_t
+ef10_filter_equal(
+	__in		const efx_filter_spec_t *left,
+	__in		const efx_filter_spec_t *right)
+{
+	/*
+	 * Compare only the match tuple; destination fields (flags, queue,
+	 * RSS context) are deliberately excluded - see ef10_filter_same_dest.
+	 * FIXME: Consider rx vs tx filters (look at efs_flags)
+	 */
+	if ((left->efs_match_flags == right->efs_match_flags) &&
+	    EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host) &&
+	    EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host) &&
+	    (memcmp(left->efs_rem_mac, right->efs_rem_mac,
+		EFX_MAC_ADDR_LEN) == 0) &&
+	    (memcmp(left->efs_loc_mac, right->efs_loc_mac,
+		EFX_MAC_ADDR_LEN) == 0) &&
+	    (left->efs_rem_port == right->efs_rem_port) &&
+	    (left->efs_loc_port == right->efs_loc_port) &&
+	    (left->efs_inner_vid == right->efs_inner_vid) &&
+	    (left->efs_outer_vid == right->efs_outer_vid) &&
+	    (left->efs_ether_type == right->efs_ether_type) &&
+	    (left->efs_ip_proto == right->efs_ip_proto))
+		return (B_TRUE);
+
+	return (B_FALSE);
+}
+
+static	__checkReturn	boolean_t
+ef10_filter_same_dest(
+	__in		const efx_filter_spec_t *left,
+	__in		const efx_filter_spec_t *right)
+{
+	/* Filters with mixed RSS/non-RSS delivery never share a dest. */
+	if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) !=
+	    (right->efs_flags & EFX_FILTER_FLAG_RX_RSS))
+		return (B_FALSE);
+
+	/* RSS filters match on context, plain filters on DMA queue. */
+	if (left->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+		return (left->efs_rss_context == right->efs_rss_context ?
+		    B_TRUE : B_FALSE);
+	}
+
+	return (left->efs_dmaq_id == right->efs_dmaq_id ?
+	    B_TRUE : B_FALSE);
+}
+
+/*
+ * Hash the match-relevant tail of the filter spec (everything from
+ * efs_outer_vid onwards) to choose a software hash table bucket.
+ */
+static	__checkReturn	uint32_t
+ef10_filter_hash(
+	__in		efx_filter_spec_t *spec)
+{
+	/* The hashed region must be DWORD aligned and DWORD sized. */
+	EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+			    == 0);
+	EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+			    sizeof (uint32_t)) == 0);
+
+	/*
+	 * As the area of the efx_filter_spec_t we need to hash is DWORD
+	 * aligned and an exact number of DWORDs in size we can use the
+	 * optimised efx_hash_dwords() rather than efx_hash_bytes()
+	 */
+	return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
+			(sizeof (efx_filter_spec_t) -
+			EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+			sizeof (uint32_t), 0));
+}
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients.  Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static	__checkReturn	boolean_t
+ef10_filter_is_exclusive(
+	__in		efx_filter_spec_t *spec)
+{
+	uint32_t match_flags = spec->efs_match_flags;
+
+	/* A specific, non-multicast local MAC address is exclusive. */
+	if ((match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+	    !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+		return (B_TRUE);
+
+	/* A specific local IP address may be exclusive too. */
+	if ((match_flags &
+	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+		/*
+		 * NOTE(review): these byte tests presumably exclude IPv4
+		 * class D and IPv6 ff00::/8 multicast destinations; the
+		 * exact nibble tested depends on efs_loc_host byte layout -
+		 * kept identical to the original, confirm against efx_types.
+		 */
+		if (spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) {
+			if ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)
+				return (B_TRUE);
+		} else if (spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) {
+			if (spec->efs_loc_host.eo_u8[0] != 0xff)
+				return (B_TRUE);
+		}
+	}
+
+	return (B_FALSE);
+}
+
+/*
+ * Re-insert into hardware every filter recorded in the software table,
+ * used after an event (e.g. MC reboot) has discarded hardware filter
+ * state while the software table is still valid.
+ */
+	__checkReturn	efx_rc_t
+ef10_filter_restore(
+	__in		efx_nic_t *enp)
+{
+	int tbl_id;
+	efx_filter_spec_t *spec;
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	boolean_t restoring;
+	efsys_lock_state_t state;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+		/* Claim the entry (mark it busy) under the lock ... */
+		EFSYS_LOCK(enp->en_eslp, state);
+
+		spec = ef10_filter_entry_spec(eftp, tbl_id);
+		if (spec == NULL) {
+			restoring = B_FALSE;
+		} else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+			/* Ignore busy entries. */
+			restoring = B_FALSE;
+		} else {
+			ef10_filter_set_entry_busy(eftp, tbl_id);
+			restoring = B_TRUE;
+		}
+
+		EFSYS_UNLOCK(enp->en_eslp, state);
+
+		if (restoring == B_FALSE)
+			continue;
+
+		/* ... then issue the (blocking) MCDI insert unlocked. */
+		if (ef10_filter_is_exclusive(spec)) {
+			rc = efx_mcdi_filter_op_add(enp, spec,
+			    MC_CMD_FILTER_OP_IN_OP_INSERT,
+			    &eftp->eft_entry[tbl_id].efe_handle);
+		} else {
+			rc = efx_mcdi_filter_op_add(enp, spec,
+			    MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+			    &eftp->eft_entry[tbl_id].efe_handle);
+		}
+
+		/*
+		 * NOTE(review): on failure the entry is left marked busy;
+		 * confirm callers treat restore failure as fatal.
+		 */
+		if (rc != 0)
+			goto fail1;
+
+		EFSYS_LOCK(enp->en_eslp, state);
+
+		ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+		EFSYS_UNLOCK(enp->en_eslp, state);
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table. As per the linux net
+ * driver.
+ */
+#define	EF10_FILTER_SEARCH_LIMIT 200
+
+/*
+ * Insert a filter, using the open-addressed software hash table to
+ * detect duplicates and allocate a slot.  On success the table index of
+ * the entry is returned through filter_id (if non-NULL).  If an equal
+ * filter already exists: automatic (PRI_AUTO) entries are refreshed in
+ * place; exclusive filters are replaced only when may_replace is set,
+ * otherwise EEXIST is returned.
+ */
+static	__checkReturn	efx_rc_t
+ef10_filter_add_internal(
+	__in		efx_nic_t *enp,
+	__inout		efx_filter_spec_t *spec,
+	__in		boolean_t may_replace,
+	__out_opt	uint32_t *filter_id)
+{
+	efx_rc_t rc;
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t *saved_spec;
+	uint32_t hash;
+	unsigned int depth;
+	int ins_index;
+	boolean_t replacing = B_FALSE;
+	unsigned int i;
+	efsys_lock_state_t state;
+	boolean_t locked = B_FALSE;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	hash = ef10_filter_hash(spec);
+
+	/*
+	 * FIXME: Add support for inserting filters of different priorities
+	 * and removing lower priority multicast filters (bug 42378)
+	 */
+
+	/*
+	 * Find any existing filters with the same match tuple or
+	 * else a free slot to insert at.  If any of them are busy,
+	 * we have to wait and retry.
+	 */
+	for (;;) {
+		/* Restart the probe sequence from scratch on each retry. */
+		ins_index = -1;
+		depth = 1;
+		EFSYS_LOCK(enp->en_eslp, state);
+		locked = B_TRUE;
+
+		for (;;) {
+			/* Linear probing from (hash + 1) within the table. */
+			i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+			saved_spec = ef10_filter_entry_spec(eftp, i);
+
+			if (!saved_spec) {
+				/* Remember the first free slot seen. */
+				if (ins_index < 0) {
+					ins_index = i;
+				}
+			} else if (ef10_filter_equal(spec, saved_spec)) {
+				/* Busy match: drop the lock and retry. */
+				if (ef10_filter_entry_is_busy(eftp, i))
+					break;
+				if (saved_spec->efs_priority
+					    == EFX_FILTER_PRI_AUTO) {
+					ins_index = i;
+					goto found;
+				} else if (ef10_filter_is_exclusive(spec)) {
+					if (may_replace) {
+						ins_index = i;
+						goto found;
+					} else {
+						rc = EEXIST;
+						goto fail1;
+					}
+				}
+
+				/* Leave existing */
+			}
+
+			/*
+			 * Once we reach the maximum search depth, use
+			 * the first suitable slot or return EBUSY if
+			 * there was none.
+			 */
+			if (depth == EF10_FILTER_SEARCH_LIMIT) {
+				if (ins_index < 0) {
+					rc = EBUSY;
+					goto fail2;
+				}
+				goto found;
+			}
+			depth++;
+		}
+		EFSYS_UNLOCK(enp->en_eslp, state);
+		locked = B_FALSE;
+	}
+
+found:
+	/*
+	 * Create a software table entry if necessary, and mark it
+	 * busy.  We might yet fail to insert, but any attempt to
+	 * insert a conflicting filter while we're waiting for the
+	 * firmware must find the busy entry.
+	 */
+	saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+	if (saved_spec) {
+		if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+			/* This is a filter we are refreshing */
+			ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+			goto out_unlock;
+
+		}
+		replacing = B_TRUE;
+	} else {
+		EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+		if (!saved_spec) {
+			rc = ENOMEM;
+			goto fail3;
+		}
+		*saved_spec = *spec;
+		ef10_filter_set_entry(eftp, ins_index, saved_spec);
+	}
+	ef10_filter_set_entry_busy(eftp, ins_index);
+
+	EFSYS_UNLOCK(enp->en_eslp, state);
+	locked = B_FALSE;
+
+	/*
+	 * On replacing, the filter handle may change after a successful
+	 * replace operation; the MCDI call updates efe_handle in place.
+	 */
+	if (replacing) {
+		rc = efx_mcdi_filter_op_add(enp, spec,
+		    MC_CMD_FILTER_OP_IN_OP_REPLACE,
+		    &eftp->eft_entry[ins_index].efe_handle);
+	} else if (ef10_filter_is_exclusive(spec)) {
+		rc = efx_mcdi_filter_op_add(enp, spec,
+		    MC_CMD_FILTER_OP_IN_OP_INSERT,
+		    &eftp->eft_entry[ins_index].efe_handle);
+	} else {
+		rc = efx_mcdi_filter_op_add(enp, spec,
+		    MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+		    &eftp->eft_entry[ins_index].efe_handle);
+	}
+
+	if (rc != 0)
+		goto fail4;
+
+	EFSYS_LOCK(enp->en_eslp, state);
+	locked = B_TRUE;
+
+	if (replacing) {
+		/* Update the fields that may differ */
+		saved_spec->efs_priority = spec->efs_priority;
+		saved_spec->efs_flags = spec->efs_flags;
+		saved_spec->efs_rss_context = spec->efs_rss_context;
+		saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+	}
+
+	ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+	EFSYS_UNLOCK(enp->en_eslp, state);
+	locked = B_FALSE;
+
+	if (filter_id)
+		*filter_id = ins_index;
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+
+	/*
+	 * NOTE(review): this cleanup runs without the lock held, and when
+	 * replacing it clears the slot without freeing the old spec -
+	 * confirm both are acceptable (matches other libefx ports).
+	 */
+	if (!replacing) {
+		EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+		saved_spec = NULL;
+	}
+	ef10_filter_set_entry_not_busy(eftp, ins_index);
+	ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+	EFSYS_PROBE(fail3);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	if (locked)
+		EFSYS_UNLOCK(enp->en_eslp, state);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_filter_add(
+	__in		efx_nic_t *enp,
+	__inout		efx_filter_spec_t *spec,
+	__in		boolean_t may_replace)
+{
+	efx_rc_t rc;
+
+	/* Public entry point: insert without reporting the table index. */
+	if ((rc = ef10_filter_add_internal(enp, spec, may_replace,
+	    NULL)) != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+/*
+ * Remove the filter at the given software table index: wait for any
+ * in-flight operation on the entry, remove the hardware filter, then
+ * free the software entry.
+ */
+static	__checkReturn	efx_rc_t
+ef10_filter_delete_internal(
+	__in		efx_nic_t *enp,
+	__in		uint32_t filter_id)
+{
+	efx_rc_t rc;
+	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t *spec;
+	efsys_lock_state_t state;
+	uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+	/*
+	 * Find the software table entry and mark it busy.  Don't
+	 * remove it yet; any attempt to update while we're waiting
+	 * for the firmware must find the busy entry.
+	 *
+	 * FIXME: What if the busy flag is never cleared?
+	 */
+	EFSYS_LOCK(enp->en_eslp, state);
+	while (ef10_filter_entry_is_busy(table, filter_idx)) {
+		/* Spin (dropping the lock) until the entry is idle. */
+		EFSYS_UNLOCK(enp->en_eslp, state);
+		EFSYS_SPIN(1);
+		EFSYS_LOCK(enp->en_eslp, state);
+	}
+	if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+		ef10_filter_set_entry_busy(table, filter_idx);
+	}
+	EFSYS_UNLOCK(enp->en_eslp, state);
+
+	if (spec == NULL) {
+		rc = ENOENT;
+		goto fail1;
+	}
+
+	/*
+	 * Try to remove the hardware filter. This may fail if the MC has
+	 * rebooted (which frees all hardware filter resources).
+	 */
+	if (ef10_filter_is_exclusive(spec)) {
+		rc = efx_mcdi_filter_op_delete(enp,
+		    MC_CMD_FILTER_OP_IN_OP_REMOVE,
+		    &table->eft_entry[filter_idx].efe_handle);
+	} else {
+		rc = efx_mcdi_filter_op_delete(enp,
+		    MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+		    &table->eft_entry[filter_idx].efe_handle);
+	}
+
+	/* Free the software table entry */
+	EFSYS_LOCK(enp->en_eslp, state);
+	ef10_filter_set_entry_not_busy(table, filter_idx);
+	ef10_filter_set_entry(table, filter_idx, NULL);
+	EFSYS_UNLOCK(enp->en_eslp, state);
+
+	EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+	/*
+	 * Check result of hardware filter removal.  The software entry is
+	 * freed regardless, so a stale handle is not retried.
+	 */
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Public delete entry point: locate the table entry whose match tuple
+ * AND destination equal *spec, then remove it via
+ * ef10_filter_delete_internal().
+ */
+	__checkReturn	efx_rc_t
+ef10_filter_delete(
+	__in		efx_nic_t *enp,
+	__inout		efx_filter_spec_t *spec)
+{
+	efx_rc_t rc;
+	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t *saved_spec;
+	unsigned int hash;
+	unsigned int depth;
+	unsigned int i;
+	efsys_lock_state_t state;
+	boolean_t locked = B_FALSE;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	hash = ef10_filter_hash(spec);
+
+	EFSYS_LOCK(enp->en_eslp, state);
+	locked = B_TRUE;
+
+	/*
+	 * Probe from (hash + 1), mirroring the insertion order used in
+	 * ef10_filter_add_internal().
+	 */
+	depth = 1;
+	for (;;) {
+		i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+		saved_spec = ef10_filter_entry_spec(table, i);
+		if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+		    ef10_filter_same_dest(spec, saved_spec)) {
+			break;
+		}
+		if (depth == EF10_FILTER_SEARCH_LIMIT) {
+			rc = ENOENT;
+			goto fail1;
+		}
+		depth++;
+	}
+
+	EFSYS_UNLOCK(enp->en_eslp, state);
+	locked = B_FALSE;
+
+	/* The internal helper re-takes the lock and handles busy entries. */
+	rc = ef10_filter_delete_internal(enp, i);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	if (locked)
+		EFSYS_UNLOCK(enp->en_eslp, state);
+
+	return (rc);
+}
+
+/*
+ * Query the firmware for the list of supported RX match field
+ * combinations, translating the firmware's UNKNOWN_UCAST_DST /
+ * UNKNOWN_MCAST_DST bits into the driver's LOC_MAC_IG flag.
+ *
+ * NOTE(review): *list is assumed large enough for the returned count;
+ * the caller's buffer size is not passed in - confirm against callers.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_get_parser_disp_info(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *list,
+	__out		size_t *length)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+			    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+	efx_rc_t rc;
+	uint32_t i;
+	boolean_t support_unknown_ucast = B_FALSE;
+	boolean_t support_unknown_mcast = B_FALSE;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+	MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+	    MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	*length = MCDI_OUT_DWORD(req,
+	    GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+	if (req.emr_out_length_used <
+	    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	memcpy(list,
+	    MCDI_OUT2(req,
+	    uint32_t,
+	    GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+	    (*length) * sizeof (uint32_t));
+	EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+	    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+	/*
+	 * Remove UNKNOWN UCAST and MCAST flags, and if both are present,
+	 * change the lower priority one to LOC_MAC_IG.
+	 *
+	 * The UNKNOWN_*CAST_DST identifiers are bit numbers (LBN), so
+	 * they must be shifted into masks before testing or clearing
+	 * bits in the returned match field words (as is done when the
+	 * fields are set in efx_mcdi_filter_op_add()).
+	 */
+	for (i = 0; i < *length; i++) {
+		uint32_t unknown_ucast_mask =
+		    1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+		uint32_t unknown_mcast_mask =
+		    1U << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+
+		if (list[i] & unknown_ucast_mask) {
+			list[i] &= ~unknown_ucast_mask;
+			support_unknown_ucast = B_TRUE;
+		}
+		if (list[i] & unknown_mcast_mask) {
+			list[i] &= ~unknown_mcast_mask;
+			support_unknown_mcast = B_TRUE;
+		}
+
+		if (support_unknown_ucast && support_unknown_mcast) {
+			/* Add (not replace) the LOC_MAC_IG match flag. */
+			list[i] |= EFX_FILTER_MATCH_LOC_MAC_IG;
+			break;
+		}
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_filter_supported_filters(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *list,
+	__out		size_t *length)
+{
+	efx_rc_t rc;
+
+	/* Thin wrapper: query supported RX match combinations via MCDI. */
+	rc = efx_mcdi_get_parser_disp_info(enp, list, length);
+	if (rc != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Insert (or refresh) an automatic filter for the given local unicast
+ * MAC address, directing matching traffic to the default RXQ.
+ */
+static	__checkReturn	efx_rc_t
+ef10_filter_insert_unicast(
+	__in				efx_nic_t *enp,
+	__in_ecount(6)			uint8_t const *addr,
+	__in				efx_filter_flag_t filter_flags)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t spec;
+	efx_rc_t rc;
+
+	/* Insert the filter for the local station address */
+	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+	    filter_flags,
+	    eftp->eft_default_rxq);
+	efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+
+	/*
+	 * NOTE(review): the index array is written at the current count
+	 * before the bound assertion below runs; callers are expected to
+	 * stay within EFX_EF10_FILTER_UNICAST_FILTERS_MAX - confirm.
+	 */
+	rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+	    &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+	if (rc != 0)
+		goto fail1;
+
+	eftp->eft_unicst_filter_count++;
+	EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+		    EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+ef10_filter_insert_all_unicast(
+	__in				efx_nic_t *enp,
+	__in				efx_filter_flag_t filter_flags)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t uc_def_spec;
+	efx_rc_t rc;
+
+	/*
+	 * Insert (or refresh) the unknown-unicast default filter,
+	 * directing unmatched unicast traffic to the default RXQ.
+	 */
+	efx_filter_spec_init_rx(&uc_def_spec, EFX_FILTER_PRI_AUTO,
+	    filter_flags,
+	    eftp->eft_default_rxq);
+	efx_filter_spec_set_uc_def(&uc_def_spec);
+
+	rc = ef10_filter_add_internal(enp, &uc_def_spec, B_TRUE,
+	    &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+	if (rc != 0)
+		goto fail1;
+
+	eftp->eft_unicst_filter_count++;
+	EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+		    EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+/*
+ * Insert (or refresh) automatic filters for a list of multicast MAC
+ * addresses, plus optionally the broadcast address.  When rollback is
+ * set, any failure undoes every filter inserted by this call;
+ * otherwise insertion is best-effort and failed entries are skipped.
+ */
+static	__checkReturn	efx_rc_t
+ef10_filter_insert_multicast_list(
+	__in				efx_nic_t *enp,
+	__in				boolean_t mulcst,
+	__in				boolean_t brdcst,
+	__in_ecount(6*count)		uint8_t const *addrs,
+	__in				uint32_t count,
+	__in				efx_filter_flag_t filter_flags,
+	__in				boolean_t rollback)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t spec;
+	uint8_t addr[6];
+	uint32_t i;
+	uint32_t filter_index;
+	uint32_t filter_count;
+	efx_rc_t rc;
+
+	/* With multicast reception disabled only broadcast is considered. */
+	if (mulcst == B_FALSE)
+		count = 0;
+
+	if (count + (brdcst ? 1 : 0) >
+	    EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+		/* Too many MAC addresses */
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	/* Insert/renew multicast address list filters */
+	filter_count = 0;
+	for (i = 0; i < count; i++) {
+		efx_filter_spec_init_rx(&spec,
+		    EFX_FILTER_PRI_AUTO,
+		    filter_flags,
+		    eftp->eft_default_rxq);
+
+		efx_filter_spec_set_eth_local(&spec,
+		    EFX_FILTER_SPEC_VID_UNSPEC,
+		    &addrs[i * EFX_MAC_ADDR_LEN]);
+
+		rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+					    &filter_index);
+
+		if (rc == 0) {
+			eftp->eft_mulcst_filter_indexes[filter_count] =
+				filter_index;
+			filter_count++;
+		} else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to rollback */
+			goto rollback;
+		}
+
+	}
+
+	if (brdcst == B_TRUE) {
+		/* Insert/renew broadcast address filter */
+		efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+		    filter_flags,
+		    eftp->eft_default_rxq);
+
+		EFX_MAC_BROADCAST_ADDR_SET(addr);
+		efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+		    addr);
+
+		rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+					    &filter_index);
+
+		if (rc == 0) {
+			eftp->eft_mulcst_filter_indexes[filter_count] =
+				filter_index;
+			filter_count++;
+		} else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to rollback */
+			goto rollback;
+		}
+	}
+
+	/* Record the final set for the next reconfigure/remove_old pass. */
+	eftp->eft_mulcst_filter_count = filter_count;
+	eftp->eft_using_all_mulcst = B_FALSE;
+
+	return (0);
+
+rollback:
+	/* Remove any filters we have inserted */
+	i = filter_count;
+	while (i--) {
+		(void) ef10_filter_delete_internal(enp,
+		    eftp->eft_mulcst_filter_indexes[i]);
+	}
+	eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+ef10_filter_insert_all_multicast(
+	__in				efx_nic_t *enp,
+	__in				efx_filter_flag_t filter_flags)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_spec_t mc_def_spec;
+	efx_rc_t rc;
+
+	/*
+	 * Insert (or refresh) the unknown-multicast default filter,
+	 * directing all multicast traffic to the default RXQ.
+	 */
+	efx_filter_spec_init_rx(&mc_def_spec, EFX_FILTER_PRI_AUTO,
+	    filter_flags,
+	    eftp->eft_default_rxq);
+	efx_filter_spec_set_mc_def(&mc_def_spec);
+
+	rc = ef10_filter_add_internal(enp, &mc_def_spec, B_TRUE,
+	    &eftp->eft_mulcst_filter_indexes[0]);
+	if (rc != 0)
+		goto fail1;
+
+	/* This single filter supersedes any multicast address list. */
+	eftp->eft_mulcst_filter_count = 1;
+	eftp->eft_using_all_mulcst = B_TRUE;
+
+	/*
+	 * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+	 */
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static			void
+ef10_filter_remove_old(
+	__in		efx_nic_t *enp)
+{
+	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+	uint32_t idx;
+
+	/*
+	 * Delete every entry still marked AUTO_OLD: automatic filters
+	 * that were not refreshed by the latest reconfigure pass.
+	 * Deletion errors are deliberately ignored.
+	 */
+	for (idx = 0; idx < EFX_ARRAY_SIZE(table->eft_entry); idx++) {
+		if (ef10_filter_entry_is_auto_old(table, idx))
+			(void) ef10_filter_delete_internal(enp, idx);
+	}
+}
+
+
+static	__checkReturn	efx_rc_t
+ef10_filter_get_workarounds(
+	__in				efx_nic_t *enp)
+{
+	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+	uint32_t implemented = 0;
+	uint32_t enabled = 0;
+	efx_rc_t rc;
+
+	rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+	if (rc == ENOTSUP) {
+		/*
+		 * Firmware is too old to support GET_WORKAROUNDS, and support
+		 * for this workaround was implemented later.
+		 */
+		encp->enc_bug26807_workaround = B_FALSE;
+	} else if (rc != 0) {
+		goto fail1;
+	} else {
+		/* Check if chained multicast filter support is enabled */
+		encp->enc_bug26807_workaround =
+		    (implemented & enabled &
+		    MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) != 0 ?
+		    B_TRUE : B_FALSE;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If all_unicst and/or all_mulcst filters cannot be applied then
+ * return ENOTSUP (Note the filters for the specified addresses are
+ * still applied in this case).
+ */
+	__checkReturn	efx_rc_t
+ef10_filter_reconfigure(
+	__in				efx_nic_t *enp,
+	__in_ecount(6)			uint8_t const *mac_addr,
+	__in				boolean_t all_unicst,
+	__in				boolean_t mulcst,
+	__in				boolean_t all_mulcst,
+	__in				boolean_t brdcst,
+	__in_ecount(6*count)		uint8_t const *addrs,
+	__in				uint32_t count)
+{
+	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+	ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+	efx_filter_flag_t filter_flags;
+	unsigned int i;
+	efx_rc_t all_unicst_rc = 0;
+	efx_rc_t all_mulcst_rc = 0;
+	efx_rc_t rc;
+
+	if (table->eft_default_rxq == NULL) {
+		/*
+		 * Filters direct traffic to the default RXQ, and so cannot be
+		 * inserted until it is available. Any currently configured
+		 * filters must be removed (ignore errors in case the MC
+		 * has rebooted, which removes hardware filters).
+		 */
+		for (i = 0; i < table->eft_unicst_filter_count; i++) {
+			(void) ef10_filter_delete_internal(enp,
+					table->eft_unicst_filter_indexes[i]);
+		}
+		table->eft_unicst_filter_count = 0;
+
+		for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+			(void) ef10_filter_delete_internal(enp,
+					table->eft_mulcst_filter_indexes[i]);
+		}
+		table->eft_mulcst_filter_count = 0;
+
+		return (0);
+	}
+
+	if (table->eft_using_rss)
+		filter_flags = EFX_FILTER_FLAG_RX_RSS;
+	else
+		filter_flags = 0;
+
+	/* Mark old filters which may need to be removed */
+	for (i = 0; i < table->eft_unicst_filter_count; i++) {
+		ef10_filter_set_entry_auto_old(table,
+					table->eft_unicst_filter_indexes[i]);
+	}
+	for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+		ef10_filter_set_entry_auto_old(table,
+					table->eft_mulcst_filter_indexes[i]);
+	}
+
+	/*
+	 * Insert or renew unicast filters.
+	 *
+	 * Firmware does not perform chaining on unicast filters. As traffic is
+	 * therefore only delivered to the first matching filter, we should
+	 * always insert the specific filter for our MAC address, to try and
+	 * ensure we get that traffic.
+	 *
+	 * (If the filter for our MAC address has already been inserted by
+	 * another function, we won't receive traffic sent to us, even if we
+	 * insert a unicast mismatch filter. To prevent traffic stealing, this
+	 * therefore relies on the privilege model only allowing functions to
+	 * insert filters for their own MAC address unless explicitly given
+	 * additional privileges by the user. This also means that, even on a
+	 * privileged function, inserting a unicast mismatch filter may not
+	 * catch all traffic in multi PCI function scenarios.)
+	 */
+	table->eft_unicst_filter_count = 0;
+	rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+	if (all_unicst || (rc != 0)) {
+		all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+						    filter_flags);
+		if ((rc != 0) && (all_unicst_rc != 0))
+			goto fail1;
+	}
+
+	/*
+	 * WORKAROUND_BUG26807 controls firmware support for chained multicast
+	 * filters, and can only be enabled or disabled when the hardware filter
+	 * table is empty.
+	 *
+	 * Chained multicast filters require support from the datapath firmware,
+	 * and may not be available (e.g. low-latency variants or old Huntington
+	 * firmware).
+	 *
+	 * Firmware will reset (FLR) functions which have inserted filters in
+	 * the hardware filter table when the workaround is enabled/disabled.
+	 * Functions without any hardware filters are not reset.
+	 *
+	 * Re-check if the workaround is enabled after adding unicast hardware
+	 * filters. This ensures that encp->enc_bug26807_workaround matches the
+	 * firmware state, and that later changes to enable/disable the
+	 * workaround will result in this function seeing a reset (FLR).
+	 *
+	 * In common-code drivers, we only support multiple PCI function
+	 * scenarios with firmware that supports multicast chaining, so we can
+	 * assume it is enabled for such cases and hence simplify the filter
+	 * insertion logic. Firmware that does not support multicast chaining
+	 * does not support multiple PCI function configurations either, so
+	 * filter insertion is much simpler and the same strategies can still be
+	 * used.
+	 */
+	if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+		goto fail2;
+
+	if ((table->eft_using_all_mulcst != all_mulcst) &&
+	    (encp->enc_bug26807_workaround == B_TRUE)) {
+		/*
+		 * Multicast filter chaining is enabled, so traffic that matches
+		 * more than one multicast filter will be replicated and
+		 * delivered to multiple recipients.  To avoid this duplicate
+		 * delivery, remove old multicast filters before inserting new
+		 * multicast filters.
+		 */
+		ef10_filter_remove_old(enp);
+	}
+
+	/* Insert or renew multicast filters */
+	if (all_mulcst == B_TRUE) {
+		/*
+		 * Insert the all multicast filter. If that fails, try to insert
+		 * all of our multicast filters (but without rollback on
+		 * failure).
+		 */
+		all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+							    filter_flags);
+		if (all_mulcst_rc != 0) {
+			rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+			    brdcst, addrs, count, filter_flags, B_FALSE);
+			if (rc != 0)
+				goto fail3;
+		}
+	} else {
+		/*
+		 * Insert filters for multicast addresses.
+		 * If any insertion fails, then rollback and try to insert the
+		 * all multicast filter instead.
+		 * If that also fails, try to insert all of the multicast
+		 * filters (but without rollback on failure).
+		 */
+		rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+			    addrs, count, filter_flags, B_TRUE);
+		if (rc != 0) {
+			if ((table->eft_using_all_mulcst == B_FALSE) &&
+			    (encp->enc_bug26807_workaround == B_TRUE)) {
+				/*
+				 * Multicast filter chaining is on, so remove
+				 * old filters before inserting the multicast
+				 * all filter to avoid duplicate delivery caused
+				 * by packets matching multiple filters.
+				 */
+				ef10_filter_remove_old(enp);
+			}
+
+			rc = ef10_filter_insert_all_multicast(enp,
+							    filter_flags);
+			if (rc != 0) {
+				rc = ef10_filter_insert_multicast_list(enp,
+				    mulcst, brdcst,
+				    addrs, count, filter_flags, B_FALSE);
+				if (rc != 0)
+					goto fail4;
+			}
+		}
+	}
+
+	/* Remove old filters which were not renewed */
+	ef10_filter_remove_old(enp);
+
+	/* report if any optional flags were rejected */
+	if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+	    ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+		rc = ENOTSUP;
+	}
+
+	return (rc);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	/* Clear auto old flags */
+	for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+		if (ef10_filter_entry_is_auto_old(table, i)) {
+			ef10_filter_set_entry_not_auto_old(table, i);
+		}
+	}
+
+	return (rc);
+}
+
+		void
+ef10_filter_get_default_rxq(
+	__in		efx_nic_t *enp,
+	__out		efx_rxq_t **erpp,
+	__out		boolean_t *using_rss)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+
+	/* Report the current default RXQ and whether RSS spreading is used */
+	*using_rss = eftp->eft_using_rss;
+	*erpp = eftp->eft_default_rxq;
+}
+
+
+		void
+ef10_filter_default_rxq_set(
+	__in		efx_nic_t *enp,
+	__in		efx_rxq_t *erp,
+	__in		boolean_t using_rss)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+
+	/* RSS spreading of filtered traffic is not supported here */
+	EFSYS_ASSERT(using_rss == B_FALSE);
+
+	eftp->eft_default_rxq = erp;
+	eftp->eft_using_rss = B_FALSE;
+}
+
+		void
+ef10_filter_default_rxq_clear(
+	__in		efx_nic_t *enp)
+{
+	ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+
+	/* Forget the default RXQ; new filters cannot be inserted until reset */
+	eftp->eft_using_rss = B_FALSE;
+	eftp->eft_default_rxq = NULL;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_impl.h b/drivers/net/sfc/efx/base/ef10_impl.h
new file mode 100644
index 0000000..3183210
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_impl.h
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef	_SYS_EF10_IMPL_H
+#define	_SYS_EF10_IMPL_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#if   EFSYS_OPT_HUNTINGTON
+#define	EF10_MAX_PIOBUF_NBUFS	HUNT_PIOBUF_NBUFS
+#endif
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define	EF10_NVRAM_CHUNK 0x80
+
+/* Alignment requirement for value written to RX WPTR:
+ *  the WPTR must be aligned to an 8 descriptor boundary
+ */
+#define	EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define	EF10_TCP_HEADER_OFFSET_LIMIT	208
+
+/* Invalid RSS context handle */
+#define	EF10_RSS_CONTEXT_INVALID	(0xffffffff)
+
+
+/* EV */
+
+	__checkReturn	efx_rc_t
+ef10_ev_init(
+	__in		efx_nic_t *enp);
+
+			void
+ef10_ev_fini(
+	__in		efx_nic_t *enp);
+
+	__checkReturn	efx_rc_t
+ef10_ev_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		uint32_t us,
+	__in		uint32_t flags,
+	__in		efx_evq_t *eep);
+
+			void
+ef10_ev_qdestroy(
+	__in		efx_evq_t *eep);
+
+	__checkReturn	efx_rc_t
+ef10_ev_qprime(
+	__in		efx_evq_t *eep,
+	__in		unsigned int count);
+
+			void
+ef10_ev_qpost(
+	__in	efx_evq_t *eep,
+	__in	uint16_t data);
+
+	__checkReturn	efx_rc_t
+ef10_ev_qmoderate(
+	__in		efx_evq_t *eep,
+	__in		unsigned int us);
+
+		void
+ef10_ev_rxlabel_init(
+	__in		efx_evq_t *eep,
+	__in		efx_rxq_t *erp,
+	__in		unsigned int label,
+	__in		boolean_t packed_stream);
+
+		void
+ef10_ev_rxlabel_fini(
+	__in		efx_evq_t *eep,
+	__in		unsigned int label);
+
+/* INTR */
+
+	__checkReturn	efx_rc_t
+ef10_intr_init(
+	__in		efx_nic_t *enp,
+	__in		efx_intr_type_t type,
+	__in		efsys_mem_t *esmp);
+
+			void
+ef10_intr_enable(
+	__in		efx_nic_t *enp);
+
+			void
+ef10_intr_disable(
+	__in		efx_nic_t *enp);
+
+			void
+ef10_intr_disable_unlocked(
+	__in		efx_nic_t *enp);
+
+	__checkReturn	efx_rc_t
+ef10_intr_trigger(
+	__in		efx_nic_t *enp,
+	__in		unsigned int level);
+
+			void
+ef10_intr_status_line(
+	__in		efx_nic_t *enp,
+	__out		boolean_t *fatalp,
+	__out		uint32_t *qmaskp);
+
+			void
+ef10_intr_status_message(
+	__in		efx_nic_t *enp,
+	__in		unsigned int message,
+	__out		boolean_t *fatalp);
+
+			void
+ef10_intr_fatal(
+	__in		efx_nic_t *enp);
+			void
+ef10_intr_fini(
+	__in		efx_nic_t *enp);
+
+/* NIC */
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_probe(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_set_drv_limits(
+	__inout		efx_nic_t *enp,
+	__in		efx_drv_limits_t *edlp);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_get_vi_pool(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *vi_countp);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_get_bar_region(
+	__in		efx_nic_t *enp,
+	__in		efx_nic_region_t region,
+	__out		uint32_t *offsetp,
+	__out		size_t *sizep);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_reset(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_init(
+	__in		efx_nic_t *enp);
+
+extern			void
+ef10_nic_fini(
+	__in		efx_nic_t *enp);
+
+extern			void
+ef10_nic_unprobe(
+	__in		efx_nic_t *enp);
+
+
+/* MAC */
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_poll(
+	__in		efx_nic_t *enp,
+	__out		efx_link_mode_t *link_modep);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_up(
+	__in		efx_nic_t *enp,
+	__out		boolean_t *mac_upp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_addr_set(
+	__in	efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_pdu_set(
+	__in	efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_pdu_get(
+	__in	efx_nic_t *enp,
+	__out	size_t *pdu);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_reconfigure(
+	__in	efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_multicast_list_set(
+	__in				efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mac_filter_default_rxq_set(
+	__in		efx_nic_t *enp,
+	__in		efx_rxq_t *erp,
+	__in		boolean_t using_rss);
+
+extern			void
+ef10_mac_filter_default_rxq_clear(
+	__in		efx_nic_t *enp);
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern	__checkReturn	efx_rc_t
+ef10_mcdi_init(
+	__in		efx_nic_t *enp,
+	__in		const efx_mcdi_transport_t *mtp);
+
+extern			void
+ef10_mcdi_fini(
+	__in		efx_nic_t *enp);
+
+extern			void
+ef10_mcdi_send_request(
+	__in			efx_nic_t *enp,
+	__in_bcount(hdr_len)	void *hdrp,
+	__in			size_t hdr_len,
+	__in_bcount(sdu_len)	void *sdup,
+	__in			size_t sdu_len);
+
+extern	__checkReturn	boolean_t
+ef10_mcdi_poll_response(
+	__in		efx_nic_t *enp);
+
+extern			void
+ef10_mcdi_read_response(
+	__in			efx_nic_t *enp,
+	__out_bcount(length)	void *bufferp,
+	__in			size_t offset,
+	__in			size_t length);
+
+extern			efx_rc_t
+ef10_mcdi_poll_reboot(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_mcdi_feature_supported(
+	__in		efx_nic_t *enp,
+	__in		efx_mcdi_feature_id_t id,
+	__out		boolean_t *supportedp);
+
+extern			void
+ef10_mcdi_get_timeout(
+	__in		efx_nic_t *enp,
+	__in		efx_mcdi_req_t *emrp,
+	__out		uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+	uint32_t		els_adv_cap_mask;	/* presumably advertised capabilities - confirm in ef10_phy.c */
+	uint32_t		els_lp_cap_mask;	/* presumably link partner capabilities - confirm in ef10_phy.c */
+	unsigned int		els_fcntl;		/* flow control setting */
+	efx_link_mode_t		els_link_mode;		/* current link mode */
+	boolean_t		els_mac_up;		/* MAC link-up state */
+} ef10_link_state_t;
+
+extern			void
+ef10_phy_link_ev(
+	__in		efx_nic_t *enp,
+	__in		efx_qword_t *eqp,
+	__out		efx_link_mode_t *link_modep);
+
+extern	__checkReturn	efx_rc_t
+ef10_phy_get_link(
+	__in		efx_nic_t *enp,
+	__out		ef10_link_state_t *elsp);
+
+extern	__checkReturn	efx_rc_t
+ef10_phy_power(
+	__in		efx_nic_t *enp,
+	__in		boolean_t on);
+
+extern	__checkReturn	efx_rc_t
+ef10_phy_reconfigure(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_phy_verify(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_phy_oui_get(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *ouip);
+
+/* TX */
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_init(
+	__in		efx_nic_t *enp);
+
+extern			void
+ef10_tx_fini(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		unsigned int label,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		uint16_t flags,
+	__in		efx_evq_t *eep,
+	__in		efx_txq_t *etp,
+	__out		unsigned int *addedp);
+
+extern		void
+ef10_tx_qdestroy(
+	__in		efx_txq_t *etp);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qpost(
+	__in		efx_txq_t *etp,
+	__in_ecount(n)	efx_buffer_t *eb,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__inout		unsigned int *addedp);
+
+extern			void
+ef10_tx_qpush(
+	__in		efx_txq_t *etp,
+	__in		unsigned int added,
+	__in		unsigned int pushed);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qpace(
+	__in		efx_txq_t *etp,
+	__in		unsigned int ns);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qflush(
+	__in		efx_txq_t *etp);
+
+extern			void
+ef10_tx_qenable(
+	__in		efx_txq_t *etp);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qpio_enable(
+	__in		efx_txq_t *etp);
+
+extern			void
+ef10_tx_qpio_disable(
+	__in		efx_txq_t *etp);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qpio_write(
+	__in			efx_txq_t *etp,
+	__in_ecount(buf_length)	uint8_t *buffer,
+	__in			size_t buf_length,
+	__in			size_t pio_buf_offset);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qpio_post(
+	__in			efx_txq_t *etp,
+	__in			size_t pkt_length,
+	__in			unsigned int completed,
+	__inout			unsigned int *addedp);
+
+extern	__checkReturn	efx_rc_t
+ef10_tx_qdesc_post(
+	__in		efx_txq_t *etp,
+	__in_ecount(n)	efx_desc_t *ed,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__inout		unsigned int *addedp);
+
+extern	void
+ef10_tx_qdesc_dma_create(
+	__in	efx_txq_t *etp,
+	__in	efsys_dma_addr_t addr,
+	__in	size_t size,
+	__in	boolean_t eop,
+	__out	efx_desc_t *edp);
+
+extern	void
+ef10_tx_qdesc_tso_create(
+	__in	efx_txq_t *etp,
+	__in	uint16_t ipv4_id,
+	__in	uint32_t tcp_seq,
+	__in	uint8_t	 tcp_flags,
+	__out	efx_desc_t *edp);
+
+extern	void
+ef10_tx_qdesc_tso2_create(
+	__in			efx_txq_t *etp,
+	__in			uint16_t ipv4_id,
+	__in			uint32_t tcp_seq,
+	__in			uint16_t tcp_mss,
+	__out_ecount(count)	efx_desc_t *edp,
+	__in			int count);
+
+extern	void
+ef10_tx_qdesc_vlantci_create(
+	__in	efx_txq_t *etp,
+	__in	uint16_t vlan_tci,
+	__out	efx_desc_t *edp);
+
+
+typedef uint32_t	efx_piobuf_handle_t;
+
+#define	EFX_PIOBUF_HANDLE_INVALID	((efx_piobuf_handle_t) -1)
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_pio_alloc(
+	__inout		efx_nic_t *enp,
+	__out		uint32_t *bufnump,
+	__out		efx_piobuf_handle_t *handlep,
+	__out		uint32_t *blknump,
+	__out		uint32_t *offsetp,
+	__out		size_t *sizep);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_pio_free(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t bufnum,
+	__in		uint32_t blknum);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_pio_link(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t vi_index,
+	__in		efx_piobuf_handle_t handle);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_pio_unlink(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t vi_index);
+
+
+/* VPD */
+
+
+/* RX */
+
+extern	__checkReturn	efx_rc_t
+ef10_rx_init(
+	__in		efx_nic_t *enp);
+
+
+extern	__checkReturn	efx_rc_t
+ef10_rx_prefix_pktlen(
+	__in		efx_nic_t *enp,
+	__in		uint8_t *buffer,
+	__out		uint16_t *lengthp);
+
+extern			void
+ef10_rx_qpost(
+	__in		efx_rxq_t *erp,
+	__in_ecount(n)	efsys_dma_addr_t *addrp,
+	__in		size_t size,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__in		unsigned int added);
+
+extern			void
+ef10_rx_qpush(
+	__in		efx_rxq_t *erp,
+	__in		unsigned int added,
+	__inout		unsigned int *pushedp);
+
+extern	__checkReturn	efx_rc_t
+ef10_rx_qflush(
+	__in		efx_rxq_t *erp);
+
+extern		void
+ef10_rx_qenable(
+	__in		efx_rxq_t *erp);
+
+extern	__checkReturn	efx_rc_t
+ef10_rx_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		unsigned int label,
+	__in		efx_rxq_type_t type,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		efx_evq_t *eep,
+	__in		efx_rxq_t *erp);
+
+extern			void
+ef10_rx_qdestroy(
+	__in		efx_rxq_t *erp);
+
+extern			void
+ef10_rx_fini(
+	__in		efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+/* 64-bit hardware filter handle, stored as low/high dwords */
+typedef struct ef10_filter_handle_s {
+	uint32_t	efh_lo;
+	uint32_t	efh_hi;
+} ef10_filter_handle_t;
+
+/* One row of the driver's software filter table */
+typedef struct ef10_filter_entry_s {
+	uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+	ef10_filter_handle_t efe_handle; /* handle for the inserted filter */
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define	EFX_EF10_FILTER_FLAG_BUSY	1U
+#define	EFX_EF10_FILTER_FLAG_AUTO_OLD	2U
+#define	EFX_EF10_FILTER_FLAGS		3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define	EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define	EFX_EF10_FILTER_UNICAST_FILTERS_MAX	2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define	EFX_EF10_FILTER_MULTICAST_FILTERS_MAX	(EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+	ef10_filter_entry_t	eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+	efx_rxq_t		*eft_default_rxq;
+	boolean_t		eft_using_rss;
+	uint32_t		eft_unicst_filter_indexes[
+	    EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+	/*
+	 * Count of inserted unicast filters (up to
+	 * EFX_EF10_FILTER_UNICAST_FILTERS_MAX), iterated and reset by
+	 * ef10_filter_reconfigure(); uint32_t to match
+	 * eft_mulcst_filter_count (was mistyped as boolean_t).
+	 */
+	uint32_t		eft_unicst_filter_count;
+	uint32_t		eft_mulcst_filter_indexes[
+	    EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+	uint32_t		eft_mulcst_filter_count;
+	boolean_t		eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+	__checkReturn	efx_rc_t
+ef10_filter_init(
+	__in		efx_nic_t *enp);
+
+			void
+ef10_filter_fini(
+	__in		efx_nic_t *enp);
+
+	__checkReturn	efx_rc_t
+ef10_filter_restore(
+	__in		efx_nic_t *enp);
+
+	__checkReturn	efx_rc_t
+ef10_filter_add(
+	__in		efx_nic_t *enp,
+	__inout		efx_filter_spec_t *spec,
+	__in		boolean_t may_replace);
+
+	__checkReturn	efx_rc_t
+ef10_filter_delete(
+	__in		efx_nic_t *enp,
+	__inout		efx_filter_spec_t *spec);
+
+extern	__checkReturn	efx_rc_t
+ef10_filter_supported_filters(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *list,
+	__out		size_t *length);
+
+extern	__checkReturn	efx_rc_t
+ef10_filter_reconfigure(
+	__in				efx_nic_t *enp,
+	__in_ecount(6)			uint8_t const *mac_addr,
+	__in				boolean_t all_unicst,
+	__in				boolean_t mulcst,
+	__in				boolean_t all_mulcst,
+	__in				boolean_t brdcst,
+	__in_ecount(6*count)		uint8_t const *addrs,
+	__in				uint32_t count);
+
+extern		void
+ef10_filter_get_default_rxq(
+	__in		efx_nic_t *enp,
+	__out		efx_rxq_t **erpp,
+	__out		boolean_t *using_rss);
+
+extern		void
+ef10_filter_default_rxq_set(
+	__in		efx_nic_t *enp,
+	__in		efx_rxq_t *erp,
+	__in		boolean_t using_rss);
+
+extern		void
+ef10_filter_default_rxq_clear(
+	__in		efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern	__checkReturn			efx_rc_t
+efx_mcdi_get_function_info(
+	__in				efx_nic_t *enp,
+	__out				uint32_t *pfp,
+	__out_opt			uint32_t *vfp);
+
+extern	__checkReturn		efx_rc_t
+efx_mcdi_privilege_mask(
+	__in			efx_nic_t *enp,
+	__in			uint32_t pf,
+	__in			uint32_t vf,
+	__out			uint32_t *maskp);
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_port_assignment(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *portp);
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_port_modes(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *modesp,
+	__out_opt	uint32_t *current_modep);
+
+extern	__checkReturn	efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+	__in		uint32_t port_mode,
+	__out		uint32_t *bandwidth_mbpsp);
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_mac_address_pf(
+	__in			efx_nic_t *enp,
+	__out_ecount_opt(6)	uint8_t mac_addrp[6]);
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_mac_address_vf(
+	__in			efx_nic_t *enp,
+	__out_ecount_opt(6)	uint8_t mac_addrp[6]);
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_clock(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *sys_freqp,
+	__out		uint32_t *dpcpu_freqp);
+
+
+extern	__checkReturn	efx_rc_t
+efx_mcdi_get_vector_cfg(
+	__in		efx_nic_t *enp,
+	__out_opt	uint32_t *vec_basep,
+	__out_opt	uint32_t *pf_nvecp,
+	__out_opt	uint32_t *vf_nvecp);
+
+extern	__checkReturn	efx_rc_t
+ef10_get_datapath_caps(
+	__in		efx_nic_t *enp);
+
+extern	__checkReturn		efx_rc_t
+ef10_get_privilege_mask(
+	__in			efx_nic_t *enp,
+	__out			uint32_t *maskp);
+
+extern	__checkReturn	efx_rc_t
+ef10_external_port_mapping(
+	__in		efx_nic_t *enp,
+	__in		uint32_t port,
+	__out		uint8_t *external_portp);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_EF10_IMPL_H */
diff --git a/drivers/net/sfc/efx/base/ef10_intr.c b/drivers/net/sfc/efx/base/ef10_intr.c
new file mode 100644
index 0000000..16be3d8
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_intr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+	__checkReturn	efx_rc_t
+ef10_intr_init(
+	__in		efx_nic_t *enp,
+	__in		efx_intr_type_t type,
+	__in		efsys_mem_t *esmp)
+{
+	/* No-op: no EF10-specific interrupt initialisation is needed */
+	_NOTE(ARGUNUSED(enp, type, esmp))
+	return (0);
+}
+
+
+			void
+ef10_intr_enable(
+	__in		efx_nic_t *enp)
+{
+	/* No-op on EF10 */
+	_NOTE(ARGUNUSED(enp))
+}
+
+
+			void
+ef10_intr_disable(
+	__in		efx_nic_t *enp)
+{
+	/* No-op on EF10 */
+	_NOTE(ARGUNUSED(enp))
+}
+
+
+			void
+ef10_intr_disable_unlocked(
+	__in		efx_nic_t *enp)
+{
+	/* No-op on EF10 */
+	_NOTE(ARGUNUSED(enp))
+}
+
+
+/*
+ * Ask the MC (via MC_CMD_TRIGGER_INTERRUPT) to raise a test interrupt on
+ * the given interrupt level. Returns EINVAL if the level is outside the
+ * configured interrupt range (enc_intr_limit).
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_trigger_interrupt(
+	__in		efx_nic_t *enp,
+	__in		unsigned int level)
+{
+	efx_mcdi_req_t req;
+	/* Shared in/out buffer sized for the larger of the two directions */
+	uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+			    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	if (level >= enp->en_nic_cfg.enc_intr_limit) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_intr_trigger(
+	__in		efx_nic_t *enp,
+	__in		unsigned int level)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_rc_t rc;
+
+	/*
+	 * bug 41750: Test interrupts don't work on Greenport
+	 * bug 50084: Test interrupts don't work on VFs
+	 */
+	if (encp->enc_bug41750_workaround) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	rc = efx_mcdi_trigger_interrupt(enp, level);
+	if (rc != 0)
+		goto fail2;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_intr_status_line(
+	__in		efx_nic_t *enp,
+	__out		boolean_t *fatalp,
+	__out		uint32_t *qmaskp)
+{
+	efx_dword_t dword;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/* Read the queue mask and implicitly acknowledge the interrupt. */
+	EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+	*qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+	EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+	/* EF10 fatal errors are reported via events, never via the ISR */
+	*fatalp = B_FALSE;
+}
+
+			void
+ef10_intr_status_message(
+	__in		efx_nic_t *enp,
+	__in		unsigned int message,
+	__out		boolean_t *fatalp)
+{
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	_NOTE(ARGUNUSED(enp, message))
+
+	/* EF10 fatal errors are reported via events */
+	*fatalp = B_FALSE;
+}
+
+			void
+ef10_intr_fatal(
+	__in		efx_nic_t *enp)
+{
+	/* No-op: EF10 fatal errors are reported via events */
+	_NOTE(ARGUNUSED(enp))
+}
+
+			void
+ef10_intr_fini(
+	__in		efx_nic_t *enp)
+{
+	/* No-op: nothing was allocated by ef10_intr_init() */
+	_NOTE(ARGUNUSED(enp))
+}
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_mac.c b/drivers/net/sfc/efx/base/ef10_mac.c
new file mode 100644
index 0000000..7960067
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_mac.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * Poll the PHY link state, cache the advertised capability mask and
+ * flow control settings in the port object, and report the link mode.
+ * On failure *link_modep is set to EFX_LINK_UNKNOWN.
+ */
+	__checkReturn	efx_rc_t
+ef10_mac_poll(
+	__in		efx_nic_t *enp,
+	__out		efx_link_mode_t *link_modep)
+{
+	efx_port_t *epp = &(enp->en_port);
+	ef10_link_state_t els;
+	efx_rc_t rc;
+
+	if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+		goto fail1;
+
+	epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+	epp->ep_fcntl = els.els_fcntl;
+
+	*link_modep = els.els_link_mode;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	*link_modep = EFX_LINK_UNKNOWN;
+
+	return (rc);
+}
+
+/*
+ * Report whether the MAC is up, querying the link state directly
+ * rather than relying on cached poll results (see comment below).
+ */
+	__checkReturn	efx_rc_t
+ef10_mac_up(
+	__in		efx_nic_t *enp,
+	__out		boolean_t *mac_upp)
+{
+	ef10_link_state_t els;
+	efx_rc_t rc;
+
+	/*
+	 * Because EF10 doesn't *require* polling, we can't rely on
+	 * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+	 */
+	if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+		goto fail1;
+
+	*mac_upp = els.els_mac_up;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ *
+ * This wrapper programs the current port MAC address
+ * (epp->ep_mac_addr) into the vadapter identified by
+ * enp->en_vport_id.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_vadapter_set_mac(
+	__in		efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+			    MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+	    enp->en_vport_id);
+	EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+	    epp->ep_mac_addr);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Set the MAC address, preferring MC_CMD_VADAPTOR_SET_MAC and falling
+ * back to a full MAC reconfigure when the firmware reports ENOTSUP
+ * for the vadapter method (older Huntington firmware).
+ */
+	__checkReturn	efx_rc_t
+ef10_mac_addr_set(
+	__in		efx_nic_t *enp)
+{
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+		if (rc != ENOTSUP)
+			goto fail1;
+
+		/*
+		 * Fallback for older Huntington firmware without Vadapter
+		 * support.
+		 */
+		if ((rc = ef10_mac_reconfigure(enp)) != 0)
+			goto fail2;
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * MC_CMD_SET_MAC wrapper that updates only the MTU, using the extended
+ * request format with just the CFG_MTU control bit set so that no
+ * other MAC parameters are modified.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_mtu_set(
+	__in		efx_nic_t *enp,
+	__in		uint32_t mtu)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+			    MC_CMD_SET_MAC_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_MAC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+	/* Only configure the MTU in this call to MC_CMD_SET_MAC */
+	MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+	MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+			    SET_MAC_EXT_IN_CFG_MTU, 1);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Query the current MTU via MC_CMD_SET_MAC with an all-zero CONTROL
+ * word (no fields configured).  Requires the v2 response format;
+ * not supported on older Huntington firmware (see comment below).
+ * Returns EMSGSIZE if the response is too short to contain the MTU.
+ */
+static	__checkReturn		efx_rc_t
+efx_mcdi_mtu_get(
+	__in		efx_nic_t *enp,
+	__out		size_t *mtu)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+			    MC_CMD_SET_MAC_V2_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_MAC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+	/*
+	 * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+	 * MTU.  This should always be supported on Medford, but it is not
+	 * supported on older Huntington firmware.
+	 */
+	MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+	if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Apply the port's PDU (MTU) setting, using the MTU-only MCDI request
+ * when the firmware supports it and a full MAC reconfigure otherwise.
+ */
+	__checkReturn	efx_rc_t
+ef10_mac_pdu_set(
+	__in		efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_rc_t rc;
+
+	if (encp->enc_enhanced_set_mac_supported) {
+		if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+			goto fail1;
+	} else {
+		/*
+		 * Fallback for older Huntington firmware, which always
+		 * configures all of the parameters of MC_CMD_SET_MAC. This
+		 * isn't suitable for setting the MTU on unprivileged
+		 * functions.
+		 */
+		if ((rc = ef10_mac_reconfigure(enp)) != 0)
+			goto fail2;
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Read the current PDU (MTU) from the MC. */
+	__checkReturn		efx_rc_t
+ef10_mac_pdu_get(
+	__in		efx_nic_t *enp,
+	__out		size_t *pdu)
+{
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Push the full MAC configuration (MTU, drain state, address, reject
+ * flags, flow control, FCS stripping) to the MC with MC_CMD_SET_MAC,
+ * then reapply the filters that depend on it.  EACCES from the MC is
+ * tolerated so that unprivileged functions can still set up filters.
+ */
+__checkReturn	efx_rc_t
+ef10_mac_reconfigure(
+	__in		efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+			    MC_CMD_SET_MAC_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_MAC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+	MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+	EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+			    epp->ep_mac_addr);
+
+	/*
+	 * Note: The Huntington MAC does not support REJECT_BRDCST.
+	 * The REJECT_UNCST flag will also prevent multicast traffic
+	 * from reaching the filters. As Huntington filters drop any
+	 * traffic that does not match a filter it is ok to leave the
+	 * MAC running in promiscuous mode. See bug41141.
+	 *
+	 * FIXME: Does REJECT_UNCST behave the same way on Medford?
+	 */
+	MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+				    SET_MAC_IN_REJECT_UNCST, 0,
+				    SET_MAC_IN_REJECT_BRDCST, 0);
+
+	/*
+	 * Flow control, whether it is auto-negotiated or not,
+	 * is set via the PHY advertised capabilities.  When set to
+	 * automatic the MAC will use the PHY settings to determine
+	 * the flow control settings.
+	 */
+	MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+	/* Do not include the Ethernet frame checksum in RX packets */
+	MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+				    SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		/*
+		 * Unprivileged functions cannot control link state,
+		 * but still need to configure filters.
+		 */
+		if (req.emr_rc != EACCES) {
+			rc = req.emr_rc;
+			goto fail1;
+		}
+	}
+
+	/*
+	 * Apply the filters for the MAC configuration.
+	 * If the NIC isn't ready to accept filters this may
+	 * return success without setting anything.
+	 */
+	rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+				    epp->ep_all_unicst, epp->ep_mulcst,
+				    epp->ep_all_mulcst, epp->ep_brdcst,
+				    epp->ep_mulcst_addr_list,
+				    epp->ep_mulcst_addr_count);
+
+	/*
+	 * NOTE(review): rc from efx_filter_reconfigure() above is assigned
+	 * but the function returns 0 regardless -- presumably deliberate
+	 * best-effort behaviour (see comment above); confirm against
+	 * upstream libefx.
+	 */
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Apply the port's multicast address list by performing a full MAC
+ * reconfigure through the bound MAC ops (emo_reconfigure).
+ */
+	__checkReturn			efx_rc_t
+ef10_mac_multicast_list_set(
+	__in				efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+	const efx_mac_ops_t *emop = epp->ep_emop;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	if ((rc = emop->emo_reconfigure(enp)) != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Install a new default RX queue for MAC filters and reapply the
+ * filter configuration.  On failure, the previous default RX queue
+ * (saved before the change) is restored.
+ */
+	__checkReturn	efx_rc_t
+ef10_mac_filter_default_rxq_set(
+	__in		efx_nic_t *enp,
+	__in		efx_rxq_t *erp,
+	__in		boolean_t using_rss)
+{
+	efx_port_t *epp = &(enp->en_port);
+	efx_rxq_t *old_rxq;
+	boolean_t old_using_rss;
+	efx_rc_t rc;
+
+	/* Save the current default RX queue so it can be restored on error */
+	ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+	ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+	rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+				    epp->ep_all_unicst, epp->ep_mulcst,
+				    epp->ep_all_mulcst, epp->ep_brdcst,
+				    epp->ep_mulcst_addr_list,
+				    epp->ep_mulcst_addr_count);
+
+	if (rc != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	/* Roll back to the previous default RX queue */
+	ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+	return (rc);
+}
+
+/*
+ * Clear the default RX queue for MAC filters.  The subsequent filter
+ * reconfigure is best-effort: its return value is ignored, as this
+ * void interface has no way to report failure.
+ */
+			void
+ef10_mac_filter_default_rxq_clear(
+	__in		efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+
+	ef10_filter_default_rxq_clear(enp);
+
+	efx_filter_reconfigure(enp, epp->ep_mac_addr,
+				    epp->ep_all_unicst, epp->ep_mulcst,
+				    epp->ep_all_mulcst, epp->ep_brdcst,
+				    epp->ep_mulcst_addr_list,
+				    epp->ep_mulcst_addr_count);
+}
+
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_mcdi.c b/drivers/net/sfc/efx/base/ef10_mcdi.c
new file mode 100644
index 0000000..5a26bda
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_mcdi.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+/*
+ * Initialise the EF10 MCDI transport: validate the host DMA buffer
+ * (required, and must be 256-byte aligned), reset the MC doorbell to a
+ * known state, record the initial MC reboot status, and start a new
+ * MCDI epoch so fresh requests can succeed.
+ */
+	__checkReturn	efx_rc_t
+ef10_mcdi_init(
+	__in		efx_nic_t *enp,
+	__in		const efx_mcdi_transport_t *emtp)
+{
+	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+	efsys_mem_t *esmp = emtp->emt_dma_mem;
+	efx_dword_t dword;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+	EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+	/*
+	 * All EF10 firmware supports MCDIv2 and MCDIv1.
+	 * Medford BootROM supports MCDIv2 and MCDIv1.
+	 * Huntington BootROM supports MCDIv1 only.
+	 */
+	emip->emi_max_version = 2;
+
+	/* A host DMA buffer is required for EF10 MCDI */
+	if (esmp == NULL) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	/*
+	 * Ensure that the MC doorbell is in a known state before issuing MCDI
+	 * commands. The recovery algorithm requires that the MC command buffer
+	 * must be 256 byte aligned. See bug24769.
+	 */
+	if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+		rc = EINVAL;
+		goto fail2;
+	}
+	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+	/* Save initial MC reboot status */
+	(void) ef10_mcdi_poll_reboot(enp);
+
+	/* Start a new epoch (allow fresh MCDI requests to succeed) */
+	efx_mcdi_new_epoch(enp);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Tear down EF10 MCDI state: clear the new-epoch flag. */
+			void
+ef10_mcdi_fini(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+	emip->emi_new_epoch = B_FALSE;
+}
+
+/*
+ * In older firmware all commands are processed in a single thread, so a long
+ * running command for one PCIe function can block processing for another
+ * function (see bug 61269).
+ *
+ * In newer firmware that supports multithreaded MCDI processing, we can extend
+ * the timeout for long-running requests which we know firmware may choose to
+ * process in a background thread.
+ */
+#define	EF10_MCDI_CMD_TIMEOUT_US	(10 * 1000 * 1000)
+#define	EF10_MCDI_CMD_LONG_TIMEOUT_US	(60 * 1000 * 1000)
+
+/*
+ * Select the MCDI completion timeout for a request: commands that newer
+ * firmware may process in a background thread get the extended timeout,
+ * everything else the standard one.
+ */
+			void
+ef10_mcdi_get_timeout(
+	__in		efx_nic_t *enp,
+	__in		efx_mcdi_req_t *emrp,
+	__out		uint32_t *timeoutp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+	switch (emrp->emr_cmd) {
+	case MC_CMD_POLL_BIST:
+	case MC_CMD_NVRAM_ERASE:
+	case MC_CMD_LICENSING_V3:
+	case MC_CMD_NVRAM_UPDATE_FINISH:
+		if (encp->enc_fw_verified_nvram_update_required != B_FALSE) {
+			/*
+			 * Potentially longer running commands, which firmware
+			 * may choose to process in a background thread.
+			 */
+			*timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;
+			break;
+		}
+		/* FALLTHRU */
+	default:
+		*timeoutp = EF10_MCDI_CMD_TIMEOUT_US;
+		break;
+	}
+}
+
+/*
+ * Copy an MCDI request (header, then payload, one dword at a time) into
+ * the shared host DMA buffer, then ring the MC doorbell with the
+ * buffer's DMA address to start processing.
+ */
+			void
+ef10_mcdi_send_request(
+	__in			efx_nic_t *enp,
+	__in_bcount(hdr_len)	void *hdrp,
+	__in			size_t hdr_len,
+	__in_bcount(sdu_len)	void *sdup,
+	__in			size_t sdu_len)
+{
+	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+	efsys_mem_t *esmp = emtp->emt_dma_mem;
+	efx_dword_t dword;
+	unsigned int pos;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/* Write the header */
+	for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+		dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+		EFSYS_MEM_WRITED(esmp, pos, &dword);
+	}
+
+	/* Write the payload */
+	for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+		dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+		EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+	}
+
+	/* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+	EFSYS_PIO_WRITE_BARRIER();
+
+	/*
+	 * Ring the doorbell to post the command DMA address to the MC.
+	 * NOTE(review): the high address bits go to the LWRD register and
+	 * the low bits to the HWRD register; presumably the second (HWRD)
+	 * write is what triggers command processing -- confirm against the
+	 * EF10 register documentation.
+	 */
+	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+	    EFSYS_MEM_ADDR(esmp) >> 32);
+	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+	EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+/*
+ * Check whether an MCDI response is available by reading the RESPONSE
+ * bit of the MCDI header in the shared host DMA buffer.
+ */
+	__checkReturn	boolean_t
+ef10_mcdi_poll_response(
+	__in		efx_nic_t *enp)
+{
+	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+	efsys_mem_t *esmp = emtp->emt_dma_mem;
+	efx_dword_t hdr;
+
+	EFSYS_MEM_READD(esmp, 0, &hdr);
+	EFSYS_MEM_READ_BARRIER();
+
+	return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+/*
+ * Copy 'length' bytes of the MCDI response out of the shared host DMA
+ * buffer, starting at 'offset'.  Reads a dword at a time; the MIN()
+ * bound handles a final partial dword without overrunning bufferp.
+ */
+			void
+ef10_mcdi_read_response(
+	__in			efx_nic_t *enp,
+	__out_bcount(length)	void *bufferp,
+	__in			size_t offset,
+	__in			size_t length)
+{
+	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+	efsys_mem_t *esmp = emtp->emt_dma_mem;
+	unsigned int pos;
+	efx_dword_t data;
+
+	for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+		EFSYS_MEM_READD(esmp, offset + pos, &data);
+		memcpy((uint8_t *)bufferp + pos, &data,
+		    MIN(sizeof (data), length - pos));
+	}
+}
+
+/*
+ * Detect MC reboot by comparing the MC software status register with
+ * the last value seen.  A detected reboot is deliberately not reported
+ * for now (see the FIXME below), so this always returns 0; the new
+ * status is still cached so the next call has an up-to-date baseline.
+ */
+			efx_rc_t
+ef10_mcdi_poll_reboot(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+	efx_dword_t dword;
+	uint32_t old_status;
+	uint32_t new_status;
+	efx_rc_t rc;
+
+	old_status = emip->emi_mc_reboot_status;
+
+	/* Update MC reboot status word */
+	EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+	new_status = dword.ed_u32[0];
+
+	/* MC has rebooted if the value has changed */
+	if (new_status != old_status) {
+		emip->emi_mc_reboot_status = new_status;
+
+		/*
+		 * FIXME: Ignore detected MC REBOOT for now.
+		 *
+		 * The Siena support for checking for MC reboot from status
+		 * flags is broken - see comments in siena_mcdi_poll_reboot().
+		 * As the generic MCDI code is shared the EF10 reboot
+		 * detection suffers similar problems.
+		 *
+		 * Do not report an error when the boot status changes until
+		 * this can be handled by common code drivers (and reworked to
+		 * support Siena too).
+		 */
+		_NOTE(CONSTANTCONDITION)
+		if (B_FALSE) {
+			rc = EIO;
+			goto fail1;
+		}
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Report whether an MCDI feature is usable by this function, based on
+ * the privilege mask captured at MCDI attach.  Older firmware predates
+ * the finer-grained privilege flags, so ADMIN is also accepted for
+ * each feature.  Returns ENOTSUP for unknown feature ids.
+ */
+	__checkReturn	efx_rc_t
+ef10_mcdi_feature_supported(
+	__in		efx_nic_t *enp,
+	__in		efx_mcdi_feature_id_t id,
+	__out		boolean_t *supportedp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	uint32_t privilege_mask = encp->enc_privilege_mask;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/*
+	 * Use privilege mask state at MCDI attach.
+	 */
+
+	switch (id) {
+	case EFX_MCDI_FEATURE_FW_UPDATE:
+		/*
+		 * Admin privilege must be used prior to introduction of
+		 * specific flag.
+		 */
+		*supportedp =
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+		break;
+	case EFX_MCDI_FEATURE_LINK_CONTROL:
+		/*
+		 * Admin privilege used prior to introduction of
+		 * specific flag.
+		 */
+		*supportedp =
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+		break;
+	case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+		/*
+		 * Admin privilege must be used prior to introduction of
+		 * mac spoofing privilege (at v4.6), which is used up to
+		 * introduction of change mac spoofing privilege (at v4.7)
+		 */
+		*supportedp =
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+		break;
+	case EFX_MCDI_FEATURE_MAC_SPOOFING:
+		/*
+		 * Admin privilege must be used prior to introduction of
+		 * mac spoofing privilege (at v4.6), which is used up to
+		 * introduction of mac spoofing TX privilege (at v4.7)
+		 */
+		*supportedp =
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+		    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+		break;
+	default:
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+#endif	/* EFSYS_OPT_MCDI */
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_nic.c b/drivers/net/sfc/efx/base/ef10_nic.c
new file mode 100644
index 0000000..538e18c
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_nic.c
@@ -0,0 +1,1769 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+/*
+ * MC_CMD_GET_PORT_ASSIGNMENT wrapper: return the port assigned to this
+ * PCIe function.  Returns EMSGSIZE if the response is too short.
+ */
+	__checkReturn	efx_rc_t
+efx_mcdi_get_port_assignment(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *portp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+			    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * MC_CMD_GET_PORT_MODES wrapper: return the bitmask of available port
+ * modes and, optionally, the current mode.  The CurrentMode field was
+ * added for Medford, so its presence is only required when the caller
+ * asks for it (current_modep != NULL).
+ */
+	__checkReturn	efx_rc_t
+efx_mcdi_get_port_modes(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *modesp,
+	__out_opt	uint32_t *current_modep)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+			    MC_CMD_GET_PORT_MODES_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_PORT_MODES;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	/*
+	 * Require only Modes and DefaultMode fields, unless the current mode
+	 * was requested (CurrentMode field was added for Medford).
+	 */
+	if (req.emr_out_length_used <
+	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+	if ((current_modep != NULL) && (req.emr_out_length_used <
+	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	*modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+	if (current_modep != NULL) {
+		*current_modep = MCDI_OUT_DWORD(req,
+					    GET_PORT_MODES_OUT_CURRENT_MODE);
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Map a TLV port mode to its total bandwidth in Mbps (sum across all
+ * ports of the mode).  Returns EINVAL for unknown modes.
+ */
+	__checkReturn	efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+	__in		uint32_t port_mode,
+	__out		uint32_t *bandwidth_mbpsp)
+{
+	uint32_t bandwidth;
+	efx_rc_t rc;
+
+	switch (port_mode) {
+	case TLV_PORT_MODE_10G:
+		bandwidth = 10000;
+		break;
+	case TLV_PORT_MODE_10G_10G:
+		bandwidth = 10000 * 2;
+		break;
+	case TLV_PORT_MODE_10G_10G_10G_10G:
+	case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+	case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
+	case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+		bandwidth = 10000 * 4;
+		break;
+	case TLV_PORT_MODE_40G:
+		bandwidth = 40000;
+		break;
+	case TLV_PORT_MODE_40G_40G:
+		bandwidth = 40000 * 2;
+		break;
+	case TLV_PORT_MODE_40G_10G_10G:
+	case TLV_PORT_MODE_10G_10G_40G:
+		bandwidth = 40000 + (10000 * 2);
+		break;
+	default:
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	*bandwidth_mbpsp = bandwidth;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * MC_CMD_VADAPTOR_ALLOC wrapper: allocate a vadapter on the given
+ * upstream port.  Asserts that no vport id has been assigned yet.
+ */
+static	__checkReturn		efx_rc_t
+efx_mcdi_vadaptor_alloc(
+	__in			efx_nic_t *enp,
+	__in			uint32_t port_id)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+			    MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+	MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+	    VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+	    enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * MC_CMD_VADAPTOR_FREE wrapper: free the vadapter on the given
+ * upstream port.
+ */
+static	__checkReturn		efx_rc_t
+efx_mcdi_vadaptor_free(
+	__in			efx_nic_t *enp,
+	__in			uint32_t port_id)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+			    MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Get the first MAC address assigned to a PF via
+ * MC_CMD_GET_MAC_ADDRESSES.  mac_addrp may be NULL to just verify
+ * that at least one address is present (ENOENT otherwise).
+ */
+	__checkReturn	efx_rc_t
+efx_mcdi_get_mac_address_pf(
+	__in			efx_nic_t *enp,
+	__out_ecount_opt(6)	uint8_t mac_addrp[6])
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+			    MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+		rc = ENOENT;
+		goto fail3;
+	}
+
+	if (mac_addrp != NULL) {
+		uint8_t *addrp;
+
+		addrp = MCDI_OUT2(req, uint8_t,
+		    GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Get the first MAC address assigned to a VF's vport via
+ * MC_CMD_VPORT_GET_MAC_ADDRESSES on EVB_PORT_ID_ASSIGNED.
+ * mac_addrp may be NULL to just verify that at least one address is
+ * present (ENOENT otherwise).
+ */
+	__checkReturn	efx_rc_t
+efx_mcdi_get_mac_address_vf(
+	__in			efx_nic_t *enp,
+	__out_ecount_opt(6)	uint8_t mac_addrp[6])
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+			    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+	MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+	    EVB_PORT_ID_ASSIGNED);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used <
+	    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	if (MCDI_OUT_DWORD(req,
+		VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+		rc = ENOENT;
+		goto fail3;
+	}
+
+	if (mac_addrp != NULL) {
+		uint8_t *addrp;
+
+		addrp = MCDI_OUT2(req, uint8_t,
+		    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * MC_CMD_GET_CLOCK wrapper: read the system and DPCPU clock
+ * frequencies.  A zero frequency in either field is rejected as
+ * invalid (EINVAL).
+ */
+	__checkReturn	efx_rc_t
+efx_mcdi_get_clock(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *sys_freqp,
+	__out		uint32_t *dpcpu_freqp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+			    MC_CMD_GET_CLOCK_OUT_LEN)];
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_CLOCK;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+	if (*sys_freqp == 0) {
+		rc = EINVAL;
+		goto fail3;
+	}
+	*dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+	if (*dpcpu_freqp == 0) {
+		rc = EINVAL;
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+efx_mcdi_get_vector_cfg(
+	__in		efx_nic_t *enp,
+	__out_opt	uint32_t *vec_basep,
+	__out_opt	uint32_t *pf_nvecp,
+	__out_opt	uint32_t *vf_nvecp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+			    MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	if (vec_basep != NULL)
+		*vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+	if (pf_nvecp != NULL)
+		*pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+	if (vf_nvecp != NULL)
+		*vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_get_capabilities(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *flagsp,
+	__out		uint32_t *flags2p,
+	__out		uint32_t *tso2ncp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+			    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
+		*flags2p = 0;
+		*tso2ncp = 0;
+	} else {
+		*flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
+		*tso2ncp = MCDI_OUT_WORD(req,
+				GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_alloc_vis(
+	__in		efx_nic_t *enp,
+	__in		uint32_t min_vi_count,
+	__in		uint32_t max_vi_count,
+	__out		uint32_t *vi_basep,
+	__out		uint32_t *vi_countp,
+	__out		uint32_t *vi_shiftp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+			    MC_CMD_ALLOC_VIS_OUT_LEN)];
+	efx_rc_t rc;
+
+	if (vi_countp == NULL) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_ALLOC_VIS;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	*vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+	*vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+	/* Report VI_SHIFT if available (always zero for Huntington) */
+	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+		*vi_shiftp = 0;
+	else
+		*vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_free_vis(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+	req.emr_cmd = MC_CMD_FREE_VIS;
+	req.emr_in_buf = NULL;
+	req.emr_in_length = 0;
+	req.emr_out_buf = NULL;
+	req.emr_out_length = 0;
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
+	if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_alloc_piobuf(
+	__in		efx_nic_t *enp,
+	__out		efx_piobuf_handle_t *handlep)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+			    MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+	efx_rc_t rc;
+
+	if (handlep == NULL) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	*handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_free_piobuf(
+	__in		efx_nic_t *enp,
+	__in		efx_piobuf_handle_t handle)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+			    MC_CMD_FREE_PIOBUF_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FREE_PIOBUF;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_link_piobuf(
+	__in		efx_nic_t *enp,
+	__in		uint32_t vi_index,
+	__in		efx_piobuf_handle_t handle)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+			    MC_CMD_LINK_PIOBUF_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_LINK_PIOBUF;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static	__checkReturn	efx_rc_t
+efx_mcdi_unlink_piobuf(
+	__in		efx_nic_t *enp,
+	__in		uint32_t vi_index)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+			    MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+static			void
+ef10_nic_alloc_piobufs(
+	__in		efx_nic_t *enp,
+	__in		uint32_t max_piobuf_count)
+{
+	efx_piobuf_handle_t *handlep;
+	unsigned int i;
+
+	EFSYS_ASSERT3U(max_piobuf_count, <=,
+	    EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+	enp->en_arch.ef10.ena_piobuf_count = 0;
+
+	for (i = 0; i < max_piobuf_count; i++) {
+		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+		if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+			goto fail1;
+
+		enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+		enp->en_arch.ef10.ena_piobuf_count++;
+	}
+
+	return;
+
+fail1:
+	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+		efx_mcdi_free_piobuf(enp, *handlep);
+		*handlep = EFX_PIOBUF_HANDLE_INVALID;
+	}
+	enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static			void
+ef10_nic_free_piobufs(
+	__in		efx_nic_t *enp)
+{
+	efx_piobuf_handle_t *handlep;
+	unsigned int i;
+
+	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+		efx_mcdi_free_piobuf(enp, *handlep);
+		*handlep = EFX_PIOBUF_HANDLE_INVALID;
+	}
+	enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+	__checkReturn	efx_rc_t
+ef10_nic_pio_alloc(
+	__inout		efx_nic_t *enp,
+	__out		uint32_t *bufnump,
+	__out		efx_piobuf_handle_t *handlep,
+	__out		uint32_t *blknump,
+	__out		uint32_t *offsetp,
+	__out		size_t *sizep)
+{
+	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+	efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+	uint32_t blk_per_buf;
+	uint32_t buf, blk;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+	EFSYS_ASSERT(bufnump);
+	EFSYS_ASSERT(handlep);
+	EFSYS_ASSERT(blknump);
+	EFSYS_ASSERT(offsetp);
+	EFSYS_ASSERT(sizep);
+
+	if ((edcp->edc_pio_alloc_size == 0) ||
+	    (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+	blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+	for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+		uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+		if (~(*map) == 0)
+			continue;
+
+		EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+		for (blk = 0; blk < blk_per_buf; blk++) {
+			if ((*map & (1u << blk)) == 0) {
+				*map |= (1u << blk);
+				goto done;
+			}
+		}
+	}
+	rc = ENOMEM;
+	goto fail2;
+
+done:
+	*handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+	*bufnump = buf;
+	*blknump = blk;
+	*sizep = edcp->edc_pio_alloc_size;
+	*offsetp = blk * (*sizep);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+	__checkReturn	efx_rc_t
+ef10_nic_pio_free(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t bufnum,
+	__in		uint32_t blknum)
+{
+	uint32_t *map;
+	efx_rc_t rc;
+
+	if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+	    (blknum >= (8 * sizeof (*map)))) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+	if ((*map & (1u << blknum)) == 0) {
+		rc = ENOENT;
+		goto fail2;
+	}
+	*map &= ~(1u << blknum);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_pio_link(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t vi_index,
+	__in		efx_piobuf_handle_t handle)
+{
+	return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_pio_unlink(
+	__inout		efx_nic_t *enp,
+	__in		uint32_t vi_index)
+{
+	return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+static	__checkReturn	efx_rc_t
+ef10_mcdi_get_pf_count(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *pf_countp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
+			    MC_CMD_GET_PF_COUNT_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_PF_COUNT;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*pf_countp = *MCDI_OUT(req, uint8_t,
+				MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
+
+	EFSYS_ASSERT(*pf_countp != 0);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_get_datapath_caps(
+	__in		efx_nic_t *enp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	uint32_t flags;
+	uint32_t flags2;
+	uint32_t tso2nc;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2,
+					    &tso2nc)) != 0)
+		goto fail1;
+
+	if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
+		goto fail1;
+
+#define	CAP_FLAG(flags1, field)		\
+	((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define	CAP_FLAG2(flags2, field)	\
+	((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+	/*
+	 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+	 * We only support the 14 byte prefix here.
+	 */
+	if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+		rc = ENOTSUP;
+		goto fail2;
+	}
+	encp->enc_rx_prefix_size = 14;
+
+	/* Check if the firmware supports TSO */
+	encp->enc_fw_assisted_tso_enabled =
+	    CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+	/* Check if the firmware supports FATSOv2 */
+	encp->enc_fw_assisted_tso_v2_enabled =
+	    CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+	/* Get the number of TSO contexts (FATSOv2) */
+	encp->enc_fw_assisted_tso_v2_n_contexts =
+		CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+
+	/* Check if the firmware has vadapter/vport/vswitch support */
+	encp->enc_datapath_cap_evb =
+	    CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+	/* Check if the firmware supports VLAN insertion */
+	encp->enc_hw_tx_insert_vlan_enabled =
+	    CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+	/* Check if the firmware supports RX event batching */
+	encp->enc_rx_batching_enabled =
+	    CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+	/*
+	 * Even if batching isn't reported as supported, we may still get
+	 * batched events (see bug61153).
+	 */
+	encp->enc_rx_batch_max = 16;
+
+	/* Check if the firmware supports disabling scatter on RXQs */
+	encp->enc_rx_disable_scatter_supported =
+	    CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+	/* Check if the firmware supports packed stream mode */
+	encp->enc_rx_packed_stream_supported =
+	    CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+
+	/*
+	 * Check if the firmware supports configurable buffer sizes
+	 * for packed stream mode (otherwise buffer size is 1Mbyte)
+	 */
+	encp->enc_rx_var_packed_stream_supported =
+	    CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+
+	/* Check if the firmware supports set mac with running filters */
+	encp->enc_allow_set_mac_with_installed_filters =
+	    CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+	    B_TRUE : B_FALSE;
+
+	/*
+	 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+	 * specifying which parameters to configure.
+	 */
+	encp->enc_enhanced_set_mac_supported =
+		CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+	/*
+	 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
+	 * us to let the firmware choose the settings to use on an EVQ.
+	 */
+	encp->enc_init_evq_v2_supported =
+		CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+
+	/*
+	 * Check if firmware-verified NVRAM updates must be used.
+	 *
+	 * The firmware trusted installer requires all NVRAM updates to use
+	 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
+	 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
+	 * partition and report the result).
+	 */
+	encp->enc_fw_verified_nvram_update_required =
+	    CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
+	    B_TRUE : B_FALSE;
+
+	/*
+	 * Check if firmware provides packet memory and Rx datapath
+	 * counters.
+	 */
+	encp->enc_pm_and_rxdp_counters =
+	    CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+
+	/*
+	 * Check if the 40G MAC hardware is capable of reporting
+	 * statistics for Tx size bins.
+	 */
+	encp->enc_mac_stats_40g_tx_size_bins =
+	    CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+#define	EF10_LEGACY_PF_PRIVILEGE_MASK					\
+	(MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS		|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING		|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST			|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST		|	\
+	MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define	EF10_LEGACY_VF_PRIVILEGE_MASK	0
+
+
+	__checkReturn		efx_rc_t
+ef10_get_privilege_mask(
+	__in			efx_nic_t *enp,
+	__out			uint32_t *maskp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	uint32_t mask;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+					    &mask)) != 0) {
+		if (rc != ENOTSUP)
+			goto fail1;
+
+		/* Fallback for old firmware without privilege mask support */
+		if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+			/* Assume PF has admin privilege */
+			mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+		} else {
+			/* VF is always unprivileged by default */
+			mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+		}
+	}
+
+	*maskp = mask;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+/*
+ * Table of mapping schemes from port number to the number of the external
+ * connector on the board. The external numbering does not distinguish
+ * off-board separated outputs such as from multi-headed cables.
+ *
+ * The count of adjacent port numbers that map to each external port
+ * and the offset in the numbering, is determined by the chip family and
+ * current port mode.
+ *
+ * For the Huntington family, the current port mode cannot be discovered,
+ * so the mapping used is instead the last match in the table to the full
+ * set of port modes to which the NIC can be configured. Therefore the
+ * ordering of entries in the mapping table is significant.
+ */
+static struct {
+	efx_family_t	family;
+	uint32_t	modes_mask;
+	int32_t		count;
+	int32_t		offset;
+}	__ef10_external_port_mappings[] = {
+	/* Supported modes with 1 output per external port */
+	{
+		EFX_FAMILY_HUNTINGTON,
+		(1 << TLV_PORT_MODE_10G) |
+		(1 << TLV_PORT_MODE_10G_10G) |
+		(1 << TLV_PORT_MODE_10G_10G_10G_10G),
+		1,
+		1
+	},
+	{
+		EFX_FAMILY_MEDFORD,
+		(1 << TLV_PORT_MODE_10G) |
+		(1 << TLV_PORT_MODE_10G_10G),
+		1,
+		1
+	},
+	/* Supported modes with 2 outputs per external port */
+	{
+		EFX_FAMILY_HUNTINGTON,
+		(1 << TLV_PORT_MODE_40G) |
+		(1 << TLV_PORT_MODE_40G_40G) |
+		(1 << TLV_PORT_MODE_40G_10G_10G) |
+		(1 << TLV_PORT_MODE_10G_10G_40G),
+		2,
+		1
+	},
+	{
+		EFX_FAMILY_MEDFORD,
+		(1 << TLV_PORT_MODE_40G) |
+		(1 << TLV_PORT_MODE_40G_40G) |
+		(1 << TLV_PORT_MODE_40G_10G_10G) |
+		(1 << TLV_PORT_MODE_10G_10G_40G) |
+		(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
+		2,
+		1
+	},
+	/* Supported modes with 4 outputs per external port */
+	{
+		EFX_FAMILY_MEDFORD,
+		(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+		(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
+		4,
+		1,
+	},
+	{
+		EFX_FAMILY_MEDFORD,
+		(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+		4,
+		2
+	},
+};
+
+	__checkReturn	efx_rc_t
+ef10_external_port_mapping(
+	__in		efx_nic_t *enp,
+	__in		uint32_t port,
+	__out		uint8_t *external_portp)
+{
+	efx_rc_t rc;
+	int i;
+	uint32_t port_modes;
+	uint32_t matches;
+	uint32_t current;
+	int32_t count = 1; /* Default 1-1 mapping */
+	int32_t offset = 1; /* Default starting external port number */
+
+	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+		/*
+		 * No current port mode information
+		 * - infer mapping from available modes
+		 */
+		if ((rc = efx_mcdi_get_port_modes(enp,
+			    &port_modes, NULL)) != 0) {
+			/*
+			 * No port mode information available
+			 * - use default mapping
+			 */
+			goto out;
+		}
+	} else {
+		/* Only need to scan the current mode */
+		port_modes = 1 << current;
+	}
+
+	/*
+	 * Infer the internal port -> external port mapping from
+	 * the possible port modes for this NIC.
+	 */
+	for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+		if (__ef10_external_port_mappings[i].family !=
+		    enp->en_family)
+			continue;
+		matches = (__ef10_external_port_mappings[i].modes_mask &
+		    port_modes);
+		if (matches != 0) {
+			count = __ef10_external_port_mappings[i].count;
+			offset = __ef10_external_port_mappings[i].offset;
+			port_modes &= ~matches;
+		}
+	}
+
+	if (port_modes != 0) {
+		/* Some advertised modes are not supported */
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+out:
+	/*
+	 * Scale as required by last matched mode and then convert to
+	 * correctly offset numbering
+	 */
+	*external_portp = (uint8_t)((port / count) + offset);
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+	__checkReturn	efx_rc_t
+ef10_nic_probe(
+	__in		efx_nic_t *enp)
+{
+	const efx_nic_ops_t *enop = enp->en_enop;
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/* Read and clear any assertion state */
+	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+		goto fail1;
+
+	/* Exit the assertion handler */
+	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+		if (rc != EACCES)
+			goto fail2;
+
+	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+		goto fail3;
+
+	if ((rc = enop->eno_board_cfg(enp)) != 0)
+		if (rc != EACCES)
+			goto fail4;
+
+	/*
+	 * Set default driver config limits (based on board config).
+	 *
+	 * FIXME: For now allocate a fixed number of VIs which is likely to be
+	 * sufficient and small enough to allow multiple functions on the same
+	 * port.
+	 */
+	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+	/* The client driver must configure and enable PIO buffer support */
+	edcp->edc_max_piobuf_count = 0;
+	edcp->edc_pio_alloc_size = 0;
+
+	encp->enc_features = enp->en_features;
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_set_drv_limits(
+	__inout		efx_nic_t *enp,
+	__in		efx_drv_limits_t *edlp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+	uint32_t min_evq_count, max_evq_count;
+	uint32_t min_rxq_count, max_rxq_count;
+	uint32_t min_txq_count, max_txq_count;
+	efx_rc_t rc;
+
+	if (edlp == NULL) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	/* Get minimum required and maximum usable VI limits */
+	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+	edcp->edc_min_vi_count =
+	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+	edcp->edc_max_vi_count =
+	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+	/*
+	 * Check limits for sub-allocated piobuf blocks.
+	 * PIO is optional, so don't fail if the limits are incorrect.
+	 */
+	if ((encp->enc_piobuf_size == 0) ||
+	    (encp->enc_piobuf_limit == 0) ||
+	    (edlp->edl_min_pio_alloc_size == 0) ||
+	    (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+		/* Disable PIO */
+		edcp->edc_max_piobuf_count = 0;
+		edcp->edc_pio_alloc_size = 0;
+	} else {
+		uint32_t blk_size, blk_count, blks_per_piobuf;
+
+		blk_size =
+		    MAX(edlp->edl_min_pio_alloc_size,
+			    encp->enc_piobuf_min_alloc_size);
+
+		blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+		EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+		blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+		/* A zero max pio alloc count means unlimited */
+		if ((edlp->edl_max_pio_alloc_count > 0) &&
+		    (edlp->edl_max_pio_alloc_count < blk_count)) {
+			blk_count = edlp->edl_max_pio_alloc_count;
+		}
+
+		edcp->edc_pio_alloc_size = blk_size;
+		edcp->edc_max_piobuf_count =
+		    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+	__checkReturn	efx_rc_t
+ef10_nic_reset(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+			    MC_CMD_ENTITY_RESET_OUT_LEN)];
+	efx_rc_t rc;
+
+	/* ef10_nic_reset() is called to recover from BADASSERT failures. */
+	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+		goto fail1;
+	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+		goto fail2;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_ENTITY_RESET;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+	MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+	    ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+
+	/* Clear RX/TX DMA queue errors */
+	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_init(
+	__in		efx_nic_t *enp)
+{
+	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+	uint32_t min_vi_count, max_vi_count;
+	uint32_t vi_count, vi_base, vi_shift;
+	uint32_t i;
+	uint32_t retry;
+	uint32_t delay_us;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/* Enable reporting of some events (e.g. link change) */
+	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+		goto fail1;
+
+	/* Allocate (optional) on-chip PIO buffers */
+	ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+	/*
+	 * For best performance, PIO writes should use a write-combined
+	 * (WC) memory mapping. Using a separate WC mapping for the PIO
+	 * aperture of each VI would be a burden to drivers (and not
+	 * possible if the host page size is >4Kbyte).
+	 *
+	 * To avoid this we use a single uncached (UC) mapping for VI
+	 * register access, and a single WC mapping for extra VIs used
+	 * for PIO writes.
+	 *
+	 * Each piobuf must be linked to a VI in the WC mapping, and to
+	 * each VI that is using a sub-allocated block from the piobuf.
+	 */
+	min_vi_count = edcp->edc_min_vi_count;
+	max_vi_count =
+	    edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+	/* Ensure that the previously attached driver's VIs are freed */
+	if ((rc = efx_mcdi_free_vis(enp)) != 0)
+		goto fail2;
+
+	/*
+	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+	 * fails then retrying the request for fewer VI resources may succeed.
+	 */
+	vi_count = 0;
+	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+		    &vi_base, &vi_count, &vi_shift)) != 0)
+		goto fail3;
+
+	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+	if (vi_count < min_vi_count) {
+		rc = ENOMEM;
+		goto fail4;
+	}
+
+	enp->en_arch.ef10.ena_vi_base = vi_base;
+	enp->en_arch.ef10.ena_vi_count = vi_count;
+	enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+	if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+		/* Not enough extra VIs to map piobufs */
+		ef10_nic_free_piobufs(enp);
+	}
+
+	enp->en_arch.ef10.ena_pio_write_vi_base =
+	    vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+	/* Save UC memory mapping details */
+	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+		enp->en_arch.ef10.ena_uc_mem_map_size =
+		    (ER_DZ_TX_PIOBUF_STEP *
+		    enp->en_arch.ef10.ena_pio_write_vi_base);
+	} else {
+		enp->en_arch.ef10.ena_uc_mem_map_size =
+		    (ER_DZ_TX_PIOBUF_STEP *
+		    enp->en_arch.ef10.ena_vi_count);
+	}
+
+	/* Save WC memory mapping details */
+	enp->en_arch.ef10.ena_wc_mem_map_offset =
+	    enp->en_arch.ef10.ena_uc_mem_map_offset +
+	    enp->en_arch.ef10.ena_uc_mem_map_size;
+
+	enp->en_arch.ef10.ena_wc_mem_map_size =
+	    (ER_DZ_TX_PIOBUF_STEP *
+	    enp->en_arch.ef10.ena_piobuf_count);
+
+	/* Link piobufs to extra VIs in WC mapping */
+	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+			rc = efx_mcdi_link_piobuf(enp,
+			    enp->en_arch.ef10.ena_pio_write_vi_base + i,
+			    enp->en_arch.ef10.ena_piobuf_handle[i]);
+			if (rc != 0)
+				break;
+		}
+	}
+
+	/*
+	 * Allocate a vAdaptor attached to our upstream vPort/pPort.
+	 *
+	 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+	 * driver has yet to bring up the EVB port. See bug 56147. In this case,
+	 * retry the request several times after waiting a while. The wait time
+	 * between retries starts small (10ms) and exponentially increases.
+	 * Total wait time is a little over two seconds. Retry logic in the
+	 * client driver may mean this whole loop is repeated if it continues to
+	 * fail.
+	 */
+	retry = 0;
+	delay_us = 10000;
+	while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+		if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+		    (rc != ENOENT)) {
+			/*
+			 * Do not retry alloc for PF, or for other errors on
+			 * a VF.
+			 */
+			goto fail5;
+		}
+
+		/* VF startup before PF is ready. Retry allocation. */
+		if (retry > 5) {
+			/* Too many attempts */
+			rc = EINVAL;
+			goto fail6;
+		}
+		EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+		EFSYS_SLEEP(delay_us);
+		retry++;
+		if (delay_us < 500000)
+			delay_us <<= 2;
+	}
+
+	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+	return (0);
+
+fail6:
+	EFSYS_PROBE(fail6);
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+
+	ef10_nic_free_piobufs(enp);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_get_vi_pool(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *vi_countp)
+{
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/*
+	 * Report VIs that the client driver can use.
+	 * Do not include VIs used for PIO buffer writes.
+	 */
+	*vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+	return (0);
+}
+
+	__checkReturn	efx_rc_t
+ef10_nic_get_bar_region(
+	__in		efx_nic_t *enp,
+	__in		efx_nic_region_t region,
+	__out		uint32_t *offsetp,
+	__out		size_t *sizep)
+{
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+		    enp->en_family == EFX_FAMILY_MEDFORD);
+
+	/*
+	 * TODO: Specify host memory mapping alignment and granularity
+	 * in efx_drv_limits_t so that they can be taken into account
+	 * when allocating extra VIs for PIO writes.
+	 */
+	switch (region) {
+	case EFX_REGION_VI:
+		/* UC mapped memory BAR region for VI registers */
+		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+		break;
+
+	case EFX_REGION_PIO_WRITE_VI:
+		/* WC mapped memory BAR region for piobuf writes */
+		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+		break;
+
+	default:
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_nic_fini(
+	__in		efx_nic_t *enp)
+{
+	uint32_t i;
+	efx_rc_t rc;
+
+	(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+	enp->en_vport_id = 0;
+
+	/* Unlink piobufs from extra VIs in WC mapping */
+	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+			rc = efx_mcdi_unlink_piobuf(enp,
+			    enp->en_arch.ef10.ena_pio_write_vi_base + i);
+			if (rc != 0)
+				break;
+		}
+	}
+
+	ef10_nic_free_piobufs(enp);
+
+	(void) efx_mcdi_free_vis(enp);
+	enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+			void
+ef10_nic_unprobe(
+	__in		efx_nic_t *enp)
+{
+	(void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_phy.c b/drivers/net/sfc/efx/base/ef10_phy.c
new file mode 100644
index 0000000..36e2603
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_phy.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * Translate an MCDI PHY capability bitmask (bits at the
+ * MC_CMD_PHY_CAP_*_LBN positions) into the corresponding
+ * EFX_PHY_CAP_* bitmask.  MCDI bits with no EFX equivalent
+ * are silently dropped.
+ */
+static			void
+mcdi_phy_decode_cap(
+	__in		uint32_t mcdi_cap,
+	__out		uint32_t *maskp)
+{
+	uint32_t mask;
+
+	mask = 0;
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_10HDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_10FDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_100HDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_100FDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_1000HDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_1000FDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_10000FDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+		mask |= (1 << EFX_PHY_CAP_40000FDX);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+		mask |= (1 << EFX_PHY_CAP_PAUSE);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		mask |= (1 << EFX_PHY_CAP_ASYM);
+	if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		mask |= (1 << EFX_PHY_CAP_AN);
+
+	*maskp = mask;
+}
+
+/*
+ * Decode MCDI link state (GET_LINK flags, speed in Mbit/s, and MCDI
+ * flow control word) into the EFX link mode and EFX_FCNTL_* flags.
+ * Unrecognised speed/duplex combinations map to EFX_LINK_UNKNOWN;
+ * an unrecognised fcntl value is reported via a probe and treated
+ * as flow control off.
+ */
+static			void
+mcdi_phy_decode_link_mode(
+	__in		efx_nic_t *enp,
+	__in		uint32_t link_flags,
+	__in		unsigned int speed,
+	__in		unsigned int fcntl,
+	__out		efx_link_mode_t *link_modep,
+	__out		unsigned int *fcntlp)
+{
+	boolean_t fd = !!(link_flags &
+		    (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+	boolean_t up = !!(link_flags &
+		    (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+	_NOTE(ARGUNUSED(enp))
+
+	if (!up)
+		*link_modep = EFX_LINK_DOWN;
+	else if (speed == 40000 && fd)
+		*link_modep = EFX_LINK_40000FDX;
+	else if (speed == 10000 && fd)
+		*link_modep = EFX_LINK_10000FDX;
+	else if (speed == 1000)
+		*link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+	else if (speed == 100)
+		*link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+	else if (speed == 10)
+		*link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+	else
+		*link_modep = EFX_LINK_UNKNOWN;
+
+	if (fcntl == MC_CMD_FCNTL_OFF)
+		*fcntlp = 0;
+	else if (fcntl == MC_CMD_FCNTL_RESPOND)
+		*fcntlp = EFX_FCNTL_RESPOND;
+	else if (fcntl == MC_CMD_FCNTL_GENERATE)
+		*fcntlp = EFX_FCNTL_GENERATE;
+	else if (fcntl == MC_CMD_FCNTL_BIDIR)
+		*fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+	else {
+		/* Protocol error: firmware sent an unknown fcntl value */
+		EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+		*fcntlp = 0;
+	}
+}
+
+
+/*
+ * Decode a LINKCHANGE MCDI event: update the cached link partner
+ * capabilities and flow control state in the port, and return the
+ * new link mode to the caller.
+ */
+			void
+ef10_phy_link_ev(
+	__in		efx_nic_t *enp,
+	__in		efx_qword_t *eqp,
+	__out		efx_link_mode_t *link_modep)
+{
+	efx_port_t *epp = &(enp->en_port);
+	unsigned int link_flags;
+	unsigned int speed;
+	unsigned int fcntl;
+	efx_link_mode_t link_mode;
+	uint32_t lp_cap_mask;
+
+	/*
+	 * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+	 * same way as GET_LINK encodes the speed
+	 */
+	switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+	case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+		speed = 100;
+		break;
+	case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+		speed = 1000;
+		break;
+	case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+		speed = 10000;
+		break;
+	case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+		speed = 40000;
+		break;
+	default:
+		/* Unknown encoding; decodes to EFX_LINK_UNKNOWN below */
+		speed = 0;
+		break;
+	}
+
+	link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+	mcdi_phy_decode_link_mode(enp, link_flags, speed,
+				    MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+				    &link_mode, &fcntl);
+	mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+			    &lp_cap_mask);
+
+	/*
+	 * It's safe to update ep_lp_cap_mask without the driver's port lock
+	 * because presumably any concurrently running efx_port_poll() is
+	 * only going to arrive at the same value.
+	 *
+	 * ep_fcntl has two meanings. It's either the link common fcntl
+	 * (if the PHY supports AN), or it's the forced link state. If
+	 * the former, it's safe to update the value for the same reason as
+	 * for ep_lp_cap_mask. If the latter, then just ignore the value,
+	 * because we can race with efx_mac_fcntl_set().
+	 */
+	epp->ep_lp_cap_mask = lp_cap_mask;
+	epp->ep_fcntl = fcntl;
+
+	*link_modep = link_mode;
+}
+
+/*
+ * Power the PHY on or off.  EF10 PHYs are managed by firmware, so
+ * power-off is a no-op; power-on only verifies the PHY is healthy
+ * and flags that a PHY reset has (effectively) occurred.
+ */
+	__checkReturn	efx_rc_t
+ef10_phy_power(
+	__in		efx_nic_t *enp,
+	__in		boolean_t power)
+{
+	efx_rc_t rc;
+
+	if (!power)
+		return (0);
+
+	/* Check if the PHY is a zombie */
+	if ((rc = ef10_phy_verify(enp)) != 0)
+		goto fail1;
+
+	enp->en_reset_flags |= EFX_RESET_PHY;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Query the current link state via MC_CMD_GET_LINK and decode it into
+ * *elsp: advertised and link-partner capability masks, link mode,
+ * flow control, and MAC fault status.
+ */
+	__checkReturn	efx_rc_t
+ef10_phy_get_link(
+	__in		efx_nic_t *enp,
+	__out		ef10_link_state_t *elsp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+			    MC_CMD_GET_LINK_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_LINK;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	/* A short response cannot be decoded; treat it as an error */
+	if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+			    &elsp->els_adv_cap_mask);
+	mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+			    &elsp->els_lp_cap_mask);
+
+	mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+			    MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+			    MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+			    &elsp->els_link_mode, &elsp->els_fcntl);
+
+	/* MAC is considered up only when no fault bits are reported */
+	elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Push the port's advertised capability mask to the firmware with
+ * MC_CMD_SET_LINK (loopback off) and restore the identification LED
+ * to its default state.  A no-op (success) if this function does not
+ * have link control permission.
+ */
+	__checkReturn	efx_rc_t
+ef10_phy_reconfigure(
+	__in		efx_nic_t *enp)
+{
+	efx_port_t *epp = &(enp->en_port);
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+			    MC_CMD_SET_LINK_OUT_LEN)];
+	uint32_t cap_mask;
+	unsigned int led_mode;
+	unsigned int speed;
+	boolean_t supported;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)
+		goto fail1;
+	if (supported == B_FALSE)
+		goto out;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_LINK;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+	cap_mask = epp->ep_adv_cap_mask;
+	MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+		PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+		PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+		PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+		PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+		PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+		PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+		PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+		PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+		PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+		PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for the POPULATE macros, so insert this afterwards */
+	MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+	    PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+	speed = 0;
+	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+	MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	/* And set the blink mode */
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_SET_ID_LED;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+out:
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Check PHY health via MC_CMD_GET_PHY_STATE.  Returns 0 when the
+ * firmware reports the PHY OK, ENOTACTIVE when it is a zombie (or in
+ * any other non-OK state, which is additionally reported via a probe
+ * as a protocol error).
+ */
+	__checkReturn	efx_rc_t
+ef10_phy_verify(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+			    MC_CMD_GET_PHY_STATE_OUT_LEN)];
+	uint32_t state;
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_PHY_STATE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+	if (state != MC_CMD_PHY_STATE_OK) {
+		if (state != MC_CMD_PHY_STATE_ZOMBIE)
+			EFSYS_PROBE1(mc_pcol_error, int, state);
+		rc = ENOTACTIVE;
+		goto fail3;
+	}
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Reading the PHY OUI is not supported on EF10 NICs. */
+	__checkReturn	efx_rc_t
+ef10_phy_oui_get(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *ouip)
+{
+	_NOTE(ARGUNUSED(enp, ouip))
+
+	return (ENOTSUP);
+}
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_rx.c b/drivers/net/sfc/efx/base/ef10_rx.c
new file mode 100644
index 0000000..170125e
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_rx.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+
+/*
+ * Issue MC_CMD_INIT_RXQ (extended format) to create an RX queue of
+ * `size` descriptors on event queue `target_evq`, backed by the DMA
+ * buffers described by esmp.  ps_bufsize > 0 selects packed-stream
+ * DMA mode with the given buffer size; otherwise single-packet mode.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_init_rxq(
+	__in		efx_nic_t *enp,
+	__in		uint32_t size,
+	__in		uint32_t target_evq,
+	__in		uint32_t label,
+	__in		uint32_t instance,
+	__in		efsys_mem_t *esmp,
+	__in		boolean_t disable_scatter,
+	__in		uint32_t ps_bufsize)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
+			    MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+	int npages = EFX_RXQ_NBUFS(size);
+	int i;
+	efx_qword_t *dma_addr;
+	uint64_t addr;
+	efx_rc_t rc;
+	uint32_t dma_mode;
+
+	/* If this changes, then the payload size might need to change. */
+	EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
+	EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+
+	if (ps_bufsize > 0)
+		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+	else
+		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_INIT_RXQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+	MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,
+	    INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+	    INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+	    INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+	    INIT_RXQ_EXT_IN_CRC_MODE, 0,
+	    INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+	    INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+	    INIT_RXQ_EXT_IN_DMA_MODE,
+	    dma_mode,
+	    INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+	/*
+	 * NOTE(review): uses the non-EXT INIT_RXQ_IN_DMA_ADDR field name;
+	 * presumably the DMA address array is at the same offset in the
+	 * extended request — confirm against the MCDI protocol definitions.
+	 */
+	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+	addr = EFSYS_MEM_ADDR(esmp);
+
+	/* Fill in the 64-bit DMA address of each descriptor ring page */
+	for (i = 0; i < npages; i++) {
+		EFX_POPULATE_QWORD_2(*dma_addr,
+		    EFX_DWORD_1, (uint32_t)(addr >> 32),
+		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+		dma_addr++;
+		addr += EFX_BUF_SIZE;
+	}
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Issue MC_CMD_FINI_RXQ to destroy the RX queue `instance`.
+ * MC_CMD_ERR_EALREADY is tolerated: the queue may already have been
+ * destroyed (e.g. by an MC reboot), which is not an error here.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_fini_rxq(
+	__in		efx_nic_t *enp,
+	__in		uint32_t instance)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+			    MC_CMD_FINI_RXQ_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FINI_RXQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+	/* Quiet variant: expected EALREADY must not be logged as an error */
+	efx_mcdi_execute_quiet(enp, &req);
+
+	if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+/*
+ * Module-level RX initialisation.  No EF10-specific setup is needed,
+ * so this always succeeds.  (NOTE(review): enp is unused here but the
+ * usual _NOTE(ARGUNUSED(enp)) annotation is missing — compare
+ * ef10_rx_fini() below.)
+ */
+	__checkReturn	efx_rc_t
+ef10_rx_init(
+	__in		efx_nic_t *enp)
+{
+
+	return (0);
+}
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
+ *
+ *  +00: Toeplitz hash value.
+ *       (32bit little-endian)
+ *  +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ *       (16bit big-endian)
+ *  +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ *       (16bit big-endian)
+ *  +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ *       (16bit little-endian)
+ *  +10: MAC timestamp. Zero if timestamping is not enabled.
+ *       (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
+
+/*
+ * Extract the packet length from the 14-byte RX pseudo-header
+ * prepended to each received packet (see layout comment above).
+ */
+	__checkReturn	efx_rc_t
+ef10_rx_prefix_pktlen(
+	__in		efx_nic_t *enp,
+	__in		uint8_t *buffer,
+	__out		uint16_t *lengthp)
+{
+	_NOTE(ARGUNUSED(enp))
+
+	/*
+	 * The RX pseudo-header contains the packet length, excluding the
+	 * pseudo-header. If the hardware receive datapath was operating in
+	 * cut-through mode then the length in the RX pseudo-header will be
+	 * zero, and the packet length must be obtained from the DMA length
+	 * reported in the RX event.
+	 */
+	/* 16-bit little-endian length at pseudo-header offset +08 */
+	*lengthp = buffer[8] | (buffer[9] << 8);
+	return (0);
+}
+
+/*
+ * Post `n` receive buffers of `size` bytes at DMA addresses addrp[]
+ * to the RX descriptor ring, starting at position `added`.  The
+ * descriptors are written to host memory only; ef10_rx_qpush() must
+ * be called afterwards to make them visible to the hardware.
+ */
+			void
+ef10_rx_qpost(
+	__in		efx_rxq_t *erp,
+	__in_ecount(n)	efsys_dma_addr_t *addrp,
+	__in		size_t size,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__in		unsigned int added)
+{
+	efx_qword_t qword;
+	unsigned int i;
+	unsigned int offset;
+	unsigned int id;
+
+	/* The client driver must not overfill the queue */
+	EFSYS_ASSERT3U(added - completed + n, <=,
+	    EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+	/* Ring positions wrap using the power-of-2 descriptor count mask */
+	id = added & (erp->er_mask);
+	for (i = 0; i < n; i++) {
+		EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+		    unsigned int, id, efsys_dma_addr_t, addrp[i],
+		    size_t, size);
+
+		EFX_POPULATE_QWORD_3(qword,
+		    ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+		    ESF_DZ_RX_KER_BUF_ADDR_DW0,
+		    (uint32_t)(addrp[i] & 0xffffffff),
+		    ESF_DZ_RX_KER_BUF_ADDR_DW1,
+		    (uint32_t)(addrp[i] >> 32));
+
+		offset = id * sizeof (efx_qword_t);
+		EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+		id = (id + 1) & (erp->er_mask);
+	}
+}
+
+/*
+ * Publish descriptors previously written by ef10_rx_qpost() to the
+ * hardware by ringing the RX doorbell.  The write pointer is rounded
+ * down to the hardware alignment; *pushedp tracks the last value
+ * actually pushed so redundant doorbells are skipped.
+ */
+			void
+ef10_rx_qpush(
+	__in	efx_rxq_t *erp,
+	__in	unsigned int added,
+	__inout	unsigned int *pushedp)
+{
+	efx_nic_t *enp = erp->er_enp;
+	unsigned int pushed = *pushedp;
+	uint32_t wptr;
+	efx_dword_t dword;
+
+	/* Hardware has alignment restriction for WPTR */
+	wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+	if (pushed == wptr)
+		return;
+
+	*pushedp = wptr;
+
+	/* Push the populated descriptors out */
+	wptr &= erp->er_mask;
+
+	EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+	EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+	    wptr, pushed & erp->er_mask);
+	EFSYS_PIO_WRITE_BARRIER();
+	EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+			    erp->er_index, &dword, B_FALSE);
+}
+
+/*
+ * Flush the RX queue by destroying it in firmware (MC_CMD_FINI_RXQ);
+ * completion is signalled asynchronously via an RX flush-done event.
+ */
+	__checkReturn	efx_rc_t
+ef10_rx_qflush(
+	__in	efx_rxq_t *erp)
+{
+	efx_nic_t *enp = erp->er_enp;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Enable the RX queue.  On EF10 the queue is usable as soon as it is
+ * created, so there is nothing to do (the FIXME markers are upstream's).
+ */
+		void
+ef10_rx_qenable(
+	__in	efx_rxq_t *erp)
+{
+	/* FIXME */
+	_NOTE(ARGUNUSED(erp))
+	/* FIXME */
+}
+
+/*
+ * Validate parameters and create an RX queue of `n` descriptors
+ * (power of 2, within [EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS]) bound
+ * to event queue eep, then bind the queue's label to the event queue
+ * for RX event demultiplexing.  Only the default and scatter queue
+ * types are supported here (no packed stream).
+ */
+	__checkReturn	efx_rc_t
+ef10_rx_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		unsigned int label,
+	__in		efx_rxq_type_t type,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		efx_evq_t *eep,
+	__in		efx_rxq_t *erp)
+{
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	efx_rc_t rc;
+	boolean_t disable_scatter;
+	unsigned int ps_buf_size;
+
+	_NOTE(ARGUNUSED(id, erp))
+
+	EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+	EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+	EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+	EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+	EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+	if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+		rc = EINVAL;
+		goto fail1;
+	}
+	if (index >= encp->enc_rxq_limit) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	switch (type) {
+	case EFX_RXQ_TYPE_DEFAULT:
+	case EFX_RXQ_TYPE_SCATTER:
+		ps_buf_size = 0;
+		break;
+	default:
+		rc = ENOTSUP;
+		goto fail3;
+	}
+
+	/* Packed stream is not supported in this build */
+	EFSYS_ASSERT(ps_buf_size == 0);
+
+	/* Scatter can only be disabled if the firmware supports doing so */
+	if (type == EFX_RXQ_TYPE_SCATTER)
+		disable_scatter = B_FALSE;
+	else
+		disable_scatter = encp->enc_rx_disable_scatter_supported;
+
+	/*
+	 * NOTE(review): fail4/fail5 are skipped below — presumably the
+	 * numbering matches the full libefx source where packed-stream
+	 * checks occupy those labels; confirm against upstream.
+	 */
+	if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
+		    esmp, disable_scatter, ps_buf_size)) != 0)
+		goto fail6;
+
+	erp->er_eep = eep;
+	erp->er_label = label;
+
+	/* Route RX events carrying this label on eep to this queue */
+	ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);
+
+	return (0);
+
+fail6:
+	EFSYS_PROBE(fail6);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Destroy the RX queue software state: unbind the queue's label from
+ * its event queue, drop the queue count and free the queue structure.
+ */
+		void
+ef10_rx_qdestroy(
+	__in	efx_rxq_t *erp)
+{
+	efx_nic_t *enp = erp->er_enp;
+	efx_evq_t *eep = erp->er_eep;
+	unsigned int label = erp->er_label;
+
+	ef10_ev_rxlabel_fini(eep, label);
+
+	EFSYS_ASSERT(enp->en_rx_qcount != 0);
+	--enp->en_rx_qcount;
+
+	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+/* Module-level RX teardown; nothing to do on EF10. */
+		void
+ef10_rx_fini(
+	__in	efx_nic_t *enp)
+{
+	_NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/ef10_tlv_layout.h b/drivers/net/sfc/efx/base/ef10_tlv_layout.h
new file mode 100644
index 0000000..7d099b8
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_tlv_layout.h
@@ -0,0 +1,941 @@
+/**************************************************************************\
+*//*! \file
+** <L5_PRIVATE L5_SOURCE>
+** \author  mjs
+**  \brief  TLV item layouts for EF10 static and dynamic config in NVRAM
+**   \date  2012/11/20
+**    \cop  (c) Solarflare Communications Inc.
+** </L5_PRIVATE>
+*//*
+\**************************************************************************/
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding.  (Big-endian host systems will require some byte-swapping.)
+ *
+ *                                    -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ *                                    -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ *   -  L is a location, indicating where this tag is expected to be found:
+ *        0: static configuration
+ *        1: dynamic configuration
+ *        2: firmware internal use
+ *        3: license partition
+ *
+ *   -  TTT is a type, which is just a unique value.  The same type value
+ *      might appear in both locations, indicating a relationship between
+ *      the items (e.g. static and dynamic VPD below).
+ *
+ *   -  NNNN is an index of some form.  Some item types are per-port, some
+ *      are per-PF, some are per-partition-type.
+ *
+ *                                    -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary.  (No, technically this does not
+ * absolutely guarantee portability.  But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ *  General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END                     (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP                    (0x00000000)
+#define TLV_TAG_INVALID                 (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER        (0xEF10DA7A)
+
+struct tlv_partition_header {
+  uint32_t tag;               /* TLV_TAG_PARTITION_HEADER */
+  uint32_t length;            /* TLV value length, per SF-108797-SW */
+  uint16_t type_id;           /* NVRAM partition type */
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+  uint16_t preset;
+  uint32_t generation;        /* generation count; see matching trailer field */
+  uint32_t total_length;      /* total length of the partition contents */
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER       (0xEF101A57)
+
+struct tlv_partition_trailer {
+  uint32_t tag;               /* TLV_TAG_PARTITION_TRAILER */
+  uint32_t length;            /* TLV value length */
+  uint32_t generation;        /* must match tlv_partition_header.generation */
+  uint32_t checksum;          /* partition checksum; format per SF-108797-SW */
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0.  (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+  uint32_t tag;               /* TLV_TAG_APPENDABLE_PARTITION_HEADER */
+  uint32_t length;            /* TLV value length */
+  uint16_t type_id;           /* NVRAM partition type */
+  uint16_t reserved;          /* padding for 32-bit alignment */
+};
+
+
+/* ----------------------------------------------------------------------------
+ *  Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES     (0x00010000)
+
+struct tlv_global_capabilities {
+  uint32_t tag;               /* TLV_TAG_GLOBAL_CAPABILITIES */
+  uint32_t length;            /* TLV value length */
+  uint32_t flags;             /* NIC global capability flag bits */
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port)          (0x00020000 + (port))
+
+struct tlv_port_mac {
+  uint32_t tag;               /* TLV_TAG_PORT_MAC(port) */
+  uint32_t length;            /* TLV value length */
+  uint8_t  base_address[6];   /* first MAC address of the per-port range */
+  uint16_t reserved;          /* padding for alignment */
+  uint16_t count;             /* number of addresses in the range */
+  uint16_t stride;            /* increment added to the low-order byte(s) */
+};
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change.  It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this, the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf)       (0x00030000 + (pf))
+
+/* Per-PF static VPD: a standard PCI VPD block for one PF */
+struct tlv_pf_static_vpd {
+  uint32_t tag;               /* TLV_TAG_PF_STATIC_VPD(pf) */
+  uint32_t length;            /* TLV value length */
+  uint8_t  bytes[];           /* PCI VPD data */
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD       (0x001f0000)
+
+/* Global static VPD (Medford onwards), preferred over the per-pf TLV */
+struct tlv_global_static_vpd {
+  uint32_t tag;               /* TLV_TAG_GLOBAL_STATIC_VPD */
+  uint32_t length;            /* TLV value length */
+  uint8_t  bytes[];           /* PCI VPD data */
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf)      (0x10030000 + (pf))
+
+/* Per-PF dynamic VPD */
+struct tlv_pf_dynamic_vpd {
+  uint32_t tag;               /* TLV_TAG_PF_DYNAMIC_VPD(pf) */
+  uint32_t length;            /* TLV value length */
+  uint8_t  bytes[];           /* PCI VPD data */
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD      (0x10200000)
+
+/* Global dynamic VPD (Medford onwards), preferred over the per-pf TLV */
+struct tlv_global_dynamic_vpd {
+  uint32_t tag;               /* TLV_TAG_GLOBAL_DYNAMIC_VPD */
+  uint32_t length;            /* TLV value length */
+  uint8_t  bytes[];           /* PCI VPD data */
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf)              (0x00040000 + (pf))
+
+/* Per-PF edits applied to PCI config space before enumeration */
+struct tlv_pf_dbi {
+  uint32_t tag;               /* TLV_TAG_PF_DBI(pf) */
+  uint32_t length;            /* TLV value length */
+  struct {
+    uint16_t addr;            /* PCI config space offset */
+    uint16_t byte_enables;    /* which bytes of the dword to write */
+    uint32_t value;           /* value to write */
+  } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI              (0x00210000)
+
+/* Global (all-function) PCI config space edits; same item layout */
+struct tlv_global_dbi {
+  uint32_t tag;               /* TLV_TAG_GLOBAL_DBI */
+  uint32_t length;            /* TLV value length */
+  struct {
+    uint16_t addr;            /* PCI config space offset */
+    uint16_t byte_enables;    /* which bytes of the dword to write */
+    uint32_t value;           /* value to write */
+  } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM.  For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+  uint32_t tag;               /* TLV_TAG_PARTITION_SUBTYPE(type) */
+  uint32_t length;            /* TLV value length */
+  uint32_t subtype;           /* partition subtype code */
+  uint8_t  description[];     /* optional string, see MC_CMD_NVRAM_METADATA */
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM.  This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+/* Four-component version number (w.x.y.z) of a stored image */
+struct tlv_partition_version {
+  uint32_t tag;               /* TLV_TAG_PARTITION_VERSION(type) */
+  uint32_t length;            /* TLV value length */
+  uint16_t version_w;
+  uint16_t version_x;
+  uint16_t version_y;
+  uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+/* NIC-wide PCIe layout: PF count and the BIU BAR apertures */
+struct tlv_pcie_config {
+  uint32_t tag;               /* TLV_TAG_GLOBAL_PCIE_CONFIG */
+  uint32_t length;            /* TLV value length */
+  int16_t max_pf_number;                        /**< Largest PF RID (lower PFs may be hidden) */
+  uint16_t pf_aper;                             /**< BIU aperture for PF BAR2 */
+  uint16_t vf_aper;                             /**< BIU aperture for VF BAR0 */
+  uint16_t int_aper;                            /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1)                 /* Use FW default for largest PF RID  */
+#define TLV_APER_DEFAULT (0xFFFF)               /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can
+ * tidy things up later */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf)  (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+  uint32_t tag;               /* TLV_TAG_PF_PCIE_CONFIG(pf) */
+  uint32_t length;            /* TLV value length */
+  uint8_t vfs_total;          /* total VFs for this PF */
+  uint8_t port_allocation;
+  uint16_t vectors_per_pf;    /* MSI-X vectors per PF */
+  uint16_t vectors_per_vf;    /* MSI-X vectors per VF */
+  uint8_t pf_bar0_aperture;
+  uint8_t pf_bar2_aperture;
+  uint8_t vf_bar0_aperture;
+  uint8_t vf_base;
+  uint16_t supp_pagesz;       /* supported page sizes */
+  uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000)        /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+  uint32_t tag;               /* TLV_TAG_TMP_GUBBINS */
+  uint32_t length;            /* TLV value length */
+  /* Consumed by dpcpu.c */
+  uint64_t tx0_tags;     /* Bitmap */
+  uint64_t tx1_tags;     /* Bitmap */
+  uint64_t dl_tags;      /* Bitmap */
+  uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS  (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS  (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS  (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS  (16) /* dl_tags is valid */
+  /* Consumed by features.c */
+  uint32_t dut_features;        /* All 1s -> leave alone */
+  int8_t with_rmon;             /* 0 -> off, 1 -> on, -1 -> leave alone */
+  /* Consumed by clocks_hunt.c */
+  int8_t clk_mode;             /* 0 -> off, 1 -> on, -1 -> leave alone */
+  /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+  int8_t rx_dc_size;           /* -1 -> leave alone */
+  int8_t tx_dc_size;
+  int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG      (0x000a0000)
+
+struct tlv_global_port_config {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t ports_per_core;
+  uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS        (0x100b0000)
+
+struct tlv_firmware_options {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t firmware_variant;
+/* All 1s -> do not override the variant chosen by the host driver */
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step.  Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED   MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY     MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM   MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE    MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+                                             MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE    MC_CMD_FW_RULES_ENGINE
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+  uint16_t target_voltage; /* In millivolts */
+  /* Since the limits are meant to be centred to the target (and must at least
+   * contain it) they need setting as well. */
+  uint16_t warn_low;       /* In millivolts */
+  uint16_t warn_high;      /* In millivolts */
+  uint16_t panic_low;      /* In millivolts */
+  uint16_t panic_high;     /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG       (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT  TLV_TAG_CLOCK_CONFIG
+
+/* Huntington clock configuration (Medford uses the tag below instead) */
+struct tlv_clock_config {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t clk_sys;        /* MHz */
+  uint16_t clk_dpcpu;      /* MHz */
+  uint16_t clk_icore;      /* MHz */
+  uint16_t clk_pcs;        /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD      (0x00100000)
+
+struct tlv_clock_config_medford {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t clk_sys;        /* MHz */
+  uint16_t clk_mc;         /* MHz */
+  uint16_t clk_rmon;       /* MHz */
+  uint16_t clk_vswitch;    /* MHz */
+  uint16_t clk_dpcpu;      /* MHz */
+  uint16_t clk_pcs;        /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous.  Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC              (0x000e0000)
+
+struct tlv_global_mac {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  base_address[6];
+  uint16_t reserved1;
+  uint16_t count;
+  uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET     (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT     TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t millivolts;
+  uint16_t reserved;
+};
+
+/* Factory settings for amplitude calibration of the PCIE TX serdes */
+#define TLV_TAG_TX_PCIE_AMP_CONFIG  (0x00220000)
+struct tlv_pcie_tx_amp_config {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t quad_tx_imp2k[4];
+  uint8_t quad_tx_imp50[4];
+  uint8_t lane_amp[16];
+};
+
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * by a bitmap rather than having the number of the highest visible one. As such
+ * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can and it should be used in place of that tag in future (but compatibility with
+ * the old tag will be left in the firmware indefinitely).  */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t visible_pfs;                         /**< Bitmap of visible PFs */
+  uint16_t pf_aper;                             /**< BIU aperture for PF BAR2 */
+  uint16_t vf_aper;                             /**< BIU aperture for VF BAR0 */
+  uint16_t int_aper;                            /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * This tag supersedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE         (0x10110000)
+
+struct tlv_global_port_mode {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT           (0xffffffff) /* Default for given platform */
+#define TLV_PORT_MODE_10G                        (0) /* 10G, single SFP/10G-KR */
+#define TLV_PORT_MODE_40G                        (1) /* 40G, single QSFP/40G-KR */
+#define TLV_PORT_MODE_10G_10G                    (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
+#define TLV_PORT_MODE_40G_40G                    (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G            (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1         (4) /* 4x10G, single QSFP, cage 0 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q          (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
+#define TLV_PORT_MODE_40G_10G_10G                (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_40G                (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2         (8) /* 4x10G, single QSFP, cage 1 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2      (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port)       (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT        (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE                    (0)
+#define TLV_VSWITCH_TYPE_VLAN                    (1)
+#define TLV_VSWITCH_TYPE_VEB                     (2)
+#define TLV_VSWITCH_TYPE_VEPA                    (3)
+#define TLV_VSWITCH_TYPE_MUX                     (4)
+#define TLV_VSWITCH_TYPE_TEST                    (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf)               (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG                    (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET           (0x10140000)
+
+struct tlv_0v9_atb_offset {
+  uint32_t tag;
+  uint32_t length;
+  int16_t  offset_millivolts;       /* Signed: may lower or raise the 0v9 setting */
+  uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
+
+#define TLV_TAG_PRIVILEGE_MASK          (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask {                          /* legacy structure - do not use */
+  uint32_t tag;
+  uint32_t length;
+  uint32_t privilege_mask;
+};
+
+/* Note: shares tag value 0x10150000 with the legacy TLV_TAG_PRIVILEGE_MASK above */
+#define TLV_TAG_PRIVILEGE_MASK_ADD      (0x10150000)
+
+struct tlv_privilege_mask_add {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM      (0x10160000)
+
+struct tlv_privilege_mask_rem {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS         (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf)   (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port)             (0x10170000 + (port))
+
+struct tlv_pfiov {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t pfiov;
+#define TLV_PFIOV_OFF                    (0) /* Default */
+#define TLV_PFIOV_ON                     (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING   (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT  (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED  (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf)    (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t rate_mbps;      /* Rate limit in Mbit/s */
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced VIs are taken
+ * away from the highest numbered port first, so a vi_count of 1024 means 1024
+ * VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG    (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t rx_desc_cache_size;       /* log2 of the RX descriptor cache size */
+  uint8_t tx_desc_cache_size;       /* log2 of the TX descriptor cache size */
+  uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT   (0xffff)
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG    (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+  uint32_t  tag;
+  uint32_t  length;
+  uint32_t  max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+  uint32_t  timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+  uint32_t tag;
+  uint32_t length;
+  uint16_t gen;   /* Target PCIe generation: 1, 2, 3 */
+  uint16_t width; /* Number of lanes */
+};
+
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG    (0x10210000)
+struct tlv_tx_event_merging_config {
+  uint32_t  tag;
+  uint32_t  length;
+  uint32_t  max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+  uint32_t  timeout_ns;
+  uint32_t  qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+  uint32_t  tag;
+  uint32_t  length;
+  uint8_t   data[];     /* Variable-length license payload */
+} tlv_license_t;
+
+/* TSA NIC IP address configuration
+ *
+ * Sets the TSA NIC IP address statically via configuration tool or dynamically
+ * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG         (0x10220000)
+
+#define TLV_TSAN_IP_MODE_STATIC         (0)
+#define TLV_TSAN_IP_MODE_DHCP           (1)
+#define TLV_TSAN_IP_MODE_SNOOP          (2)
+typedef struct tlv_tsan_config {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t mode;        /* One of the TLV_TSAN_IP_MODE_* values above */
+  uint32_t ip;
+  uint32_t netmask;
+  uint32_t gateway;
+  uint32_t port;
+  uint32_t bind_retry;  /* DEPRECATED */
+  uint32_t bind_bkout;  /* DEPRECATED */
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG         (0x10230000)
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+  uint32_t tag;
+  uint32_t length;
+  uint32_t num_tsacs;           /* Number of valid entries in ip[]/port[] */
+  uint32_t ip[TLV_MAX_TSACS];
+  uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET      (0x10240000)
+
+typedef struct tlv_binding_ticket {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  bytes[];     /* Variable-length ticket payload */
+} tlv_binding_ticket_t;
+
+/* Solarflare private key  (DEPRECATED)
+ *
+ * Sets the Solarflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF              (0x10250000)    /* DEPRECATED */
+
+typedef struct tlv_pik_sf {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  bytes[];     /* Variable-length key payload */
+} tlv_pik_sf_t;
+
+/* CA root certificate
+ *
+ * Sets the CA root certificate used for TSA Controller verification during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT        (0x10260000)
+
+typedef struct tlv_ca_root_cert {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  bytes[];     /* Variable-length certificate payload */
+} tlv_ca_root_cert_t;
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired booting behaviour for the switch, it just requires to
+ * know if the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE       (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT    0
+};
+
+/* BIU mode
+ *
+ * Medford2 tag for selecting VI window decode (see values below)
+ */
+#define TLV_TAG_BIU_VI_WINDOW_MODE       (0x10280000)
+struct tlv_biu_vi_window_mode {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  mode;
+#define TLV_BIU_VI_WINDOW_MODE_8K    0  /*  8k per VI, CTPIO not mapped, medford/hunt compatible */
+#define TLV_BIU_VI_WINDOW_MODE_16K   1  /* 16k per VI, CTPIO mapped */
+#define TLV_BIU_VI_WINDOW_MODE_64K   2  /* 64k per VI, CTPIO mapped, POWER-friendly */
+};
+
+/* FastPD mode
+ *
+ * Medford2 tag for configuring the FastPD mode (see values below)
+ */
+#define TLV_TAG_FASTPD_MODE(port)       (0x10290000 + (port))
+struct tlv_fastpd_mode {
+  uint32_t tag;
+  uint32_t length;
+  uint8_t  mode;
+#define TLV_FASTPD_MODE_SOFT_ALL       0  /* All packets to the SoftPD */
+#define TLV_FASTPD_MODE_FAST_ALL       1  /* All packets to the FastPD */
+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2  /* Supported packet types to the FastPD; everything else to the SoftPD  */
+};
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/drivers/net/sfc/efx/base/ef10_tx.c b/drivers/net/sfc/efx/base/ef10_tx.c
new file mode 100644
index 0000000..59343a3
--- /dev/null
+++ b/drivers/net/sfc/efx/base/ef10_tx.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/* TX queue statistics are not collected in this build: expands to nothing */
+#define	EFX_TX_QSTAT_INCR(_etp, _stat)
+
+/*
+ * Issue MC_CMD_INIT_TXQ to create a hardware TX queue.
+ *
+ * size        - queue size in descriptors
+ * target_evq  - index of the event queue receiving TX completion events
+ * label       - label reported in TX events for this queue
+ * instance    - TX queue instance number
+ * flags       - EFX_TXQ_* checksum/TSO offload flags
+ * esmp        - DMA memory backing the TX descriptor ring
+ *
+ * Returns 0 on success, an errno value otherwise.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_init_txq(
+	__in		efx_nic_t *enp,
+	__in		uint32_t size,
+	__in		uint32_t target_evq,
+	__in		uint32_t label,
+	__in		uint32_t instance,
+	__in		uint16_t flags,
+	__in		efsys_mem_t *esmp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+			    MC_CMD_INIT_TXQ_OUT_LEN)];
+	efx_qword_t *dma_addr;
+	uint64_t addr;
+	int npages;
+	int i;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+	    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+
+	/* The request carries one DMA address per buffer table page */
+	npages = EFX_TXQ_NBUFS(size);
+	if (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {
+		rc = EINVAL;
+		goto fail1;
+	}
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_INIT_TXQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+	/*
+	 * Note that the MCDI checksum flags are "disable" flags, so they are
+	 * set when the corresponding EFX_TXQ_CKSUM_* offload flag is absent.
+	 */
+	MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+	    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+	    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+	    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+	    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+	    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+	    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+	    INIT_TXQ_IN_CRC_MODE, 0,
+	    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+	MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+	/*
+	 * Populate the 64-bit DMA address of each ring buffer page,
+	 * EFX_BUF_SIZE apart, as low/high dwords.
+	 */
+	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+	addr = EFSYS_MEM_ADDR(esmp);
+
+	for (i = 0; i < npages; i++) {
+		EFX_POPULATE_QWORD_2(*dma_addr,
+		    EFX_DWORD_1, (uint32_t)(addr >> 32),
+		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+		dma_addr++;
+		addr += EFX_BUF_SIZE;
+	}
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Issue MC_CMD_FINI_TXQ to tear down the hardware TX queue <instance>.
+ * Returns 0 on success, an errno value otherwise.
+ */
+static	__checkReturn	efx_rc_t
+efx_mcdi_fini_txq(
+	__in		efx_nic_t *enp,
+	__in		uint32_t instance)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+			    MC_CMD_FINI_TXQ_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_FINI_TXQ;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+	/* Quiet execution: expected errors are not logged as MCDI failures */
+	efx_mcdi_execute_quiet(enp, &req);
+
+	/* EALREADY is not an error: the queue has already been torn down */
+	if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Module-level TX initialisation: nothing is required on EF10 */
+	__checkReturn	efx_rc_t
+ef10_tx_init(
+	__in		efx_nic_t *enp)
+{
+	_NOTE(ARGUNUSED(enp))
+	return (0);
+}
+
+/* Module-level TX teardown: nothing is required on EF10 */
+			void
+ef10_tx_fini(
+	__in		efx_nic_t *enp)
+{
+	_NOTE(ARGUNUSED(enp))
+}
+
+/*
+ * Create a TX queue via MCDI and clear the TX push collector.
+ *
+ * index   - TX queue instance number
+ * label   - label reported in TX completion events
+ * esmp    - DMA memory backing the descriptor ring
+ * n       - queue size in descriptors
+ * id      - unused on EF10
+ * flags   - EFX_TXQ_* offload flags
+ * eep     - event queue receiving TX completions
+ * etp     - TX queue state to initialise
+ * addedp  - out: number of descriptors already added (the no-op below)
+ */
+	__checkReturn	efx_rc_t
+ef10_tx_qcreate(
+	__in		efx_nic_t *enp,
+	__in		unsigned int index,
+	__in		unsigned int label,
+	__in		efsys_mem_t *esmp,
+	__in		size_t n,
+	__in		uint32_t id,
+	__in		uint16_t flags,
+	__in		efx_evq_t *eep,
+	__in		efx_txq_t *etp,
+	__out		unsigned int *addedp)
+{
+	efx_qword_t desc;
+	efx_rc_t rc;
+
+	_NOTE(ARGUNUSED(id))
+
+	if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
+	    esmp)) != 0)
+		goto fail1;
+
+	/*
+	 * A previous user of this TX queue may have written a descriptor to the
+	 * TX push collector, but not pushed the doorbell (e.g. after a crash).
+	 * The next doorbell write would then push the stale descriptor.
+	 *
+	 * Ensure the (per network port) TX push collector is cleared by writing
+	 * a no-op TX option descriptor. See bug29981 for details.
+	 */
+	*addedp = 1;
+	EFX_POPULATE_QWORD_4(desc,
+	    ESF_DZ_TX_DESC_IS_OPT, 1,
+	    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+	    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+	    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+	    ESF_DZ_TX_OPTION_IP_CSUM,
+	    (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+
+	/* Write the no-op option descriptor at slot 0 and ring the doorbell */
+	EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+	ef10_tx_qpush(etp, *addedp, 0);
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/* Destroy TX queue software state; no teardown is performed here yet */
+		void
+ef10_tx_qdestroy(
+	__in	efx_txq_t *etp)
+{
+	/* FIXME */
+	_NOTE(ARGUNUSED(etp))
+	/* FIXME */
+}
+
+/*
+ * Enable PIO (programmed I/O) transmit for this TX queue by sub-allocating
+ * a block from a hardware PIO buffer and linking the buffer to the queue.
+ *
+ * Returns 0 on success; EALREADY if PIO is already enabled on this queue;
+ * other errno values from allocation/link failures.
+ */
+	__checkReturn	efx_rc_t
+ef10_tx_qpio_enable(
+	__in		efx_txq_t *etp)
+{
+	efx_nic_t *enp = etp->et_enp;
+	efx_piobuf_handle_t handle;
+	efx_rc_t rc;
+
+	/* et_pio_size != 0 means a PIO block is already allocated */
+	if (etp->et_pio_size != 0) {
+		rc = EALREADY;
+		goto fail1;
+	}
+
+	/* Sub-allocate a PIO block from a piobuf */
+	if ((rc = ef10_nic_pio_alloc(enp,
+		    &etp->et_pio_bufnum,
+		    &handle,
+		    &etp->et_pio_blknum,
+		    &etp->et_pio_offset,
+		    &etp->et_pio_size)) != 0) {
+		goto fail2;
+	}
+	EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+	/* Link the piobuf to this TXQ */
+	if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+		goto fail3;
+	}
+
+	/*
+	 * et_pio_offset is the offset of the sub-allocated block within the
+	 * hardware PIO buffer. It is used as the buffer address in the PIO
+	 * option descriptor.
+	 *
+	 * et_pio_write_offset is the offset of the sub-allocated block from the
+	 * start of the write-combined memory mapping, and is used for writing
+	 * data into the PIO buffer.
+	 */
+	etp->et_pio_write_offset =
+	    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+	    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+	return (0);
+
+fail3:
+	EFSYS_PROBE(fail3);
+	/* Undo the sub-allocation on link failure */
+	ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+	etp->et_pio_size = 0;
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Disable PIO transmit on this TX queue, releasing the sub-allocated PIO
+ * block. A no-op if PIO was never enabled (et_pio_size == 0).
+ */
+			void
+ef10_tx_qpio_disable(
+	__in		efx_txq_t *etp)
+{
+	efx_nic_t *enp = etp->et_enp;
+
+	if (etp->et_pio_size != 0) {
+		/* Unlink the piobuf from this TXQ */
+		ef10_nic_pio_unlink(enp, etp->et_index);
+
+		/* Free the sub-allocated PIO block */
+		ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+		etp->et_pio_size = 0;
+		etp->et_pio_write_offset = 0;
+	}
+}
+
+/*
+ * Copy packet data into this queue's PIO block through the write-combined
+ * BAR mapping, one 64-bit word at a time.
+ *
+ * buffer/length must be a whole number of 64-bit words; offset is relative
+ * to the start of the queue's PIO block.
+ *
+ * Returns 0 on success; ENOENT if PIO is not enabled on this queue;
+ * ENOSPC if the data would overrun the PIO block.
+ */
+	__checkReturn	efx_rc_t
+ef10_tx_qpio_write(
+	__in			efx_txq_t *etp,
+	__in_ecount(length)	uint8_t *buffer,
+	__in			size_t length,
+	__in			size_t offset)
+{
+	efx_nic_t *enp = etp->et_enp;
+	efsys_bar_t *esbp = enp->en_esbp;
+	uint32_t write_offset;
+	uint32_t write_offset_limit;
+	efx_qword_t *eqp;
+	efx_rc_t rc;
+
+	EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+	if (etp->et_pio_size == 0) {
+		rc = ENOENT;
+		goto fail1;
+	}
+	if (offset + length > etp->et_pio_size)	{
+		rc = ENOSPC;
+		goto fail2;
+	}
+
+	/*
+	 * Writes to PIO buffers must be 64 bit aligned, and multiples of
+	 * 64 bits.
+	 */
+	write_offset = etp->et_pio_write_offset + offset;
+	write_offset_limit = write_offset + length;
+	eqp = (efx_qword_t *)buffer;
+	while (write_offset < write_offset_limit) {
+		EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+		eqp++;
+		write_offset += sizeof (efx_qword_t);
+	}
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Post a PIO option descriptor for a packet already written to the PIO
+ * block (via ef10_tx_qpio_write). Consumes exactly one descriptor slot;
+ * the caller must push the doorbell separately.
+ *
+ * pkt_length - length of the packet in the PIO block
+ * completed  - count of completed descriptors (for ring-space accounting)
+ * addedp     - in/out: count of descriptors added to the ring
+ *
+ * Returns 0 on success; ENOSPC if the ring is full; ENOENT if PIO is not
+ * enabled on this queue.
+ */
+	__checkReturn	efx_rc_t
+ef10_tx_qpio_post(
+	__in			efx_txq_t *etp,
+	__in			size_t pkt_length,
+	__in			unsigned int completed,
+	__inout			unsigned int *addedp)
+{
+	efx_qword_t pio_desc;
+	unsigned int id;
+	size_t offset;
+	unsigned int added = *addedp;
+	efx_rc_t rc;
+
+
+	if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+		rc = ENOSPC;
+		goto fail1;
+	}
+
+	if (etp->et_pio_size == 0) {
+		rc = ENOENT;
+		goto fail2;
+	}
+
+	id = added++ & etp->et_mask;
+	offset = id * sizeof (efx_qword_t);
+
+	EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+		    unsigned int, id, uint32_t, etp->et_pio_offset,
+		    size_t, pkt_length);
+
+	/* The descriptor addresses the packet by its offset in the piobuf */
+	EFX_POPULATE_QWORD_5(pio_desc,
+			ESF_DZ_TX_DESC_IS_OPT, 1,
+			ESF_DZ_TX_OPTION_TYPE, 1,
+			ESF_DZ_TX_PIO_CONT, 0,
+			ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+			ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+	EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+	EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+	*addedp = added;
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Post <n> DMA descriptors, one per fragment in <eb>, onto the TX ring.
+ * The caller must push the doorbell separately (ef10_tx_qpush).
+ *
+ * eb        - array of fragments (address, size, end-of-packet flag)
+ * n         - number of fragments to post
+ * completed - count of completed descriptors (for ring-space accounting)
+ * addedp    - in/out: count of descriptors added to the ring
+ *
+ * Returns 0 on success; ENOSPC if the ring lacks space for all <n>.
+ */
+	__checkReturn	efx_rc_t
+ef10_tx_qpost(
+	__in		efx_txq_t *etp,
+	__in_ecount(n)	efx_buffer_t *eb,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__inout		unsigned int *addedp)
+{
+	unsigned int added = *addedp;
+	unsigned int i;
+	efx_rc_t rc;
+
+	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+		rc = ENOSPC;
+		goto fail1;
+	}
+
+	for (i = 0; i < n; i++) {
+		efx_buffer_t *ebp = &eb[i];
+		efsys_dma_addr_t addr = ebp->eb_addr;
+		size_t size = ebp->eb_size;
+		boolean_t eop = ebp->eb_eop;
+		unsigned int id;
+		size_t offset;
+		efx_qword_t qword;
+
+		/* Fragments must not span 4k boundaries. */
+		EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));
+
+		id = added++ & etp->et_mask;
+		offset = id * sizeof (efx_qword_t);
+
+		EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+		    unsigned int, id, efsys_dma_addr_t, addr,
+		    size_t, size, boolean_t, eop);
+
+		/* CONT is the inverse of end-of-packet */
+		EFX_POPULATE_QWORD_5(qword,
+		    ESF_DZ_TX_KER_TYPE, 0,
+		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+	}
+
+	EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+	*addedp = added;
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * This improves performance by pushing a TX descriptor at the same time as the
+ * doorbell. The descriptor must be added to the TXQ, so that can be used if the
+ * hardware decides not to use the pushed descriptor.
+ */
+			void
+ef10_tx_qpush(
+	__in		efx_txq_t *etp,
+	__in		unsigned int added,
+	__in		unsigned int pushed)
+{
+	efx_nic_t *enp = etp->et_enp;
+	unsigned int wptr;
+	unsigned int id;
+	size_t offset;
+	efx_qword_t desc;
+	efx_oword_t oword;
+
+	wptr = added & etp->et_mask;
+	id = pushed & etp->et_mask;
+	offset = id * sizeof (efx_qword_t);
+
+	EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+	EFX_POPULATE_OWORD_3(oword,
+	    ERF_DZ_TX_DESC_WPTR, wptr,
+	    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+	    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+	EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
+	EFSYS_PIO_WRITE_BARRIER();
+	EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
+				    &oword);
+}
+
+	__checkReturn	efx_rc_t
+ef10_tx_qdesc_post(
+	__in		efx_txq_t *etp,
+	__in_ecount(n)	efx_desc_t *ed,
+	__in		unsigned int n,
+	__in		unsigned int completed,
+	__inout		unsigned int *addedp)
+{
+	unsigned int added = *addedp;
+	unsigned int i;
+	efx_rc_t rc;
+
+	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+		rc = ENOSPC;
+		goto fail1;
+	}
+
+	for (i = 0; i < n; i++) {
+		efx_desc_t *edp = &ed[i];
+		unsigned int id;
+		size_t offset;
+
+		id = added++ & etp->et_mask;
+		offset = id * sizeof (efx_desc_t);
+
+		EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+	}
+
+	EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+		    unsigned int, added, unsigned int, n);
+
+	EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+	*addedp = added;
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	void
+ef10_tx_qdesc_dma_create(
+	__in	efx_txq_t *etp,
+	__in	efsys_dma_addr_t addr,
+	__in	size_t size,
+	__in	boolean_t eop,
+	__out	efx_desc_t *edp)
+{
+	/* Fragments must not span 4k boundaries. */
+	EFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);
+
+	EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+		    efsys_dma_addr_t, addr,
+		    size_t, size, boolean_t, eop);
+
+	EFX_POPULATE_QWORD_5(edp->ed_eq,
+		    ESF_DZ_TX_KER_TYPE, 0,
+		    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+		    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+		    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+		    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+	void
+ef10_tx_qdesc_tso_create(
+	__in	efx_txq_t *etp,
+	__in	uint16_t ipv4_id,
+	__in	uint32_t tcp_seq,
+	__in	uint8_t  tcp_flags,
+	__out	efx_desc_t *edp)
+{
+	EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+		    uint16_t, ipv4_id, uint32_t, tcp_seq,
+		    uint8_t, tcp_flags);
+
+	EFX_POPULATE_QWORD_5(edp->ed_eq,
+			    ESF_DZ_TX_DESC_IS_OPT, 1,
+			    ESF_DZ_TX_OPTION_TYPE,
+			    ESE_DZ_TX_OPTION_DESC_TSO,
+			    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+	void
+ef10_tx_qdesc_tso2_create(
+	__in			efx_txq_t *etp,
+	__in			uint16_t ipv4_id,
+	__in			uint32_t tcp_seq,
+	__in			uint16_t tcp_mss,
+	__out_ecount(count)	efx_desc_t *edp,
+	__in			int count)
+{
+	EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+		    uint16_t, ipv4_id, uint32_t, tcp_seq,
+		    uint16_t, tcp_mss);
+
+	EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+	EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+			    ESF_DZ_TX_DESC_IS_OPT, 1,
+			    ESF_DZ_TX_OPTION_TYPE,
+			    ESE_DZ_TX_OPTION_DESC_TSO,
+			    ESF_DZ_TX_TSO_OPTION_TYPE,
+			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+			    ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+			    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+	EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+			    ESF_DZ_TX_DESC_IS_OPT, 1,
+			    ESF_DZ_TX_OPTION_TYPE,
+			    ESE_DZ_TX_OPTION_DESC_TSO,
+			    ESF_DZ_TX_TSO_OPTION_TYPE,
+			    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+			    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+	void
+ef10_tx_qdesc_vlantci_create(
+	__in	efx_txq_t *etp,
+	__in	uint16_t  tci,
+	__out	efx_desc_t *edp)
+{
+	EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+		    uint16_t, tci);
+
+	EFX_POPULATE_QWORD_4(edp->ed_eq,
+			    ESF_DZ_TX_DESC_IS_OPT, 1,
+			    ESF_DZ_TX_OPTION_TYPE,
+			    ESE_DZ_TX_OPTION_DESC_VLAN,
+			    ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+			    ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+
+	__checkReturn	efx_rc_t
+ef10_tx_qpace(
+	__in		efx_txq_t *etp,
+	__in		unsigned int ns)
+{
+	efx_rc_t rc;
+
+	/* FIXME */
+	_NOTE(ARGUNUSED(etp, ns))
+	_NOTE(CONSTANTCONDITION)
+	if (B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+	/* FIXME */
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn	efx_rc_t
+ef10_tx_qflush(
+	__in		efx_txq_t *etp)
+{
+	efx_nic_t *enp = etp->et_enp;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+		goto fail1;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+			void
+ef10_tx_qenable(
+	__in		efx_txq_t *etp)
+{
+	/* FIXME */
+	_NOTE(ARGUNUSED(etp))
+	/* FIXME */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/efx/base/efx.h b/drivers/net/sfc/efx/base/efx.h
index 3899580..e61c865 100644
--- a/drivers/net/sfc/efx/base/efx.h
+++ b/drivers/net/sfc/efx/base/efx.h
@@ -184,6 +184,11 @@ efx_nic_check_pcie_link_speed(
 
 #if EFSYS_OPT_MCDI
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/* Huntington and Medford require MCDIv2 commands */
+#define	WITH_MCDI_V2 1
+#endif
+
 typedef struct efx_mcdi_req_s efx_mcdi_req_t;
 
 typedef enum efx_mcdi_exception_e {
@@ -581,6 +586,11 @@ typedef struct efx_nic_cfg_s {
 #if EFSYS_OPT_MCDI
 	uint8_t			enc_mcdi_mdio_channel;
 #endif	/* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+	uint32_t		enc_pf;
+	uint32_t		enc_vf;
+	uint32_t		enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 	boolean_t		enc_bug26807_workaround;
 	boolean_t		enc_bug35388_workaround;
 	boolean_t		enc_bug41750_workaround;
diff --git a/drivers/net/sfc/efx/base/efx_check.h b/drivers/net/sfc/efx/base/efx_check.h
index 190ac46..63c809c 100644
--- a/drivers/net/sfc/efx/base/efx_check.h
+++ b/drivers/net/sfc/efx/base/efx_check.h
@@ -70,6 +70,12 @@
 # endif
 #endif /* EFSYS_OPT_FILTER */
 
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_FILTER
+#  error "HUNTINGTON or MEDFORD requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 #ifdef EFSYS_OPT_MAC_FALCON_GMAC
 # error "MAC_FALCON_GMAC is obsolete and is not supported."
 #endif
diff --git a/drivers/net/sfc/efx/base/efx_ev.c b/drivers/net/sfc/efx/base/efx_ev.c
index 59f4d02..65094c1 100644
--- a/drivers/net/sfc/efx/base/efx_ev.c
+++ b/drivers/net/sfc/efx/base/efx_ev.c
@@ -93,6 +93,18 @@ static const efx_ev_ops_t	__efx_ev_siena_ops = {
 };
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_ev_ops_t	__efx_ev_ef10_ops = {
+	ef10_ev_init,				/* eevo_init */
+	ef10_ev_fini,				/* eevo_fini */
+	ef10_ev_qcreate,			/* eevo_qcreate */
+	ef10_ev_qdestroy,			/* eevo_qdestroy */
+	ef10_ev_qprime,				/* eevo_qprime */
+	ef10_ev_qpost,				/* eevo_qpost */
+	ef10_ev_qmoderate,			/* eevo_qmoderate */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 
 	__checkReturn	efx_rc_t
 efx_ev_init(
@@ -116,6 +128,12 @@ efx_ev_init(
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		eevop = &__efx_ev_ef10_ops;
+		break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
diff --git a/drivers/net/sfc/efx/base/efx_filter.c b/drivers/net/sfc/efx/base/efx_filter.c
index c612731..5c4b74d 100644
--- a/drivers/net/sfc/efx/base/efx_filter.c
+++ b/drivers/net/sfc/efx/base/efx_filter.c
@@ -79,6 +79,18 @@ static const efx_filter_ops_t	__efx_filter_siena_ops = {
 };
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_filter_ops_t	__efx_filter_ef10_ops = {
+	ef10_filter_init,		/* efo_init */
+	ef10_filter_fini,		/* efo_fini */
+	ef10_filter_restore,		/* efo_restore */
+	ef10_filter_add,		/* efo_add */
+	ef10_filter_delete,		/* efo_delete */
+	ef10_filter_supported_filters,	/* efo_supported_filters */
+	ef10_filter_reconfigure,	/* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	__checkReturn	efx_rc_t
 efx_filter_insert(
 	__in		efx_nic_t *enp,
@@ -147,6 +159,12 @@ efx_filter_init(
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		efop = &__efx_filter_ef10_ops;
+		break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
diff --git a/drivers/net/sfc/efx/base/efx_impl.h b/drivers/net/sfc/efx/base/efx_impl.h
index 8d85f3f..10ab36b 100644
--- a/drivers/net/sfc/efx/base/efx_impl.h
+++ b/drivers/net/sfc/efx/base/efx_impl.h
@@ -45,6 +45,14 @@
 #include "siena_impl.h"
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif	/* EFSYS_OPT_HUNTINGTON */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#include "ef10_impl.h"
+#endif	/* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+
 #ifdef	__cplusplus
 extern "C" {
 #endif
@@ -331,6 +339,9 @@ typedef struct efx_filter_s {
 #if EFSYS_OPT_SIENA
 	siena_filter_t		*ef_siena_filter;
 #endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+	ef10_filter_table_t	*ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
 } efx_filter_t;
 
 #if EFSYS_OPT_SIENA
@@ -413,6 +424,24 @@ struct efx_nic_s {
 #endif	/* EFSYS_OPT_SIENA */
 		int	enu_unused;
 	} en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+	union en_arch {
+		struct {
+			int			ena_vi_base;
+			int			ena_vi_count;
+			int			ena_vi_shift;
+			efx_piobuf_handle_t	ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+			uint32_t		ena_piobuf_count;
+			uint32_t		ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+			uint32_t		ena_pio_write_vi_base;
+			/* Memory BAR mapping regions */
+			uint32_t		ena_uc_mem_map_offset;
+			size_t			ena_uc_mem_map_size;
+			uint32_t		ena_wc_mem_map_offset;
+			size_t			ena_wc_mem_map_size;
+		} ef10;
+	} en_arch;
+#endif	/* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
 };
 
 
@@ -469,6 +498,13 @@ struct efx_txq_s {
 	unsigned int			et_index;
 	unsigned int			et_mask;
 	efsys_mem_t			*et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+	uint32_t			et_pio_bufnum;
+	uint32_t			et_pio_blknum;
+	uint32_t			et_pio_write_offset;
+	uint32_t			et_pio_offset;
+	size_t				et_pio_size;
+#endif
 };
 
 #define	EFX_TXQ_MAGIC	0x05092005
diff --git a/drivers/net/sfc/efx/base/efx_intr.c b/drivers/net/sfc/efx/base/efx_intr.c
index ecc09d3..50cf388 100644
--- a/drivers/net/sfc/efx/base/efx_intr.c
+++ b/drivers/net/sfc/efx/base/efx_intr.c
@@ -99,6 +99,20 @@ static const efx_intr_ops_t	__efx_intr_siena_ops = {
 };
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_intr_ops_t	__efx_intr_ef10_ops = {
+	ef10_intr_init,			/* eio_init */
+	ef10_intr_enable,		/* eio_enable */
+	ef10_intr_disable,		/* eio_disable */
+	ef10_intr_disable_unlocked,	/* eio_disable_unlocked */
+	ef10_intr_trigger,		/* eio_trigger */
+	ef10_intr_status_line,		/* eio_status_line */
+	ef10_intr_status_message,	/* eio_status_message */
+	ef10_intr_fatal,		/* eio_fatal */
+	ef10_intr_fini,			/* eio_fini */
+};
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	__checkReturn	efx_rc_t
 efx_intr_init(
 	__in		efx_nic_t *enp,
@@ -130,6 +144,12 @@ efx_intr_init(
 		break;
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		eiop = &__efx_intr_ef10_ops;
+		break;
+#endif	/* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(B_FALSE);
 		rc = ENOTSUP;
diff --git a/drivers/net/sfc/efx/base/efx_mac.c b/drivers/net/sfc/efx/base/efx_mac.c
index ce27376..1d50128 100644
--- a/drivers/net/sfc/efx/base/efx_mac.c
+++ b/drivers/net/sfc/efx/base/efx_mac.c
@@ -53,6 +53,21 @@ static const efx_mac_ops_t	__efx_siena_mac_ops = {
 };
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_mac_ops_t	__efx_ef10_mac_ops = {
+	ef10_mac_poll,				/* emo_poll */
+	ef10_mac_up,				/* emo_up */
+	ef10_mac_addr_set,			/* emo_addr_set */
+	ef10_mac_pdu_set,			/* emo_pdu_set */
+	ef10_mac_pdu_get,			/* emo_pdu_get */
+	ef10_mac_reconfigure,			/* emo_reconfigure */
+	ef10_mac_multicast_list_set,		/* emo_multicast_list_set */
+	ef10_mac_filter_default_rxq_set,	/* emo_filter_default_rxq_set */
+	ef10_mac_filter_default_rxq_clear,
+					/* emo_filter_default_rxq_clear */
+};
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	__checkReturn			efx_rc_t
 efx_mac_pdu_set(
 	__in				efx_nic_t *enp,
@@ -494,6 +509,13 @@ efx_mac_select(
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		emop = &__efx_ef10_mac_ops;
+		type = EFX_MAC_HUNTINGTON;
+		break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		rc = EINVAL;
 		goto fail1;
diff --git a/drivers/net/sfc/efx/base/efx_mcdi.c b/drivers/net/sfc/efx/base/efx_mcdi.c
index 7993ebf..4ad6da9 100644
--- a/drivers/net/sfc/efx/base/efx_mcdi.c
+++ b/drivers/net/sfc/efx/base/efx_mcdi.c
@@ -69,6 +69,21 @@ static const efx_mcdi_ops_t	__efx_mcdi_siena_ops = {
 
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_mcdi_ops_t	__efx_mcdi_ef10_ops = {
+	ef10_mcdi_init,			/* emco_init */
+	ef10_mcdi_send_request,		/* emco_send_request */
+	ef10_mcdi_poll_reboot,		/* emco_poll_reboot */
+	ef10_mcdi_poll_response,	/* emco_poll_response */
+	ef10_mcdi_read_response,	/* emco_read_response */
+	ef10_mcdi_fini,			/* emco_fini */
+	ef10_mcdi_feature_supported,	/* emco_feature_supported */
+	ef10_mcdi_get_timeout,		/* emco_get_timeout */
+};
+
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 
 
 	__checkReturn	efx_rc_t
@@ -89,6 +104,12 @@ efx_mcdi_init(
 		break;
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		emcop = &__efx_mcdi_ef10_ops;
+		break;
+#endif	/* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
@@ -1553,6 +1574,107 @@ efx_mcdi_log_ctrl(
 }
 
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * This function returns the pf and vf number of a function.  If it is a pf the
+ * vf number is 0xffff.  The vf number is the index of the vf on that
+ * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
+ * (pf=0,vf=1), (pf=0,vf=2) and the pf will return (pf=0, vf=0xffff).
+ */
+	__checkReturn		efx_rc_t
+efx_mcdi_get_function_info(
+	__in			efx_nic_t *enp,
+	__out			uint32_t *pfp,
+	__out_opt		uint32_t *vfp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+			    MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+	if (vfp != NULL)
+		*vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mcdi_privilege_mask(
+	__in			efx_nic_t *enp,
+	__in			uint32_t pf,
+	__in			uint32_t vf,
+	__out			uint32_t *maskp)
+{
+	efx_mcdi_req_t req;
+	uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+			    MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+	efx_rc_t rc;
+
+	(void) memset(payload, 0, sizeof (payload));
+	req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+	MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+	    PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+	    PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail1;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail2;
+	}
+
+	*maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	__checkReturn		efx_rc_t
 efx_mcdi_set_workaround(
 	__in			efx_nic_t *enp,
diff --git a/drivers/net/sfc/efx/base/efx_nic.c b/drivers/net/sfc/efx/base/efx_nic.c
index f94ff49..8db2625 100644
--- a/drivers/net/sfc/efx/base/efx_nic.c
+++ b/drivers/net/sfc/efx/base/efx_nic.c
@@ -54,6 +54,26 @@ efx_family(
 			return (0);
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+		case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+			/*
+			 * Hardware default for PF0 of uninitialised Huntington.
+			 * manftest must be able to cope with this device id.
+			 */
+			*efp = EFX_FAMILY_HUNTINGTON;
+			return (0);
+
+		case EFX_PCI_DEVID_FARMINGDALE:
+		case EFX_PCI_DEVID_GREENPORT:
+			*efp = EFX_FAMILY_HUNTINGTON;
+			return (0);
+
+		case EFX_PCI_DEVID_FARMINGDALE_VF:
+		case EFX_PCI_DEVID_GREENPORT_VF:
+			*efp = EFX_FAMILY_HUNTINGTON;
+			return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 		case EFX_PCI_DEVID_FALCON:	/* Obsolete, not supported */
 		default:
 			break;
@@ -152,6 +172,22 @@ static const efx_nic_ops_t	__efx_nic_siena_ops = {
 
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t	__efx_nic_hunt_ops = {
+	ef10_nic_probe,			/* eno_probe */
+	hunt_board_cfg,			/* eno_board_cfg */
+	ef10_nic_set_drv_limits,	/* eno_set_drv_limits */
+	ef10_nic_reset,			/* eno_reset */
+	ef10_nic_init,			/* eno_init */
+	ef10_nic_get_vi_pool,		/* eno_get_vi_pool */
+	ef10_nic_get_bar_region,	/* eno_get_bar_region */
+	ef10_nic_fini,			/* eno_fini */
+	ef10_nic_unprobe,		/* eno_unprobe */
+};
+
+#endif	/* EFSYS_OPT_HUNTINGTON */
+
 
 	__checkReturn	efx_rc_t
 efx_nic_create(
@@ -193,6 +229,23 @@ efx_nic_create(
 		break;
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		enp->en_enop = &__efx_nic_hunt_ops;
+		enp->en_features =
+		    EFX_FEATURE_IPV6 |
+		    EFX_FEATURE_LINK_EVENTS |
+		    EFX_FEATURE_PERIODIC_MAC_STATS |
+		    EFX_FEATURE_MCDI |
+		    EFX_FEATURE_MAC_HEADER_FILTERS |
+		    EFX_FEATURE_MCDI_DMA |
+		    EFX_FEATURE_PIO_BUFFERS |
+		    EFX_FEATURE_FW_ASSISTED_TSO |
+		    EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+		    EFX_FEATURE_PACKED_STREAM;
+		break;
+#endif	/* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		rc = ENOTSUP;
 		goto fail2;
diff --git a/drivers/net/sfc/efx/base/efx_phy.c b/drivers/net/sfc/efx/base/efx_phy.c
index a6a2af4..b663cf8 100644
--- a/drivers/net/sfc/efx/base/efx_phy.c
+++ b/drivers/net/sfc/efx/base/efx_phy.c
@@ -42,6 +42,16 @@ static const efx_phy_ops_t	__efx_phy_siena_ops = {
 };
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_phy_ops_t	__efx_phy_ef10_ops = {
+	ef10_phy_power,			/* epo_power */
+	NULL,				/* epo_reset */
+	ef10_phy_reconfigure,		/* epo_reconfigure */
+	ef10_phy_verify,		/* epo_verify */
+	ef10_phy_oui_get,		/* epo_oui_get */
+};
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	__checkReturn	efx_rc_t
 efx_phy_probe(
 	__in		efx_nic_t *enp)
@@ -63,6 +73,11 @@ efx_phy_probe(
 		epop = &__efx_phy_siena_ops;
 		break;
 #endif	/* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		epop = &__efx_phy_ef10_ops;
+		break;
+#endif	/* EFSYS_OPT_HUNTINGTON */
 	default:
 		rc = ENOTSUP;
 		goto fail1;
diff --git a/drivers/net/sfc/efx/base/efx_regs_ef10.h b/drivers/net/sfc/efx/base/efx_regs_ef10.h
new file mode 100644
index 0000000..11a9184
--- /dev/null
+++ b/drivers/net/sfc/efx/base/efx_regs_ef10.h
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef	_SYS_EFX_EF10_REGS_H
+#define	_SYS_EFX_EF10_REGS_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define	ERF_DZ_HW_REV_ID_LBN 0
+#define	ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define	ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define	ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define	ERF_DZ_MC_SFT_STATUS_LBN 0
+#define	ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define	ERF_DZ_ISR_REG_LBN 0
+#define	ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define	ERF_DZ_MC_DOORBELL_L_LBN 0
+#define	ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define	ERF_DZ_MC_DOORBELL_H_LBN 0
+#define	ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define	ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define	ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define	ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define	ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define	ERF_DZ_EVQ_RPTR_LBN 0
+#define	ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_EVQ_TMR_REG_STEP 8192
+#define	ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define	ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+#define	ERF_DZ_TC_TIMER_MODE_LBN 14
+#define	ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define	ERF_DZ_TC_TIMER_VAL_LBN 0
+#define	ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define	ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define	ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define	ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define	ERF_DZ_RX_DESC_WPTR_LBN 0
+#define	ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define	ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define	ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define	ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define	ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define	ERF_DZ_RSVD_LBN 76
+#define	ERF_DZ_RSVD_WIDTH 20
+#define	ERF_DZ_TX_DESC_WPTR_LBN 64
+#define	ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define	ERF_DZ_TX_DESC_HWORD_LBN 32
+#define	ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define	ERF_DZ_TX_DESC_LWORD_LBN 0
+#define	ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define	ESF_DZ_DRV_CODE_LBN 60
+#define	ESF_DZ_DRV_CODE_WIDTH 4
+#define	ESF_DZ_DRV_SUB_CODE_LBN 56
+#define	ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define	ESE_DZ_DRV_TIMER_EV 3
+#define	ESE_DZ_DRV_START_UP_EV 2
+#define	ESE_DZ_DRV_WAKE_UP_EV 1
+#define	ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define	ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define	ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define	ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define	ESF_DZ_DRV_SUB_DATA_LBN 0
+#define	ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define	ESF_DZ_DRV_EVQ_ID_LBN 0
+#define	ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define	ESF_DZ_DRV_TMR_ID_LBN 0
+#define	ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define	ESF_DZ_EV_CODE_LBN 60
+#define	ESF_DZ_EV_CODE_WIDTH 4
+#define	ESE_DZ_EV_CODE_MCDI_EV 12
+#define	ESE_DZ_EV_CODE_DRIVER_EV 5
+#define	ESE_DZ_EV_CODE_TX_EV 2
+#define	ESE_DZ_EV_CODE_RX_EV 0
+#define	ESE_DZ_OTHER other
+#define	ESF_DZ_EV_DATA_DW0_LBN 0
+#define	ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define	ESF_DZ_EV_DATA_DW1_LBN 32
+#define	ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define	ESF_DZ_EV_DATA_LBN 0
+#define	ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define	ESF_DZ_MC_CODE_LBN 60
+#define	ESF_DZ_MC_CODE_WIDTH 4
+#define	ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define	ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define	ESF_DZ_MC_DROP_EVENT_LBN 58
+#define	ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define	ESF_DZ_MC_SOFT_DW0_LBN 0
+#define	ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define	ESF_DZ_MC_SOFT_DW1_LBN 32
+#define	ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define	ESF_DZ_MC_SOFT_LBN 0
+#define	ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define	ESF_DZ_RX_CODE_LBN 60
+#define	ESF_DZ_RX_CODE_WIDTH 4
+#define	ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define	ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define	ESF_DZ_RX_DROP_EVENT_LBN 58
+#define	ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define	ESF_DD_RX_EV_RSVD2_LBN 54
+#define	ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define	ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define	ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define	ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define	ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define	ESF_EZ_RX_EV_RSVD2_LBN 54
+#define	ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define	ESF_DZ_RX_EV_SOFT2_LBN 52
+#define	ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define	ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define	ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define	ESF_DZ_RX_L4_CLASS_LBN 45
+#define	ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define	ESE_DZ_L4_CLASS_RSVD7 7
+#define	ESE_DZ_L4_CLASS_RSVD6 6
+#define	ESE_DZ_L4_CLASS_RSVD5 5
+#define	ESE_DZ_L4_CLASS_RSVD4 4
+#define	ESE_DZ_L4_CLASS_RSVD3 3
+#define	ESE_DZ_L4_CLASS_UDP 2
+#define	ESE_DZ_L4_CLASS_TCP 1
+#define	ESE_DZ_L4_CLASS_UNKNOWN 0
+#define	ESF_DZ_RX_L3_CLASS_LBN 42
+#define	ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define	ESE_DZ_L3_CLASS_RSVD7 7
+#define	ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define	ESE_DZ_L3_CLASS_ARP 5
+#define	ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define	ESE_DZ_L3_CLASS_FCOE 3
+#define	ESE_DZ_L3_CLASS_IP6 2
+#define	ESE_DZ_L3_CLASS_IP4 1
+#define	ESE_DZ_L3_CLASS_UNKNOWN 0
+#define	ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define	ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define	ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define	ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define	ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define	ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define	ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define	ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define	ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define	ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define	ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define	ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define	ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define	ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define	ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define	ESF_DZ_RX_MAC_CLASS_LBN 35
+#define	ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define	ESE_DZ_MAC_CLASS_MCAST 1
+#define	ESE_DZ_MAC_CLASS_UCAST 0
+#define	ESF_DD_RX_EV_SOFT1_LBN 32
+#define	ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define	ESF_EZ_RX_EV_SOFT1_LBN 34
+#define	ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define	ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define	ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define	ESE_EZ_ENCAP_HDR_GRE 2
+#define	ESE_EZ_ENCAP_HDR_VXLAN 1
+#define	ESE_EZ_ENCAP_HDR_NONE 0
+#define	ESF_DD_RX_EV_RSVD1_LBN 30
+#define	ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define	ESF_EZ_RX_EV_RSVD1_LBN 31
+#define	ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define	ESF_EZ_RX_ABORT_LBN 30
+#define	ESF_EZ_RX_ABORT_WIDTH 1
+#define	ESF_DZ_RX_ECC_ERR_LBN 29
+#define	ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define	ESF_DZ_RX_CRC1_ERR_LBN 28
+#define	ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define	ESF_DZ_RX_CRC0_ERR_LBN 27
+#define	ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define	ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define	ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define	ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define	ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define	ESF_DZ_RX_ECRC_ERR_LBN 24
+#define	ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define	ESF_DZ_RX_QLABEL_LBN 16
+#define	ESF_DZ_RX_QLABEL_WIDTH 5
+#define	ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define	ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define	ESF_DZ_RX_CONT_LBN 14
+#define	ESF_DZ_RX_CONT_WIDTH 1
+#define	ESF_DZ_RX_BYTES_LBN 0
+#define	ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/*
+ * Naming convention for the autogenerated layouts below:
+ *   ESF_<NAME>_LBN   - lowest bit number of field <NAME> within the
+ *                      64-bit descriptor or event
+ *   ESF_<NAME>_WIDTH - width of the field in bits
+ *   ESE_*            - enumerated values for the preceding field
+ * Common fields (e.g. ESF_DZ_TX_DESC_IS_OPT) are re-defined with
+ * identical values in every section that uses them; duplicate
+ * identical macro definitions are benign in C.
+ */
+/* ES_RX_KER_DESC */
+#define	ESF_DZ_RX_KER_RESERVED_LBN 62
+#define	ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define	ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define	ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+/* DW0/DW1 alias the low 32 and high 16 bits of the 48-bit BUF_ADDR */
+#define	ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define	ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define	ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define	ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define	ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define	ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define	ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define	ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_TSO 7
+#define	ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define	ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define	ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define	ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define	ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define	ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define	ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define	ESF_DZ_TX_TIMESTAMP_LBN 5
+#define	ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define	ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define	ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define	ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define	ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define	ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define	ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define	ESE_DZ_TX_OPTION_CRC_OFF 0
+#define	ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define	ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define	ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define	ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define	ESF_DZ_TX_CODE_LBN 60
+#define	ESF_DZ_TX_CODE_WIDTH 4
+#define	ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define	ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define	ESF_DZ_TX_DROP_EVENT_LBN 58
+#define	ESF_DZ_TX_DROP_EVENT_WIDTH 1
+/* DD_* and EZ_* variants below overlap; which applies presumably depends
+ * on the controller revision - confirm against the datapath firmware docs */
+#define	ESF_DD_TX_EV_RSVD_LBN 48
+#define	ESF_DD_TX_EV_RSVD_WIDTH 10
+#define	ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define	ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define	ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define	ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define	ESF_EZ_TX_EV_RSVD_LBN 48
+#define	ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define	ESF_DZ_TX_SOFT2_LBN 32
+#define	ESF_DZ_TX_SOFT2_WIDTH 16
+#define	ESF_DD_TX_SOFT1_LBN 24
+#define	ESF_DD_TX_SOFT1_WIDTH 8
+#define	ESF_EZ_TX_CAN_MERGE_LBN 31
+#define	ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define	ESF_EZ_TX_SOFT1_LBN 24
+#define	ESF_EZ_TX_SOFT1_WIDTH 7
+#define	ESF_DZ_TX_QLABEL_LBN 16
+#define	ESF_DZ_TX_QLABEL_WIDTH 5
+#define	ESF_DZ_TX_DESCR_INDX_LBN 0
+#define	ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define	ESF_DZ_TX_KER_TYPE_LBN 63
+#define	ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define	ESF_DZ_TX_KER_CONT_LBN 62
+#define	ESF_DZ_TX_KER_CONT_WIDTH 1
+#define	ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define	ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+/* DW0/DW1 alias the low 32 and high 16 bits of the 48-bit BUF_ADDR */
+#define	ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define	ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define	ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define	ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define	ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define	ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define	ESF_DZ_TX_PIO_TYPE_LBN 63
+#define	ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define	ESF_DZ_TX_PIO_OPT_LBN 60
+#define	ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define	ESF_DZ_TX_PIO_CONT_LBN 59
+#define	ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define	ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define	ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define	ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define	ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define	ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define	ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_TSO 7
+#define	ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define	ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define	ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define	ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define	ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2A_DESC */
+#define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define	ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define	ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_TSO 7
+#define	ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define	ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define	ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define	ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define	ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_TSO 7
+#define	ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define	ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define	ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
+#define	ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define	ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define	ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define	ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
+#define	ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define	ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define	ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define	ESE_DZ_TX_OPTION_DESC_TSO 7
+#define	ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define	ESF_DZ_TX_VLAN_OP_LBN 32
+#define	ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define	ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define	ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define	ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define	ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp               (bit 11 lost)
+ * EVQ_RPTR:    1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR:     11mmvvvvvvvv               (bits 8:13 of value lost)
+ */
+/* Third dword of the TX_DESC_UPD register (base offset + 8 bytes), used
+ * for the indirect EVQ accesses described above */
+#define	ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define	ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define	ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define	ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define	EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define	EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define	ERF_DD_EVQ_IND_RPTR_LBN 0
+#define	ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define	ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define	ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define	EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define	ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define	ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define	ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define	ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
+/* Packed stream magic doorbell command */
+#define	ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11
+#define	ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1
+
+#define	ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8
+#define	ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3
+#define	ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0
+
+#define	ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0
+#define	ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8
+
+/* Packed stream RX packet prefix */
+/* 64-bit prefix: 32-bit timestamp, then captured and original lengths */
+#define	ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0
+#define	ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32
+#define	ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32
+#define	ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16
+#define	ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
+#define	ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+
+/*
+ * An extra flag for the packed stream mode,
+ * signalling the start of a new buffer
+ */
+#define	ESF_DZ_RX_EV_ROTATE_LBN 53
+#define	ESF_DZ_RX_EV_ROTATE_WIDTH 1
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/drivers/net/sfc/efx/base/efx_rx.c b/drivers/net/sfc/efx/base/efx_rx.c
index 97da63d..cf6ee3a 100644
--- a/drivers/net/sfc/efx/base/efx_rx.c
+++ b/drivers/net/sfc/efx/base/efx_rx.c
@@ -104,6 +104,20 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {
 };
 #endif	/* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/*
+ * RX datapath operations shared by the EF10-architecture families
+ * (Huntington and Medford); installed by efx_rx_init() according to
+ * enp->en_family.
+ */
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+	ef10_rx_init,				/* erxo_init */
+	ef10_rx_fini,				/* erxo_fini */
+	ef10_rx_prefix_pktlen,			/* erxo_prefix_pktlen */
+	ef10_rx_qpost,				/* erxo_qpost */
+	ef10_rx_qpush,				/* erxo_qpush */
+	ef10_rx_qflush,				/* erxo_qflush */
+	ef10_rx_qenable,			/* erxo_qenable */
+	ef10_rx_qcreate,			/* erxo_qcreate */
+	ef10_rx_qdestroy,			/* erxo_qdestroy */
+};
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 
 	__checkReturn	efx_rc_t
 efx_rx_init(
@@ -132,6 +146,12 @@ efx_rx_init(
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		erxop = &__efx_rx_ef10_ops;
+		break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
diff --git a/drivers/net/sfc/efx/base/efx_sram.c b/drivers/net/sfc/efx/base/efx_sram.c
index 0f16376..a55b06e 100644
--- a/drivers/net/sfc/efx/base/efx_sram.c
+++ b/drivers/net/sfc/efx/base/efx_sram.c
@@ -49,6 +49,22 @@ efx_sram_buf_tbl_set(
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+	if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+	    enp->en_family == EFX_FAMILY_MEDFORD) {
+		/*
+		 * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+		 * pulled inside the Falcon/Siena queue create/destroy code,
+		 * and then the original functions can be removed (see bug30834
+		 * comment #1).  But, for now, we just ensure that they are
+		 * no-ops for EF10, to allow bringing up existing drivers
+		 * without modification.
+		 */
+
+		return (0);
+	}
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	if (stop >= EFX_BUF_TBL_SIZE) {
 		rc = EFBIG;
 		goto fail1;
@@ -155,6 +171,22 @@ efx_sram_buf_tbl_clear(
 	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
 	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
 
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+	if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+	    enp->en_family == EFX_FAMILY_MEDFORD) {
+		/*
+		 * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+		 * pulled inside the Falcon/Siena queue create/destroy code,
+		 * and then the original functions can be removed (see bug30834
+		 * comment #1).  But, for now, we just ensure that they are
+		 * no-ops for EF10, to allow bringing up existing drivers
+		 * without modification.
+		 */
+
+		return;
+	}
+#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
 	EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
 
 	EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
diff --git a/drivers/net/sfc/efx/base/efx_tx.c b/drivers/net/sfc/efx/base/efx_tx.c
index 7333f0a..ed66695 100644
--- a/drivers/net/sfc/efx/base/efx_tx.c
+++ b/drivers/net/sfc/efx/base/efx_tx.c
@@ -129,6 +129,29 @@ static const efx_tx_ops_t	__efx_tx_siena_ops = {
 };
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+/*
+ * TX datapath operations for Huntington; installed by efx_tx_init()
+ * when enp->en_family == EFX_FAMILY_HUNTINGTON.  All entry points are
+ * the common EF10 implementations.
+ */
+static const efx_tx_ops_t	__efx_tx_hunt_ops = {
+	ef10_tx_init,				/* etxo_init */
+	ef10_tx_fini,				/* etxo_fini */
+	ef10_tx_qcreate,			/* etxo_qcreate */
+	ef10_tx_qdestroy,			/* etxo_qdestroy */
+	ef10_tx_qpost,				/* etxo_qpost */
+	ef10_tx_qpush,				/* etxo_qpush */
+	ef10_tx_qpace,				/* etxo_qpace */
+	ef10_tx_qflush,				/* etxo_qflush */
+	ef10_tx_qenable,			/* etxo_qenable */
+	ef10_tx_qpio_enable,			/* etxo_qpio_enable */
+	ef10_tx_qpio_disable,			/* etxo_qpio_disable */
+	ef10_tx_qpio_write,			/* etxo_qpio_write */
+	ef10_tx_qpio_post,			/* etxo_qpio_post */
+	ef10_tx_qdesc_post,			/* etxo_qdesc_post */
+	ef10_tx_qdesc_dma_create,		/* etxo_qdesc_dma_create */
+	ef10_tx_qdesc_tso_create,		/* etxo_qdesc_tso_create */
+	ef10_tx_qdesc_tso2_create,		/* etxo_qdesc_tso2_create */
+	ef10_tx_qdesc_vlantci_create,		/* etxo_qdesc_vlantci_create */
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	__checkReturn	efx_rc_t
 efx_tx_init(
 	__in		efx_nic_t *enp)
@@ -156,6 +179,12 @@ efx_tx_init(
 		break;
 #endif /* EFSYS_OPT_SIENA */
 
+#if EFSYS_OPT_HUNTINGTON
+	case EFX_FAMILY_HUNTINGTON:
+		etxop = &__efx_tx_hunt_ops;
+		break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
 	default:
 		EFSYS_ASSERT(0);
 		rc = ENOTSUP;
diff --git a/drivers/net/sfc/efx/base/hunt_impl.h b/drivers/net/sfc/efx/base/hunt_impl.h
new file mode 100644
index 0000000..0e0c870
--- /dev/null
+++ b/drivers/net/sfc/efx/base/hunt_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define	_SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * Missing register definitions.
+ * Guarded with #ifndef so that definitions supplied by a newer
+ * efx_regs_ef10.h, if present, take precedence over these fallbacks.
+ */
+#ifndef	ER_DZ_TX_PIOBUF_OFST
+#define	ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef	ER_DZ_TX_PIOBUF_STEP
+#define	ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef	ER_DZ_TX_PIOBUF_ROWS
+#define	ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef	ER_DZ_TX_PIOBUF_SIZE
+#define	ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+/* Number of PIO buffers; reported as enc_piobuf_limit by hunt_board_cfg() */
+#define	HUNT_PIOBUF_NBUFS	(16)
+#define	HUNT_PIOBUF_SIZE	(ER_DZ_TX_PIOBUF_SIZE)
+
+/* PIO allocation granularity: 1/32 of a buffer (64 bytes for 2048-byte
+ * PIO buffers) */
+#define	HUNT_MIN_PIO_ALLOC_SIZE	(HUNT_PIOBUF_SIZE / 32)
+
+
+/* NIC */
+
+extern	__checkReturn	efx_rc_t
+hunt_board_cfg(
+	__in		efx_nic_t *enp);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_HUNT_IMPL_H */
diff --git a/drivers/net/sfc/efx/base/hunt_nic.c b/drivers/net/sfc/efx/base/hunt_nic.c
new file mode 100644
index 0000000..263f474
--- /dev/null
+++ b/drivers/net/sfc/efx/base/hunt_nic.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON
+
+#include "ef10_tlv_layout.h"
+
+/*
+ * Report the PCIe bandwidth (in Mbit/s) this adapter may require,
+ * derived from the set of *available* port modes: Huntington firmware
+ * may not report the current mode, so the most capable available mode
+ * is assumed to be in use.  Writes 0 through bandwidth_mbpsp (and still
+ * returns 0) when no port mode information is available.
+ */
+static	__checkReturn	efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
+	__in		efx_nic_t *enp,
+	__out		uint32_t *bandwidth_mbpsp)
+{
+	uint32_t port_modes;
+	uint32_t max_port_mode;
+	uint32_t bandwidth;
+	efx_rc_t rc;
+
+	/*
+	 * On Huntington, the firmware may not give us the current port mode, so
+	 * we need to go by the set of available port modes and assume the most
+	 * capable mode is in use.
+	 */
+
+	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+		/* No port mode info available; rc is deliberately discarded */
+		bandwidth = 0;
+		goto out;
+	}
+
+	if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+		/*
+		 * This needs the full PCIe bandwidth (and could use
+		 * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+		 */
+		if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+			    EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
+			goto fail1;
+	} else {
+		if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+			max_port_mode = TLV_PORT_MODE_40G;
+		} else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+			max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+		} else {
+			/* Assume two 10G ports */
+			max_port_mode = TLV_PORT_MODE_10G_10G;
+		}
+
+		if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+							    &bandwidth)) != 0)
+			goto fail2;
+	}
+
+out:
+	*bandwidth_mbpsp = bandwidth;
+
+	return (0);
+
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+/*
+ * Gather Huntington board configuration via MCDI: port mapping, PCIe
+ * function identity (PF/VF), MAC address, PHY capabilities, firmware
+ * bug workarounds, clock frequencies, datapath capabilities and
+ * resource limits; results are stored in enp->en_nic_cfg and
+ * enp->en_port.  Returns 0 on success or an efx_rc_t error code.
+ */
+	__checkReturn	efx_rc_t
+hunt_board_cfg(
+	__in		efx_nic_t *enp)
+{
+	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+	uint8_t mac_addr[6] = { 0 };
+	uint32_t board_type = 0;
+	ef10_link_state_t els;
+	efx_port_t *epp = &(enp->en_port);
+	uint32_t port;
+	uint32_t pf;
+	uint32_t vf;
+	uint32_t mask;
+	uint32_t flags;
+	uint32_t sysclk, dpcpu_clk;
+	uint32_t base, nvec;
+	uint32_t bandwidth;
+	efx_rc_t rc;
+
+	if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+		goto fail1;
+
+	/*
+	 * NOTE: The MCDI protocol numbers ports from zero.
+	 * The common code MCDI interface numbers ports from one.
+	 */
+	emip->emi_port = port + 1;
+
+	if ((rc = ef10_external_port_mapping(enp, port,
+		    &encp->enc_external_port)) != 0)
+		goto fail2;
+
+	/*
+	 * Get PCIe function number from firmware (used for
+	 * per-function privilege and dynamic config info).
+	 *  - PCIe PF: pf = PF number, vf = 0xffff.
+	 *  - PCIe VF: pf = parent PF, vf = VF number.
+	 */
+	if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+		goto fail3;
+
+	encp->enc_pf = pf;
+	encp->enc_vf = vf;
+
+	/* MAC address for this function */
+	if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+		rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+		/* mac_addr[0] bit 0x02 marks a locally administered address */
+		if ((rc == 0) && (mac_addr[0] & 0x02)) {
+			/*
+			 * If the static config does not include a global MAC
+			 * address pool then the board may return a locally
+			 * administered MAC address (this should only happen on
+			 * incorrectly programmed boards).
+			 */
+			rc = EINVAL;
+		}
+	} else {
+		rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+	}
+	if (rc != 0)
+		goto fail4;
+
+	EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+	/* Board configuration */
+	rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+	if (rc != 0) {
+		/* Unprivileged functions may not be able to read board cfg */
+		if (rc == EACCES)
+			board_type = 0;
+		else
+			goto fail5;
+	}
+
+	encp->enc_board_type = board_type;
+	encp->enc_clk_mult = 1; /* not used for Huntington */
+
+	/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+	if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+		goto fail6;
+
+	/* Obtain the default PHY advertised capabilities */
+	if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+		goto fail7;
+	epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+	epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+	/*
+	 * Enable firmware workarounds for hardware errata.
+	 * Expected responses are:
+	 *  - 0 (zero):
+	 *	Success: workaround enabled or disabled as requested.
+	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+	 *	Firmware does not support the MC_CMD_WORKAROUND request.
+	 *	(assume that the workaround is not supported).
+	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
+	 *	Firmware does not support the requested workaround.
+	 *  - MC_CMD_ERR_EPERM  (reported as EACCES):
+	 *	Unprivileged function cannot enable/disable workarounds.
+	 *
+	 * See efx_mcdi_request_errcode() for MCDI error translations.
+	 */
+
+	/*
+	 * If the bug35388 workaround is enabled, then use an indirect access
+	 * method to avoid unsafe EVQ writes.
+	 */
+	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+	    NULL);
+	if ((rc == 0) || (rc == EACCES))
+		encp->enc_bug35388_workaround = B_TRUE;
+	else if ((rc == ENOTSUP) || (rc == ENOENT))
+		encp->enc_bug35388_workaround = B_FALSE;
+	else
+		goto fail8;
+
+	/*
+	 * If the bug41750 workaround is enabled, then do not test interrupts,
+	 * as the test will fail (seen with Greenport controllers).
+	 */
+	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+	    NULL);
+	if (rc == 0) {
+		encp->enc_bug41750_workaround = B_TRUE;
+	} else if (rc == EACCES) {
+		/* Assume a controller with 40G ports needs the workaround. */
+		if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+			encp->enc_bug41750_workaround = B_TRUE;
+		else
+			encp->enc_bug41750_workaround = B_FALSE;
+	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+		encp->enc_bug41750_workaround = B_FALSE;
+	} else {
+		goto fail9;
+	}
+	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+		/* Interrupt testing does not work for VFs. See bug50084. */
+		encp->enc_bug41750_workaround = B_TRUE;
+	}
+
+	/*
+	 * If the bug26807 workaround is enabled, then firmware has enabled
+	 * support for chained multicast filters. Firmware will reset (FLR)
+	 * functions which have filters in the hardware filter table when the
+	 * workaround is enabled/disabled.
+	 *
+	 * We must recheck if the workaround is enabled after inserting the
+	 * first hardware filter, in case it has been changed since this check.
+	 */
+	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+	    B_TRUE, &flags);
+	if (rc == 0) {
+		encp->enc_bug26807_workaround = B_TRUE;
+		if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+			/*
+			 * Other functions had installed filters before the
+			 * workaround was enabled, and they have been reset
+			 * by firmware.
+			 */
+			EFSYS_PROBE(bug26807_workaround_flr_done);
+			/* FIXME: bump MC warm boot count ? */
+		}
+	} else if (rc == EACCES) {
+		/*
+		 * Unprivileged functions cannot enable the workaround in older
+		 * firmware.
+		 */
+		encp->enc_bug26807_workaround = B_FALSE;
+	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+		encp->enc_bug26807_workaround = B_FALSE;
+	} else {
+		goto fail10;
+	}
+
+	/* Get clock frequencies (in MHz). */
+	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+		goto fail11;
+
+	/*
+	 * The Huntington timer quantum is 1536 sysclk cycles, documented for
+	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+	 */
+	encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+	/* Maximum timer period is bounded by the timer-value field width,
+	 * which differs under the bug35388 indirect-access workaround. */
+	if (encp->enc_bug35388_workaround) {
+		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+		ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+	} else {
+		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+		FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+	}
+
+	encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+
+	/* Check capabilities of running datapath firmware */
+	if ((rc = ef10_get_datapath_caps(enp)) != 0)
+		goto fail12;
+
+	/* Alignment for receive packet DMA buffers */
+	encp->enc_rx_buf_align_start = 1;
+	encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+	/* Alignment for WPTR updates */
+	encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+	/*
+	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+	 * resources (allocated to this PCIe function), which is zero until
+	 * after we have allocated VIs.
+	 */
+	encp->enc_evq_limit = 1024;
+	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+	/*
+	 * The workaround for bug35388 uses the top bit of transmit queue
+	 * descriptor writes, preventing the use of 4096 descriptor TXQs.
+	 */
+	encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
+
+	encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+	encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+	encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+	encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+	/*
+	 * Get the current privilege mask. Note that this may be modified
+	 * dynamically, so this value is informational only. DO NOT use
+	 * the privilege mask to check for sufficient privileges, as that
+	 * can result in time-of-check/time-of-use bugs.
+	 */
+	if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+		goto fail13;
+	encp->enc_privilege_mask = mask;
+
+	/* Get interrupt vector limits */
+	if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+		if (EFX_PCI_FUNCTION_IS_PF(encp))
+			goto fail14;
+
+		/* Ignore error (cannot query vector limits from a VF). */
+		base = 0;
+		nvec = 1024;
+	}
+	encp->enc_intr_vec_base = base;
+	encp->enc_intr_limit = nvec;
+
+	/*
+	 * Maximum number of bytes into the frame the TCP header can start for
+	 * firmware assisted TSO to work.
+	 */
+	encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+	if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+		goto fail15;
+	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+	/* All Huntington devices have a PCIe Gen3, 8 lane connector */
+	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+	return (0);
+
+fail15:
+	EFSYS_PROBE(fail15);
+fail14:
+	EFSYS_PROBE(fail14);
+fail13:
+	EFSYS_PROBE(fail13);
+fail12:
+	EFSYS_PROBE(fail12);
+fail11:
+	EFSYS_PROBE(fail11);
+fail10:
+	EFSYS_PROBE(fail10);
+fail9:
+	EFSYS_PROBE(fail9);
+fail8:
+	EFSYS_PROBE(fail8);
+fail7:
+	EFSYS_PROBE(fail7);
+fail6:
+	EFSYS_PROBE(fail6);
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+
+#endif	/* EFSYS_OPT_HUNTINGTON */
-- 
2.5.5



More information about the dev mailing list