[dpdk-dev] [RFC 5/5] Add Intel OPAE Share Code

Rosen Xu rosen.xu at intel.com
Fri Mar 9 16:39:04 CET 2018


Signed-off-by: Tianfei Zhang <tianfei.zhang at intel.com>
---
 drivers/raw/ifpga_rawdev/base/Makefile             |   54 +
 drivers/raw/ifpga_rawdev/base/ifpga_api.c          |  420 +++++
 drivers/raw/ifpga_rawdev/base/ifpga_api.h          |   78 +
 drivers/raw/ifpga_rawdev/base/ifpga_compat.h       |   85 +
 drivers/raw/ifpga_rawdev/base/ifpga_defines.h      | 1699 ++++++++++++++++++++
 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c    |  808 ++++++++++
 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h    |   39 +
 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c  |  305 ++++
 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h  |  197 +++
 drivers/raw/ifpga_rawdev/base/ifpga_fme.c          |  731 +++++++++
 drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c    |  297 ++++
 drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c    |  399 +++++
 drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c    |  711 ++++++++
 drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c       |  364 +++++
 drivers/raw/ifpga_rawdev/base/ifpga_hw.h           |  145 ++
 drivers/raw/ifpga_rawdev/base/ifpga_port.c         |  699 ++++++++
 drivers/raw/ifpga_rawdev/base/ifpga_port_error.c   |  112 ++
 drivers/raw/ifpga_rawdev/base/opae_debug.c         |   95 ++
 drivers/raw/ifpga_rawdev/base/opae_debug.h         |   15 +
 drivers/raw/ifpga_rawdev/base/opae_hw_api.c        |  355 ++++
 drivers/raw/ifpga_rawdev/base/opae_hw_api.h        |  235 +++
 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c  |  120 ++
 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h  |  253 +++
 drivers/raw/ifpga_rawdev/base/opae_osdep.h         |   87 +
 .../ifpga_rawdev/base/osdep_raw/osdep_generic.h    |   69 +
 .../ifpga_rawdev/base/osdep_rte/osdep_generic.h    |   41 +
 26 files changed, 8413 insertions(+)
 create mode 100644 drivers/raw/ifpga_rawdev/base/Makefile
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_compat.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_defines.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_hw.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_debug.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_debug.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/opae_osdep.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
 create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h

diff --git a/drivers/raw/ifpga_rawdev/base/Makefile b/drivers/raw/ifpga_rawdev/base/Makefile
new file mode 100644
index 0000000..9c0d64b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/Makefile
@@ -0,0 +1,54 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2017-2018 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += ifpga_fme_pr.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
new file mode 100644
index 0000000..07e9be5
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
@@ -0,0 +1,420 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+			     u32 start, u32 count, s32 evtfds[])
+{
+	struct opae_bridge *br = acc->br;
+	struct ifpga_port_hw *port;
+	struct fpga_uafu_irq_set irq_set;
+
+	if (!br || !br->data)
+		return -EINVAL;
+
+	port = br->data;
+
+	irq_set.start = start;
+	irq_set.count = count;
+	irq_set.evtfds = evtfds;
+
+	return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+			     PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+	.set_irq = ifpga_acc_set_irq,
+};
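+
+/*
+ * Usage sketch (illustrative): binding two caller-created eventfds to the
+ * first two UAFU interrupt vectors through the accelerator ops above.
+ *
+ *	s32 evtfds[2] = { fd0, fd1 };
+ *
+ *	ifpga_acc_ops.set_irq(acc, 0, 2, evtfds);
+ */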
+
+/* Bridge APIs */
+
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+	struct ifpga_port_hw *port = br->data;
+
+	return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+	.reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
+			   u32 size, u64 *status)
+{
+	struct ifpga_fme_hw *fme = mgr->data;
+	struct ifpga_hw *hw = fme->parent;
+
+	return ifpga_pr(hw, id, buf, size, status);
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+	.flash = ifpga_mgr_flash,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+	struct ifpga_hw *hw = malloc(sizeof(*hw));
+	int ret;
+
+	if (!hw)
+		return -ENOMEM;
+
+	memset(hw, 0, sizeof(*hw));
+	hw->pci_data = adapter->data;
+	hw->adapter = adapter;
+
+	ret = ifpga_bus_enumerate(hw);
+	if (ret) {
+		/* do not leak the hw structure on enumeration failure */
+		free(hw);
+		return ret;
+	}
+
+	return ifpga_bus_init(hw);
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+	.enumerate = ifpga_adapter_enumerate,
+};
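+
+/*
+ * Usage sketch (illustrative): the generic OPAE layer is expected to call
+ * these ops; given an opae_adapter whose ->data carries the PCI device
+ * data, enumeration discovers the FME and Ports behind it.
+ *
+ *	ret = ifpga_adapter_ops.enumerate(adapter);
+ *	// on success, hw->fme and hw->port[] are populated
+ */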
+
+/**
+ *  ifpga_enumerate - enumerate the Device Feature List
+ *  @hw: pointer to the HW structure
+ *
+ *  This function enumerates the Device Feature List to discover
+ *  the FME and Port devices. The FME and Port devices are filled
+ *  into the HW structure when enumeration is done.
+ *
+ *  @return
+ *   - 0: Success, devices enumerated.
+ *   - <0: Error code returned in enumeration.
+ **/
+int ifpga_enumerate(struct ifpga_hw *hw)
+{
+	return ifpga_bus_enumerate(hw);
+}
+
+/**
+ *  ifpga_reset_port - reset a port device
+ *  @hw: pointer to the HW structure
+ *  @port_id: port device id
+ *
+ *  @return
+ *   - 0: Success
+ *   - <0: Failure to reset a port device.
+ **/
+int ifpga_reset_port(struct ifpga_hw *hw, u32 port_id)
+{
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	return fpga_port_reset(&hw->port[port_id]);
+}
+
+/**
+ *  ifpga_get_afu_mmio_info - get UAFU MMIO resource info
+ *  @hw: pointer to the HW structure
+ *  @port_id: port device id
+ *  @mem_resource: AFU MMIO resource
+ *  @num_resource: number of resources
+ *
+ *  return 0 on success or an error code
+ **/
+int ifpga_get_afu_mmio_info(struct ifpga_hw *hw, u32 port_id,
+		struct rte_mem_resource *mem_resource,
+		u32 *num_resource)
+{
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	mem_resource->addr =
+		hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].addr;
+	mem_resource->len =
+		hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size;
+	mem_resource->phys_addr =
+		hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].phys_addr;
+
+	*num_resource = 1;
+
+	return 0;
+}
+
+/**
+ *  ifpga_get_afu_uuid - get AFU uuid
+ *  @hw: pointer to the HW structure
+ *  @port_id: port device id
+ *  @uuid: the AFU's uuid
+ *
+ *  @return
+ *   - 0: Success
+ *   - <0: Failure to get uuid.
+ **/
+int ifpga_get_afu_uuid(struct ifpga_hw *hw, u32 port_id, uuid *uuid)
+{
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	if (!hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size)
+		return -ENODEV;
+
+	return fpga_get_afu_uuid(&hw->port[port_id], uuid);
+}
+
+/**
+ *  ifpga_pr - do the partial reconfiguration for a given port device
+ *  @hw: pointer to the HW structure
+ *  @port_id: the port device id
+ *  @buffer: the buffer of the bitstream
+ *  @size: the size of the bitstream
+ *  @status: hardware status, including the PR error code if -EIO is returned.
+ *
+ *  @return
+ *   - 0: Success, partial reconfiguration finished.
+ *   - <0: Error code returned in partial reconfiguration.
+ **/
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+	     u64 *status)
+{
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	return do_pr(hw, port_id, buffer, size, status);
+}
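+
+/*
+ * Typical PR flow (sketch; gbs_buf/gbs_len are caller-provided): reset the
+ * port, program the bitstream, and decode the hardware status on -EIO.
+ *
+ *	u64 status = 0;
+ *
+ *	ifpga_reset_port(hw, 0);
+ *	if (ifpga_pr(hw, 0, gbs_buf, gbs_len, &status) == -EIO)
+ *		ifpga_show_pr_error(status);
+ */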
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		   struct feature_prop *prop)
+{
+	if (!hw || !prop)
+		return -EINVAL;
+
+	switch (fiu_id) {
+	case FEATURE_FIU_ID_FME:
+		return fme_get_prop(&hw->fme, prop);
+	case FEATURE_FIU_ID_PORT:
+		if (!is_valid_port_id(hw, port_id))
+			return -ENODEV;
+		return port_get_prop(&hw->port[port_id], prop);
+	}
+
+	return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		   struct feature_prop *prop)
+{
+	if (!hw || !prop)
+		return -EINVAL;
+
+	switch (fiu_id) {
+	case FEATURE_FIU_ID_FME:
+		return fme_set_prop(&hw->fme, prop);
+	case FEATURE_FIU_ID_PORT:
+		if (!is_valid_port_id(hw, port_id))
+			return -ENODEV;
+		return port_set_prop(&hw->port[port_id], prop);
+	}
+
+	return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		  u32 feature_id, void *irq_set)
+{
+	if (!hw || !irq_set)
+		return -EINVAL;
+
+	switch (fiu_id) {
+	case FEATURE_FIU_ID_FME:
+		return fme_set_irq(&hw->fme, feature_id, irq_set);
+	case FEATURE_FIU_ID_PORT:
+		if (!is_valid_port_id(hw, port_id))
+			return -ENODEV;
+		return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+	}
+
+	return -ENOENT;
+}
+
+int ifpga_port_umsg_enable(struct ifpga_hw *hw, u32 port_id, bool enable)
+{
+	struct ifpga_port_hw *port = get_port(hw, port_id);
+	int ret = 0;
+
+	if (!port)
+		return -ENODEV;
+
+	/* hold the port lock across the whole UMSG update */
+	spinlock_lock(&port->lock);
+	if (afu_port_umsg_enable(port, enable))
+		ret = -ENODEV;
+	spinlock_unlock(&port->lock);
+
+	return ret;
+}
+
+int ifpga_port_umsg_set_mode(struct ifpga_hw *hw, u32 port_id, u32 mode)
+{
+	struct ifpga_port_hw *port = get_port(hw, port_id);
+	int ret = 0;
+
+	if (!port)
+		return -ENODEV;
+
+	/* hold the port lock across the whole UMSG update */
+	spinlock_lock(&port->lock);
+	if (afu_port_umsg_set_mode(port, mode))
+		ret = -ENODEV;
+	spinlock_unlock(&port->lock);
+
+	return ret;
+}
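+
+/*
+ * Sketch: UMSG is enabled first, then the hint mode is applied. mode_bits
+ * is a caller-chosen mask (assumed here to select which UMSGs use hints).
+ *
+ *	if (!ifpga_port_umsg_enable(hw, port_id, true))
+ *		ifpga_port_umsg_set_mode(hw, port_id, mode_bits);
+ */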
+
+void ifpga_show_pr_error(u64 pr_error)
+{
+#if 0
+	int i = 0;
+
+	for_each_set_bit(i, &pr_error, PR_MAX_ERR_NUM)
+		printf("PR Error: %s\n", pr_err_msg[i]);
+#endif
+	printf("PR Error: 0x%llx\n", (unsigned long long)pr_error);
+}
+
+int ifpga_mmio64_read(struct ifpga_hw *hw, u32 port_id, u64 offset, u64 *value)
+{
+	u8 *addr;
+	u32 size;
+
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	addr = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].addr;
+	size = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size;
+
+	/* the whole 8-byte access must fall inside the UAFU region */
+	if (offset + sizeof(u64) > size)
+		return -EINVAL;
+
+	*value = readq(addr + offset);
+
+	return 0;
+}
+
+int ifpga_mmio64_write(struct ifpga_hw *hw, u32 port_id, u64 offset, u64 value)
+{
+	u8 *addr;
+	u32 size;
+
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	addr = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].addr;
+	size = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size;
+
+	/* the whole 8-byte access must fall inside the UAFU region */
+	if (offset + sizeof(u64) > size)
+		return -EINVAL;
+
+	writeq(value, addr + offset);
+
+	return 0;
+}
+
+int ifpga_mmio32_read(struct ifpga_hw *hw, u32 port_id, u64 offset, u32 *value)
+{
+	u8 *addr;
+	u32 size;
+
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	addr = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].addr;
+	size = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size;
+
+	/* the whole 4-byte access must fall inside the UAFU region */
+	if (offset + sizeof(u32) > size)
+		return -EINVAL;
+
+	*value = readl(addr + offset);
+
+	return 0;
+}
+
+int ifpga_mmio32_write(struct ifpga_hw *hw, u32 port_id, u64 offset, u32 value)
+{
+	u8 *addr;
+	u32 size;
+
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	addr = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].addr;
+	size = hw->port[port_id].sub_feature[PORT_FEATURE_ID_UAFU].size;
+
+	/* the whole 4-byte access must fall inside the UAFU region */
+	if (offset + sizeof(u32) > size)
+		return -EINVAL;
+
+	writel(value, addr + offset);
+
+	return 0;
+}
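+
+/*
+ * Sketch: a bounds-checked 32-bit read-modify-write on a UAFU CSR via the
+ * accessors above; the 0x40 offset is purely illustrative.
+ *
+ *	u32 v;
+ *
+ *	if (!ifpga_mmio32_read(hw, port_id, 0x40, &v))
+ *		ifpga_mmio32_write(hw, port_id, 0x40, v | 0x1);
+ */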
+
+int ifpga_fme_hw_init(struct ifpga_hw *hw)
+{
+	return fme_hw_init(&hw->fme);
+}
+
+void ifpga_fme_hw_uinit(struct ifpga_hw *hw)
+{
+	fme_hw_uinit(&hw->fme);
+}
+
+void ifpga_port_hw_uinit(struct ifpga_hw *hw, int port_id)
+{
+	struct ifpga_port_hw *port;
+
+	if (!is_valid_port_id(hw, port_id))
+		return;
+
+	port = &hw->port[port_id];
+
+	if (port->state == IFPGA_PORT_ATTACHED)
+		port_hw_uinit(port);
+}
+
+int ifpga_port_hw_init(struct ifpga_hw *hw, int port_id)
+{
+	struct ifpga_port_hw *port;
+
+	if (!is_valid_port_id(hw, port_id))
+		return -ENODEV;
+
+	port = &hw->port[port_id];
+
+	if (port->state != IFPGA_PORT_ATTACHED)
+		return -EINVAL;
+
+	return port_hw_init(port);
+}
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
new file mode 100644
index 0000000..dba6938
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+
+/* common APIs */
+int ifpga_enumerate(struct ifpga_hw *hw);
+int ifpga_fme_hw_init(struct ifpga_hw *hw);
+void ifpga_fme_hw_uinit(struct ifpga_hw *hw);
+int ifpga_port_hw_init(struct ifpga_hw *hw, int port_id);
+void ifpga_port_hw_uinit(struct ifpga_hw *hw, int port_id);
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		   struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		   struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+		  u32 feature_id, void *irq_set);
+
+/* Port/AFU APIs */
+int ifpga_reset_port(struct ifpga_hw *hw, u32 port_id);
+int ifpga_get_afu_mmio_info(struct ifpga_hw *hw, u32 port_id,
+			struct rte_mem_resource *mem_resource,
+			u32 *num_resource);
+int ifpga_get_afu_uuid(struct ifpga_hw *hw, u32 port_id, uuid *uuid);
+
+int ifpga_mmio64_read(struct ifpga_hw *hw, u32 port_id, u64 offset, u64 *value);
+int ifpga_mmio64_write(struct ifpga_hw *hw, u32 port_id, u64 offset, u64 value);
+int ifpga_mmio32_read(struct ifpga_hw *hw, u32 port_id, u64 offset, u32 *value);
+int ifpga_mmio32_write(struct ifpga_hw *hw, u32 port_id, u64 offset, u32 value);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+	     u64 *status);
+void ifpga_show_pr_error(u64 pr_error);
+
+/* Port APIs */
+int ifpga_port_umsg_enable(struct ifpga_hw *hw, u32 port_id, bool enable);
+int ifpga_port_umsg_set_mode(struct ifpga_hw *hw, u32 port_id, u32 mode);
+
+#endif /* _IFPGA_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
new file mode 100644
index 0000000..a761626
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+		typeof(((type *)0)->member) *__mptr = (ptr); \
+		(type *)((char *)__mptr - offsetof(type, member)); })
+
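+/*
+ * Example (sketch): recovering the enclosing structure from a pointer to
+ * one of its members, e.g. an ifpga_port_hw from the address of its lock.
+ *
+ *	struct ifpga_port_hw *port =
+ *		container_of(lock_ptr, struct ifpga_port_hw, lock);
+ */
+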
+#define PAGE_SHIFT       12
+#define PAGE_SIZE        (1 << PAGE_SHIFT)
+#define PAGE_MASK        (~(PAGE_SIZE - 1))
+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(ptr) opae_free(ptr)
+
+/*
+ * Wait for a register's _field to change to the given value (_expect's
+ * _field) by polling with the given interval and timeout.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({									     \
+	int wait = 0;							     \
+	int ret = -ETIMEDOUT;						     \
+	typeof(_expect) value;						     \
+	for (; wait <= _timeout; wait += _invl) {			     \
+		value.csr = readq(_reg_addr);				     \
+		if (_expect._field == value._field) {			     \
+			ret = 0;					     \
+			break;						     \
+		}							     \
+		udelay(_invl);						     \
+	}								     \
+	ret;								     \
+})
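+
+/*
+ * Example (sketch): waiting for a port soft-reset acknowledge. port_ctrl
+ * is assumed to point at the mapped feature_port_control CSR, and the
+ * RST_POLL_* constants are placeholders for a real timeout/interval.
+ *
+ *	struct feature_port_control expect = { .port_sftrst_ack = 1 };
+ *
+ *	ret = fpga_wait_register_field(port_sftrst_ack, expect,
+ *				       port_ctrl, RST_POLL_TIMEOUT,
+ *				       RST_POLL_INVL);
+ */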
+
+#define __maybe_unused __attribute__((__unused__))
+
+#define UNUSED(x)	(void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
new file mode 100644
index 0000000..f9bb1ce
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
@@ -0,0 +1,1699 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM  4
+
+#define FME_FEATURE_HEADER          "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT    "fme_thermal"
+#define FME_FEATURE_POWER_MGMT      "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF    "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR      "fme_error"
+#define FME_FEATURE_PR_MGMT         "fme_pr"
+#define FME_FEATURE_HSSI_ETH        "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF    "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH	    "fme_qspi_flash"
+
+#define PORT_FEATURE_HEADER         "port_hdr"
+#define PORT_FEATURE_UAFU           "port_uafu"
+#define PORT_FEATURE_ERR            "port_err"
+#define PORT_FEATURE_UMSG           "port_umsg"
+#define PORT_FEATURE_PR             "port_pr"
+#define PORT_FEATURE_UINT           "port_uint"
+#define PORT_FEATURE_STP            "port_stp"
+
+/*
+ * Do not check the revision id, as the id may be dynamic in
+ * some cases, e.g., UAFU.
+ */
+#define SKIP_REVISION_CHECK		0xff
+
+#define FME_HEADER_REVISION		1
+#define FME_THERMAL_MGMT_REVISION	0
+#define FME_POWER_MGMT_REVISION		1
+#define FME_GLOBAL_IPERF_REVISION	1
+#define FME_GLOBAL_ERR_REVISION		1
+#define FME_PR_MGMT_REVISION		2
+#define FME_HSSI_ETH_REVISION		0
+#define FME_GLOBAL_DPERF_REVISION	0
+#define FME_QSPI_REVISION		0
+
+#define PORT_HEADER_REVISION		0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION		SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION		1
+#define PORT_UMSG_REVISION		0
+#define PORT_UINT_REVISION		0
+#define PORT_STP_REVISION		1
+
+#define FEATURE_TYPE_AFU	0x1
+#define FEATURE_TYPE_BBB        0x2
+#define FEATURE_TYPE_PRIVATE	0x3
+#define FEATURE_TYPE_FIU	0x4
+
+#define FEATURE_FIU_ID_FME	0x0
+#define FEATURE_FIU_ID_PORT	0x1
+
+#define FEATURE_ID_HEADER	0x0
+#define FEATURE_ID_AFU		0xff
+
+enum fpga_id_type {
+	FME_ID,
+	PORT_ID,
+	FPGA_ID_MAX,
+};
+
+enum fme_feature_id {
+	FME_FEATURE_ID_HEADER = 0x0,
+
+	FME_FEATURE_ID_THERMAL_MGMT = 0x1,
+	FME_FEATURE_ID_POWER_MGMT = 0x2,
+	FME_FEATURE_ID_GLOBAL_IPERF = 0x3,
+	FME_FEATURE_ID_GLOBAL_ERR = 0x4,
+	FME_FEATURE_ID_PR_MGMT = 0x5,
+	FME_FEATURE_ID_HSSI_ETH = 0x6,
+	FME_FEATURE_ID_GLOBAL_DPERF = 0x7,
+	FME_FEATURE_ID_QSPI_FLASH = 0x8,
+
+	/* one for fme header. */
+	FME_FEATURE_ID_MAX = 0x9,
+};
+
+enum port_feature_id {
+	PORT_FEATURE_ID_HEADER = 0x0,
+	PORT_FEATURE_ID_ERROR = 0x1,
+	PORT_FEATURE_ID_UMSG = 0x2,
+	PORT_FEATURE_ID_UINT = 0x3,
+	PORT_FEATURE_ID_STP = 0x4,
+	PORT_FEATURE_ID_UAFU = 0x5,
+	PORT_FEATURE_ID_MAX = 0x6,
+};
+
+typedef struct {
+	u8 b[16];
+} uuid_le;
+
+typedef uuid_le uuid;
+
+/*
+ * All headers and structures must be byte-packed to match the
+ * SAS spec.
+ */
+#pragma pack(1)
+
+struct feature_header {
+	union {
+		u64 csr;
+		struct {
+			u16 id:12;
+			u8  revision:4;
+			u32 next_header_offset:24;
+			u8  end_of_list:1;
+			u32 reserved:19;
+			u8  type:4;
+		};
+	};
+};
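+
+/*
+ * A Device Feature List is walked by following next_header_offset from
+ * one feature_header to the next until end_of_list is set. A minimal
+ * walker (sketch; start is assumed to point at the first mapped header):
+ *
+ *	struct feature_header hdr;
+ *	u8 *p = start;
+ *
+ *	do {
+ *		hdr.csr = readq(p);
+ *		// dispatch on hdr.type / hdr.id here
+ *		p += hdr.next_header_offset;
+ *	} while (!hdr.end_of_list && hdr.next_header_offset);
+ */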
+
+struct feature_bbb_header {
+	uuid_le guid;
+};
+
+struct feature_afu_header {
+	uuid_le guid;
+	union {
+		u64 csr;
+		struct {
+			u64 next_afu:24;
+			u64 reserved:40;
+		};
+	};
+};
+
+struct feature_fiu_header {
+	uuid_le guid;
+	union {
+		u64 csr;
+		struct {
+			u64 next_afu:24;
+			u64 reserved:40;
+		};
+	};
+};
+
+struct feature_fme_capability {
+	union {
+		u64 csr;
+		struct {
+			u8  fabric_verid;	/* Fabric version ID */
+			u8  socket_id:1;	/* Socket id */
+			u8  rsvd1:3;		/* Reserved */
+			/* pci0 link available yes/no */
+			u8  pci0_link_avile:1;
+			/* pci1 link available yes/no */
+			u8  pci1_link_avile:1;
+			/* Coherent (QPI/UPI) link available yes/no */
+			u8  qpi_link_avile:1;
+			u8  rsvd2:1;		/* Reserved */
+			/* IOMMU or VT-d supported  yes/no */
+			u8  iommu_support:1;
+			u8  num_ports:3;	/* Number of ports */
+			u8  sf_fab_ctl:1;	/* Internal validation bit */
+			u8  rsvd3:3;		/* Reserved */
+			/*
+			 * Address width supported in bits
+			 * BXT -0x26 , SKX -0x30
+			 */
+			u8  address_width_bits:6;
+			u8  rsvd4:2;		/* Reserved */
+			/* Size of cache supported in kb */
+			u16 cache_size:12;
+			u8  cache_assoc:4;	/* Cache Associativity */
+			u16 rsvd5:15;		/* Reserved */
+			u8  lock_bit:1;		/* Lock bit */
+		};
+	};
+};
+
+#define FME_AFU_ACCESS_PF		0
+#define FME_AFU_ACCESS_VF		1
+
+struct feature_fme_port {
+	union {
+		u64 csr;
+		struct {
+			u32 port_offset:24;
+			u8  reserved1;
+			u8  port_bar:3;
+			u32 reserved2:20;
+			u8  afu_access_control:1;
+			u8  reserved3:4;
+			u8  port_implemented:1;
+			u8  reserved4:3;
+		};
+	};
+};
+
+struct feature_fme_fab_status {
+	union {
+		u64 csr;
+		struct {
+			u8  upilink_status:4;   /* UPI Link Status */
+			u8  rsvd1:4;		/* Reserved */
+			u8  pci0link_status:1;  /* pci0 link status */
+			u8  rsvd2:3;            /* Reserved */
+			u8  pci1link_status:1;  /* pci1 link status */
+			u64 rsvd3:51;           /* Reserved */
+		};
+	};
+};
+
+struct feature_fme_genprotrange2_base {
+	union {
+		u64 csr;
+		struct {
+			u16 rsvd1;           /* Reserved */
+			/* Base Address of memory range */
+			u8  protected_base_addrss:4;
+			u64 rsvd2:44;           /* Reserved */
+		};
+	};
+};
+
+struct feature_fme_genprotrange2_limit {
+	union {
+		u64 csr;
+		struct {
+			u16 rsvd1;           /* Reserved */
+			/* Limit Address of memory range */
+			u8  protected_limit_addrss:4;
+			u16 rsvd2:11;           /* Reserved */
+			u8  enable:1;        /* Enable GENPROTRANGE check */
+			u32 rsvd3;           /* Reserved */
+		};
+	};
+};
+
+struct feature_fme_dxe_lock {
+	union {
+		u64 csr;
+		struct {
+			/*
+			 * Determines write access to the DXE region CSRs
+			 * 1 - CSR region is locked;
+			 * 0 - it is open for write access.
+			 */
+			u8  dxe_early_lock:1;
+			/*
+			 * Determines write access to the HSSI CSR
+			 * 1 - CSR region is locked;
+			 * 0 - it is open for write access.
+			 */
+			u8  dxe_late_lock:1;
+			u64 rsvd:62;
+		};
+	};
+};
+
+#define HSSI_ID_NO_HASSI	0
+#define HSSI_ID_PCIE_RP		1
+#define HSSI_ID_ETHERNET	2
+
+struct feature_fme_bitstream_id {
+	union {
+		u64 csr;
+		struct {
+			u32 gitrepo_hash:32;	/* GIT repository hash */
+			/*
+			 * HSSI configuration identifier:
+			 * 0 - No HSSI
+			 * 1 - PCIe-RP
+			 * 2 - Ethernet
+			 */
+			u8  hssi_id:4;
+			u16 rsvd1:12;		/* Reserved */
+			/* Bitstream version patch number */
+			u8  bs_verpatch:4;
+			/* Bitstream version minor number */
+			u8  bs_verminor:4;
+			/* Bitstream version major number */
+			u8  bs_vermajor:4;
+			/* Bitstream version debug number */
+			u8  bs_verdebug:4;
+		};
+	};
+};
+
+struct feature_fme_bitstream_md {
+	union {
+		u64 csr;
+		struct {
+			/* Seed number used for the synthesis flow */
+			u8  synth_seed:4;
+			/* Synthesis date(day number - 2 digits) */
+			u8  synth_day:8;
+			/* Synthesis date(month number - 2 digits) */
+			u8  synth_month:8;
+			/* Synthesis date(year number - 2 digits) */
+			u8  synth_year:8;
+			u64 rsvd:36;		/* Reserved */
+		};
+	};
+};
+
+struct feature_fme_iommu_ctrl {
+	union {
+		u64 csr;
+		struct {
+			/* Disables IOMMU prefetcher for C0 channel */
+			u8 prefetch_disableC0:1;
+			/* Disables IOMMU prefetcher for C1 channel */
+			u8 prefetch_disableC1:1;
+			/* Disables IOMMU partial cache line writes */
+			u8 prefetch_wrdisable:1;
+			u8 rsvd1:1;		/* Reserved */
+			/*
+			 * Select counter and read value from register
+			 * iommu_stat.dbg_counters
+			 * 0 - Number of 4K page translation response
+			 * 1 - Number of 2M page translation response
+			 * 2 - Number of 1G page translation response
+			 */
+			u8 counter_sel:2;
+			u32 rsvd2:26;		/* Reserved */
+			/* Connected to IOMMU SIP Capabilities */
+			u32 capecap_defeature;
+		};
+	};
+};
+
+struct feature_fme_iommu_stat {
+	union {
+		u64 csr;
+		struct {
+			/* Translation Enable bit from IOMMU SIP */
+			u8 translation_enable:1;
+			/* Drain request in progress */
+			u8 drain_req_inprog:1;
+			/* Invalidation current state */
+			u8 inv_state:3;
+			/* C0 Response Buffer current state */
+			u8 respbuffer_stateC0:3;
+			/* C1 Response Buffer current state */
+			u8 respbuffer_stateC1:3;
+			/* Last request ID to IOMMU SIP */
+			u8 last_reqID:4;
+			/* Last IOMMU SIP response ID value */
+			u8 last_respID:4;
+			/* Last IOMMU SIP response status value */
+			u8 last_respstatus:3;
+			/* C0 Transaction Buffer is not empty */
+			u8 transbuf_notEmptyC0:1;
+			/* C1 Transaction Buffer is not empty */
+			u8 transbuf_notEmptyC1:1;
+			/* C0 Request FIFO is not empty */
+			u8 reqFIFO_notemptyC0:1;
+			/* C1 Request FIFO is not empty */
+			u8 reqFIFO_notemptyC1:1;
+			/* C0 Response FIFO is not empty */
+			u8 respFIFO_notemptyC0:1;
+			/* C1 Response FIFO is not empty */
+			u8 respFIFO_notemptyC1:1;
+			/* C0 Response FIFO overflow detected */
+			u8 respFIFO_overflowC0:1;
+			/* C1 Response FIFO overflow detected */
+			u8 respFIFO_overflowC1:1;
+			/* C0 Transaction Buffer overflow detected */
+			u8 tranbuf_overflowC0:1;
+			/* C1 Transaction Buffer overflow detected */
+			u8 tranbuf_overflowC1:1;
+			/* Request FIFO overflow detected */
+			u8 reqFIFO_overflow:1;
+			/* IOMMU memory read in progress */
+			u8 memrd_inprog:1;
+			/* IOMMU memory write in progress */
+			u8 memwr_inprog:1;
+			u8 rsvd1:1;	/* Reserved */
+			/* Value of counter selected by iommu_ctl.counter_sel */
+			u16 dbg_counters:16;
+			u16 rsvd2:12;	/* Reserved */
+		};
+	};
+};
+
+struct feature_fme_pcie0_ctrl {
+	union {
+		u64 csr;
+		struct {
+			u64 vtd_bar_lock:1;	/* Lock VT-D BAR register */
+			u64 rsvd1:3;
+			u64 rciep:1;		/* Configure PCIE0 as RCiEP */
+			u64 rsvd2:59;
+		};
+	};
+};
+
+struct feature_fme_llpr_smrr_base {
+	union {
+		u64 csr;
+		struct {
+			u64 rsvd1:12;
+			u64 base:20;	/* SMRR2 memory range base address */
+			u64 rsvd2:32;
+		};
+	};
+};
+
+struct feature_fme_llpr_smrr_mask {
+	union {
+		u64 csr;
+		struct {
+			u64 rsvd1:11;
+			u64 valid:1;	/* LLPR_SMRR rule is valid or not */
+			/*
+			 * SMRR memory range mask which determines the range
+			 * of region being mapped
+			 */
+			u64 phys_mask:20;
+			u64 rsvd2:32;
+		};
+	};
+};
+
+struct feature_fme_llpr_smrr2_base {
+	union {
+		u64 csr;
+		struct {
+			u64 rsvd1:12;
+			u64 base:20;	/* SMRR2 memory range base address */
+			u64 rsvd2:32;
+		};
+	};
+};
+
+struct feature_fme_llpr_smrr2_mask {
+	union {
+		u64 csr;
+		struct {
+			u64 rsvd1:11;
+			u64 valid:1;	/* LLPR_SMRR2 rule is valid or not */
+			/*
+			 * SMRR2 memory range mask which determines the range
+			 * of region being mapped
+			 */
+			u64 phys_mask:20;
+			u64 rsvd2:32;
+		};
+	};
+};
+
+struct feature_fme_llpr_meseg_base {
+	union {
+		u64 csr;
+		struct {
+			/* A[45:19] of base address memory range */
+			u64 me_base:27;
+			u64 rsvd:37;
+		};
+	};
+};
+
+struct feature_fme_llpr_meseg_limit {
+	union {
+		u64 csr;
+		struct {
+			/* A[45:19] of limit address memory range */
+			u64 me_limit:27;
+			u64 rsvd1:4;
+			u64 enable:1;	/* Enable LLPR MESEG rule */
+			u64 rsvd2:32;
+		};
+	};
+};
+
+struct feature_fme_header {
+	struct feature_header header;
+	struct feature_afu_header afu_header;
+	u64 reserved;
+	u64 scratchpad;
+	struct feature_fme_capability capability;
+	struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+	struct feature_fme_fab_status fab_status;
+	struct feature_fme_bitstream_id bitstream_id;
+	struct feature_fme_bitstream_md bitstream_md;
+	struct feature_fme_genprotrange2_base genprotrange2_base;
+	struct feature_fme_genprotrange2_limit genprotrange2_limit;
+	struct feature_fme_dxe_lock dxe_lock;
+	struct feature_fme_iommu_ctrl iommu_ctrl;
+	struct feature_fme_iommu_stat iommu_stat;
+	struct feature_fme_pcie0_ctrl pcie0_control;
+	struct feature_fme_llpr_smrr_base smrr_base;
+	struct feature_fme_llpr_smrr_mask smrr_mask;
+	struct feature_fme_llpr_smrr2_base smrr2_base;
+	struct feature_fme_llpr_smrr2_mask smrr2_mask;
+	struct feature_fme_llpr_meseg_base meseg_base;
+	struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+	union {
+		u64 csr;
+		struct {
+			u8 port_number:2;	/* Port Number 0-3 */
+			u8 rsvd1:6;		/* Reserved */
+			u16 mmio_size;		/* User MMIO size in KB */
+			u8 rsvd2;		/* Reserved */
+			u8 sp_intr_num:4;	/* Supported interrupts num */
+			u32 rsvd3:28;		/* Reserved */
+		};
+	};
+};
+
+struct feature_port_control {
+	union {
+		u64 csr;
+		struct {
+			u8 port_sftrst:1;	/* Port Soft Reset */
+			u8 rsvd1:1;		/* Reserved */
+			u8 latency_tolerance:1;	/* '1' >= 40us, '0' < 40us */
+			u8 rsvd2:1;		/* Reserved */
+			u8 port_sftrst_ack:1;	/* HW ACK for Soft Reset */
+			u64 rsvd3:59;		/* Reserved */
+		};
+	};
+};
+
+#define PORT_POWER_STATE_NORMAL		0
+#define PORT_POWER_STATE_AP1		1
+#define PORT_POWER_STATE_AP2		2
+#define PORT_POWER_STATE_AP6		6
+
+struct feature_port_status {
+	union {
+		u64 csr;
+		struct {
+			u8 port_freeze:1;	/* '1' - frozen, '0' - normal */
+			u8 rsvd1:7;		/* Reserved */
+			u8 power_state:4;	/* Power State */
+			u8 ap1_event:1;		/* AP1 event was detected  */
+			u8 ap2_event:1;		/* AP2 event was detected  */
+			u64 rsvd2:50;		/* Reserved */
+		};
+	};
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+	struct feature_header header;
+	struct feature_afu_header afu_header;
+	u64 port_mailbox;
+	u64 scratchpad;
+	struct feature_port_capability capability;
+	struct feature_port_control control;
+	struct feature_port_status status;
+	u64 rsvd2;
+	u64 user_clk_freq_cmd0;
+	u64 user_clk_freq_cmd1;
+	u64 user_clk_freq_sts0;
+	u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+	union {
+		u64 csr;
+		struct {
+			u8  tmp_thshold1:7;	  /* temperature Threshold 1 */
+			/* temperature Threshold 1 enable/disable */
+			u8  tmp_thshold1_enable:1;
+			u8  tmp_thshold2:7;       /* temperature Threshold 2 */
+			/* temperature Threshold 2 enable /disable */
+			u8  tmp_thshold2_enable:1;
+			u8  pro_hot_setpoint:7;   /* Proc Hot set point */
+			u8  rsvd4:1;              /* Reserved */
+			u8  therm_trip_thshold:7; /* Thermal Trip Threshold */
+			u8  rsvd3:1;              /* Reserved */
+			u8  thshold1_status:1;	  /* Threshold 1 Status */
+			u8  thshold2_status:1;    /* Threshold 2 Status */
+			u8  rsvd5:1;              /* Reserved */
+			/* Thermal Trip Threshold status */
+			u8  therm_trip_thshold_status:1;
+			u8  rsvd6:4;		  /* Reserved */
+			/* Validation mode- Force Proc Hot */
+			u8  valmodeforce:1;
+			/* Validation mode - Therm trip Hot */
+			u8  valmodetherm:1;
+			u8  rsvd2:2;              /* Reserved */
+			u8  thshold_policy:1;     /* threshold policy */
+			u32 rsvd:19;              /* Reserved */
+		};
+	};
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+	union {
+		u64 csr;
+		struct {
+			/* Reads out FPGA temperature in celsius */
+			u8  fpga_temp:7;
+			u8  rsvd0:1;			/* Reserved */
+			/* Temperature reading sequence number */
+			u16 tmp_reading_seq_num;
+			/* Temperature reading is valid */
+			u8  tmp_reading_valid:1;
+			u8  rsvd1:7;			/* Reserved */
+			u16 dbg_mode:10;		/* Debug mode */
+			u32 rsvd2:22;			/* Reserved */
+		};
+	};
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+	u64 rsvd;	/* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+	union {
+		u64 csr;
+		struct {
+			/* Temperature Threshold Unsupported */
+			u8  tmp_thshold_disabled:1;
+			u64 rsvd:63;			/* Reserved */
+		};
+	};
+};
+
+/* FME THERMAL FEATURE */
+struct feature_fme_thermal {
+	struct feature_header header;
+	struct feature_fme_tmp_threshold threshold;
+	struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+	struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+	struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+	union {
+		u64 csr;
+		struct {
+			/* FPGA power consumed; the format is to be defined */
+			u32 pwr_consumed:18;
+			/* FPGA Latency Tolerance Reporting */
+			u8  fpga_latency_report:1;
+			u64 rsvd:45;			/* Reserved */
+		};
+	};
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+	union {
+		u64 csr;
+		struct {
+			/*
+			 * Number of clocks (5ns period) for assertion
+			 * of FME_data
+			 */
+			u8  threshold1:7;
+			u8  rsvd1:1;
+			u8  threshold2:7;
+			u8  rsvd2:1;
+			u8  threshold1_status:1;
+			u8  threshold2_status:1;
+			u64 rsvd3:46;		/* Reserved */
+		};
+	};
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+	union {
+		u64 csr;
+		struct {
+			/* Power limit in Watts in 12.3 format */
+			u16 pwr_limit:15;
+			/* Indicates that power limit has been written */
+			u8  enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+			u8  clamping:1;
+			/* Time constant in XXYYY format */
+			u8  time:7;
+			u64 rsvd:40;		/* Reserved */
+		};
+	};
+};
+
+/* MCP Power Limit */
+struct feature_fme_pm_fpga_limit {
+	union {
+		u64 csr;
+		struct {
+			/* Power limit in Watts in 12.3 format */
+			u16 pwr_limit:15;
+			/* Indicates that power limit has been written */
+			u8  enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+			u8  clamping:1;
+			/* Time constant in XXYYY format */
+			u8  time:7;
+			u64 rsvd:40;		/* Reserved */
+		};
+	};
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+	struct feature_header header;
+	struct feature_fme_pm_status status;
+	struct feature_fme_pm_ap_threshold threshold;
+	struct feature_fme_pm_xeon_limit xeon_limit;
+	struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD	0
+#define CACHE_CHANNEL_WR	1
+
+enum iperf_cache_events {
+	IPERF_CACHE_RD_HIT,
+	IPERF_CACHE_WR_HIT,
+	IPERF_CACHE_RD_MISS,
+	IPERF_CACHE_WR_MISS,
+	IPERF_CACHE_RSVD, /* reserved */
+	IPERF_CACHE_HOLD_REQ,
+	IPERF_CACHE_DATA_WR_PORT_CONTEN,
+	IPERF_CACHE_TAG_WR_PORT_CONTEN,
+	IPERF_CACHE_TX_REQ_STALL,
+	IPERF_CACHE_RX_REQ_STALL,
+	IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  reset_counters:1;	/* Reset Counters */
+			u8  rsvd1:7;		/* Reserved */
+			u8  freeze:1;		/* Freeze if set to 1 */
+			u8  rsvd2:7;		/* Reserved */
+			u8  cache_event:4;	/* Select the cache event */
+			u8  cci_chsel:1;	/* Select the channel */
+			u64 rsvd3:43;		/* Reserved */
+		};
+	};
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+	union {
+		u64 csr;
+		struct {
+			/* Cache Counter for even addresses */
+			u64 cache_counter:48;
+			u16 rsvd:12;		/* Reserved */
+			/* Cache Event being reported */
+			u8  event_code:4;
+		};
+	};
+};
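+
+/*
+ * Reading a cache event counter (sketch): freeze the counters, select the
+ * event in ch_ctl, then read the 48-bit count back from ch_ctr0. ch_ctl
+ * and ch_ctr0 stand for the mapped register addresses.
+ *
+ *	struct feature_fme_ifpmon_ch_ctl ctl;
+ *	struct feature_fme_ifpmon_ch_ctr ctr;
+ *
+ *	ctl.csr = readq(ch_ctl);
+ *	ctl.freeze = 1;
+ *	ctl.cache_event = IPERF_CACHE_RD_HIT;
+ *	writeq(ctl.csr, ch_ctl);
+ *	ctr.csr = readq(ch_ctr0);
+ *	// ctr.cache_counter now holds the event count
+ */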
+
+enum iperf_fab_events {
+	IPERF_FAB_PCIE0_RD,
+	IPERF_FAB_PCIE0_WR,
+	IPERF_FAB_PCIE1_RD,
+	IPERF_FAB_PCIE1_WR,
+	IPERF_FAB_UPI_RD,
+	IPERF_FAB_UPI_WR,
+	IPERF_FAB_MMIO_RD,
+	IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER     0
+#define FAB_ENABLE_FILTER      1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  reset_counters:1;	/* Reset Counters */
+			u8  rsvd:7;		/* Reserved */
+			u8  freeze:1;		/* Set to 1 to freeze counter */
+			u8  rsvd1:7;		/* Reserved */
+			u8  fab_evtcode:4;	/* Fabric Event Code */
+			u8  port_id:2;		/* Port ID */
+			u8  rsvd2:1;		/* Reserved */
+			u8  port_filter:1;	/* Port Filter */
+			u64 rsvd3:40;		/* Reserved */
+		};
+	};
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+	union {
+		u64 csr;
+		struct {
+			u64 fab_cnt:60;	/* Fabric event counter */
+			/* Fabric event code being reported */
+			u8  event_code:4;
+		};
+	};
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+	u64 afu_interf_clock;		/* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+	IPERF_VTD_AFU_MEM_RD_TRANS,
+	IPERF_VTD_AFU_MEM_WR_TRANS,
+	IPERF_VTD_AFU_DEVTLB_RD_HIT,
+	IPERF_VTD_AFU_DEVTLB_WR_HIT,
+	IPERF_VTD_DEVTLB_4K_FILL,
+	IPERF_VTD_DEVTLB_2M_FILL,
+	IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  reset_counters:1;	/* Reset Counters */
+			u8  rsvd:7;		/* Reserved */
+			u8  freeze:1;		/* Set to 1 to freeze counter */
+			u8  rsvd1:7;		/* Reserved */
+			u8  vtd_evtcode:4;	/* VTd and TLB event code */
+			u64 rsvd2:44;		/* Reserved */
+		};
+	};
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+	union {
+		u64 csr;
+		struct {
+			u64 vtd_counter:48;	/* VTd event counter */
+			u16 rsvd:12;		/* Reserved */
+			u8  event_code:4;	/* VTd event code */
+		};
+	};
+};
+
+enum iperf_vtd_sip_events {
+	IPERF_VTD_SIP_IOTLB_4K_HIT,
+	IPERF_VTD_SIP_IOTLB_2M_HIT,
+	IPERF_VTD_SIP_IOTLB_1G_HIT,
+	IPERF_VTD_SIP_SLPWC_L3_HIT,
+	IPERF_VTD_SIP_SLPWC_L4_HIT,
+	IPERF_VTD_SIP_RCC_HIT,
+	IPERF_VTD_SIP_IOTLB_4K_MISS,
+	IPERF_VTD_SIP_IOTLB_2M_MISS,
+	IPERF_VTD_SIP_IOTLB_1G_MISS,
+	IPERF_VTD_SIP_SLPWC_L3_MISS,
+	IPERF_VTD_SIP_SLPWC_L4_MISS,
+	IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  reset_counters:1;	/* Reset Counters */
+			u8  rsvd:7;		/* Reserved */
+			u8  freeze:1;		/* Set to 1 to freeze counter */
+			u8  rsvd1:7;		/* Reserved */
+			u8  vtd_evtcode:4;	/* VTd and TLB event code */
+			u64 rsvd2:44;		/* Reserved */
+		};
+	};
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+	union {
+		u64 csr;
+		struct {
+			u64 vtd_counter:48;	/* VTd event counter */
+			u16 rsvd:12;		/* Reserved */
+			u8 event_code:4;	/* VTd event code */
+		};
+	};
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+	struct feature_header header;
+	struct feature_fme_ifpmon_ch_ctl ch_ctl;
+	struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+	struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+	struct feature_fme_ifpmon_fab_ctl fab_ctl;
+	struct feature_fme_ifpmon_fab_ctr fab_ctr;
+	struct feature_fme_ifpmon_clk_ctr clk;
+	struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+	struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+	struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+	struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+	DPERF_FAB_PCIE0_RD,
+	DPERF_FAB_PCIE0_WR,
+	DPERF_FAB_MMIO_RD = 6,
+	DPERF_FAB_MMIO_WR,
+};
+
+#define DCP_FAB_DISABLE_FILTER     0
+#define DCP_FAB_ENABLE_FILTER      1
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  reset_counters:1;	/* Reset Counters */
+			u8  rsvd:7;		/* Reserved */
+			u8  freeze:1;		/* Set to 1 to freeze counter */
+			u8  rsvd1:7;		/* Reserved */
+			u8  fab_evtcode:4;	/* Fabric Event Code */
+			u8  port_id:2;		/* Port ID */
+			u8  rsvd2:1;		/* Reserved */
+			u8  port_filter:1;	/* Port Filter */
+			u64 rsvd3:40;		/* Reserved */
+		};
+	};
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+	union {
+		u64 csr;
+		struct {
+			u64 fab_cnt:60;	/* Fabric event counter */
+			/* Fabric event code being reported */
+			u8  event_code:4;
+		};
+	};
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+	u64 afu_interf_clock;		/* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+	struct feature_header header;
+	u64 rsvd[3];
+	struct feature_fme_dfpmon_fab_ctl fab_ctl;
+	struct feature_fme_dfpmon_fab_ctr fab_ctr;
+	struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK        0xFFUL
+#define FME_ERROR0_MASK_DEFAULT 0x40UL  /* pcode workaround */
+	union {
+		u64 csr;
+		struct {
+			u8  fabric_err:1;	/* Fabric error */
+			u8  fabfifo_overflow:1;	/* Fabric fifo overflow */
+			u8  kticdc_parity_err:2;/* KTI CDC Parity Error */
+			u8  iommu_parity_err:1;	/* IOMMU Parity error */
+			/* AFU PF/VF access mismatch detected */
+			u8  afu_acc_mode_err:1;
+			u8  mbp_err:1;		/* Indicates an MBP event */
+			/* PCIE0 CDC Parity Error */
+			u8  pcie0cdc_parity_err:5;
+			/* PCIE1 CDC Parity Error */
+			u8  pcie1cdc_parity_err:5;
+			/* CVL CDC Parity Error */
+			u8  cvlcdc_parity_err:3;
+			u64 rsvd:44;		/* Reserved */
+		};
+	};
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK   0xFFUL
+	union {
+		u64 csr;
+		struct {
+			u8  formattype_err:1;	/* TLP format/type error */
+			u8  MWAddr_err:1;	/* TLP MW address error */
+			u8  MWAddrLength_err:1;	/* TLP MW length error */
+			u8  MRAddr_err:1;	/* TLP MR address error */
+			u8  MRAddrLength_err:1;	/* TLP MR length error */
+			u8  cpl_tag_err:1;	/* TLP CPL tag error */
+			u8  cpl_status_err:1;	/* TLP CPL status error */
+			u8  cpl_timeout_err:1;	/* TLP CPL timeout */
+			u8  cci_parity_err:1;	/* CCI bridge parity error */
+			u8  rxpoison_tlp_err:1;	/* Received a TLP with EP set */
+			u64 rsvd:52;		/* Reserved */
+			u8  vfnumb_err:1;	/* Number of error VF */
+			u8  funct_type_err:1;	/* Virtual (1) or Physical */
+		};
+	};
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK   0xFFUL
+	union {
+		u64 csr;
+		struct {
+			u8  formattype_err:1;	/* TLP format/type error */
+			u8  MWAddr_err:1;	/* TLP MW address error */
+			u8  MWAddrLength_err:1;	/* TLP MW length error */
+			u8  MRAddr_err:1;	/* TLP MR address error */
+			u8  MRAddrLength_err:1;	/* TLP MR length error */
+			u8  cpl_tag_err:1;	/* TLP CPL tag error */
+			u8  cpl_status_err:1;	/* TLP CPL status error */
+			u8  cpl_timeout_err:1;	/* TLP CPL timeout */
+			u8  cci_parity_err:1;	/* CCI bridge parity error */
+			u8  rxpoison_tlp_err:1;	/* Received a TLP with EP set */
+			u64 rsvd:54;		/* Reserved */
+		};
+	};
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK   ((1UL << 60) - 1)
+	union {
+		u64 csr;
+		struct {
+			/*
+			 * Holds the 60 LSBs from the Error register that
+			 * was triggered first
+			 */
+			u64 err_reg_status:60;
+			/*
+			 * Indicates the Error Register that was
+			 * triggered first
+			 */
+			u8 errReg_id:4;
+		};
+	};
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK    ((1UL << 60) - 1)
+	union {
+		u64 csr;
+		struct {
+			/*
+			 * Holds the 60 LSBs from the Error register that
+			 * was triggered second
+			 */
+			u64 err_reg_status:60;
+			/*
+			 * Indicates the Error Register that was
+			 * triggered second
+			 */
+			u8  errReg_id:4;
+		};
+	};
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+	union {
+		u64 csr;
+		struct {
+			/* thermal threshold AP1 */
+			u8  temp_thresh_ap1:1;
+			/* thermal threshold AP2 */
+			u8  temp_thresh_ap2:1;
+			u8  pcie_error:1;	/* pcie Error */
+			u8  portfatal_error:1;	/* port fatal error */
+			u8  proc_hot:1;		/* Indicates a ProcHot event */
+			/* Indicates an AFU PF/VF access mismatch */
+			u8  afu_acc_mode_err:1;
+			/* Injected nonfatal Error */
+			u8  injected_nonfata_err:1;
+			u8  rsvd1:2;
+			/* Temperature threshold triggered AP6*/
+			u8  temp_thresh_AP6:1;
+			/* Power threshold triggered AP1 */
+			u8  power_thresh_AP1:1;
+			/* Power threshold triggered AP2 */
+			u8  power_thresh_AP2:1;
+			/* Indicates a MBP event */
+			u8  mbp_err:1;
+			u64 rsvd2:51;		/* Reserved */
+		};
+	};
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+	union {
+		u64 csr;
+		struct {
+			/* KTI Link layer error detected */
+			u8  ktilink_fatal_err:1;
+			/* tag-n-cache error detected */
+			u8  tagcch_fatal_err:1;
+			/* CCI error detected */
+			u8  cci_fatal_err:1;
+			/* KTI Protocol error detected */
+			u8  ktiprpto_fatal_err:1;
+			/* Fatal DRAM error detected */
+			u8  dram_fatal_err:1;
+			/* IOMMU fatal error detected */
+			u8  iommu_fatal_err:1;
+			/* Fabric Fatal Error */
+			u8  fabric_fatal_err:1;
+			/* PCIe poison Error */
+			u8  pcie_poison_err:1;
+			/* Injected fatal Error */
+			u8  inject_fata_err:1;
+			/* Catastrophic CRC Error */
+			u8  crc_catast_err:1;
+			/* Catastrophic Thermal Error */
+			u8  therm_catast_err:1;
+			/* Injected Catastrophic Error */
+			u8  injected_catast_err:1;
+			u64 rsvd:52;
+		};
+	};
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK      0x7UL
+	union {
+		u64 csr;
+		struct {
+			u8  catast_error:1;	/* Catastrophic error flag */
+			u8  fatal_error:1;	/* Fatal error flag */
+			u8  nonfatal_error:1;	/* NonFatal error flag */
+			u64 rsvd:61;		/* Reserved */
+		};
+	};
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+	union {
+	u64 csr;
+		struct {
+			u8 support_intr:1;
+			/* MSI-X vector table entry number */
+			u16 intr_vector_num:12;
+			u64 rsvd:51;	/* Reserved */
+		};
+	};
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+	struct feature_header header;
+	struct feature_fme_error0 fme_err_mask;
+	struct feature_fme_error0 fme_err;
+	struct feature_fme_pcie0_error pcie0_err_mask;
+	struct feature_fme_pcie0_error pcie0_err;
+	struct feature_fme_pcie1_error pcie1_err_mask;
+	struct feature_fme_pcie1_error pcie1_err;
+	struct feature_fme_first_error fme_first_err;
+	struct feature_fme_next_error fme_next_err;
+	struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+	struct feature_fme_ras_nonfaterror ras_nonfaterr;
+	struct feature_fme_ras_catfaterror ras_catfat_mask;
+	struct feature_fme_ras_catfaterror ras_catfaterr;
+	struct feature_fme_ras_error_inj ras_error_inj;
+	struct feature_fme_error_capability fme_err_capability;
+};
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+	union {
+		u64 csr;
+		struct {
+			u8  pr_reset:1;		/* Reset PR Engine */
+			u8  rsvd3:3;		/* Reserved */
+			u8  pr_reset_ack:1;	/* Reset PR Engine Ack */
+			u8  rsvd4:3;		/* Reserved */
+			u8  pr_regionid:2;	/* PR Region ID */
+			u8  rsvd1:2;		/* Reserved */
+			u8  pr_start_req:1;	/* PR Start Request */
+			u8  pr_push_complete:1;	/* PR Data push complete */
+			u8  pr_kind:1;		/* PR Kind */
+			u32 rsvd:17;		/* Reserved */
+			u32 config_data;	/* Config data TBD */
+		};
+	};
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+	union {
+		u64 csr;
+		struct {
+			u16 pr_credit:9;	/* PR Credits */
+			u8  rsvd2:7;		/* Reserved */
+			u8  pr_status:1;	/* PR status */
+			u8  rsvd:3;		/* Reserved */
+			/* Altera PR Controller Block status */
+			u8  pr_controller_status:3;
+			u8  rsvd1:1;            /* Reserved */
+			u8  pr_host_status:4;   /* PR Host status */
+			u8  rsvd3:4;		/* Reserved */
+			/* Security Block Status fields (TBD) */
+			u32 security_bstatus;
+		};
+	};
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+	union {
+		u64 csr;	/* PR data from the raw-binary file */
+		struct {
+			/* PR data from the raw-binary file */
+			u32 pr_data_raw;
+			u32 rsvd;
+		};
+	};
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+	u64 key;		/* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+	struct feature_header header;
+	/* Partial Reconfiguration Control */
+	struct feature_fme_pr_ctl	ccip_fme_pr_control;
+
+	/* Partial Reconfiguration Status */
+	struct feature_fme_pr_status	ccip_fme_pr_status;
+
+	/* Partial Reconfiguration data */
+	struct feature_fme_pr_data	ccip_fme_pr_data;
+
+	/* Partial Reconfiguration error */
+	u64				ccip_fme_pr_err;
+
+	u64 rsvd1[3];
+
+	/* Partial Reconfiguration data registers */
+	u64 fme_pr_data1;
+	u64 fme_pr_data2;
+	u64 fme_pr_data3;
+	u64 fme_pr_data4;
+	u64 fme_pr_data5;
+	u64 fme_pr_data6;
+	u64 fme_pr_data7;
+	u64 fme_pr_data8;
+
+	u64 rsvd2[5];
+
+	/* PR Interface ID */
+	u64 fme_pr_intfc_id_l;
+	u64 fme_pr_intfc_id_h;
+
+	/* MSI-X field to be added */
+};
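+
+/*
+ * Illustrative sketch (not part of this patch) of how the PR registers above
+ * cooperate during partial reconfiguration: a full flow first resets the PR
+ * engine and waits for pr_reset_ack, then programs the region id, raises the
+ * start request, and pushes the bitstream one 32-bit word at a time while
+ * pr_credit indicates the FIFO has room. 'pr' is a hypothetical pointer to
+ * the mapped feature_fme_pr region; 'buf' (a u8 pointer) and 'size' describe
+ * the bitstream payload.
+ *
+ *	struct feature_fme_pr_ctl ctl;
+ *	struct feature_fme_pr_status st;
+ *	struct feature_fme_pr_data d;
+ *
+ *	ctl.csr = readq(&pr->ccip_fme_pr_control);
+ *	ctl.pr_regionid = region;
+ *	ctl.pr_start_req = 1;
+ *	writeq(ctl.csr, &pr->ccip_fme_pr_control);
+ *
+ *	while (size >= 4) {
+ *		st.csr = readq(&pr->ccip_fme_pr_status);
+ *		if (!st.pr_credit)
+ *			continue;
+ *		d.csr = 0;
+ *		d.pr_data_raw = *(u32 *)buf;
+ *		writeq(d.csr, &pr->ccip_fme_pr_data);
+ *		buf += 4;
+ *		size -= 4;
+ *	}
+ */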
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+	union {
+		u64 csr;
+		struct {
+			u32 data:32;		/* HSSI data */
+			u16 address:16;		/* HSSI address */
+			/*
+			 * HSSI command
+			 * 0x0 - No request
+			 * 0x08 - SW register RD request
+			 * 0x10 - SW register WR request
+			 * 0x40 - Auxiliary bus RD request
+			 * 0x80 - Auxiliary bus WR request
+			 */
+			u16 cmd:16;
+		};
+	};
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+	union {
+		u64 csr;
+		struct {
+			u32 data:32;		/* HSSI data */
+			u8  acknowledge:1;	/* HSSI acknowledge */
+			u8  spare:1;		/* HSSI spare */
+			u32 rsvd:30;		/* Reserved */
+		};
+	};
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+	struct feature_header header;
+	struct feature_fme_hssi_eth_ctrl	hssi_control;
+	struct feature_fme_hssi_eth_stat	hssi_status;
+};
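+
+/*
+ * Illustrative sketch (not part of this patch) of the HSSI mailbox handshake
+ * implied by the two registers above: issue a command through the control
+ * register, poll the status register for the acknowledge bit, then (for a
+ * read) pick up the result from the data field. 'hssi' is a hypothetical
+ * pointer to the mapped feature_fme_hssi region and 'addr' the HSSI address.
+ *
+ *	struct feature_fme_hssi_eth_ctrl ctrl;
+ *	struct feature_fme_hssi_eth_stat stat;
+ *
+ *	ctrl.csr = 0;
+ *	ctrl.address = addr;
+ *	ctrl.cmd = 0x08;
+ *	writeq(ctrl.csr, &hssi->hssi_control);
+ *
+ *	do {
+ *		stat.csr = readq(&hssi->hssi_status);
+ *	} while (!stat.acknowledge);
+ *
+ *	return stat.data;
+ */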
+
+#define PORT_ERR_MASK		0xfff0703ff001f
+struct feature_port_err_key {
+	union {
+		u64 csr;
+		struct {
+			/* Tx Channel0: Overflow */
+			u8 tx_ch0_overflow:1;
+			/* Tx Channel0: Invalid request encoding */
+			u8 tx_ch0_invaldreq:1;
+			/* Tx Channel0: Request with cl_len=3 not supported */
+			u8 tx_ch0_cl_len3:1;
+			/* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+			u8 tx_ch0_cl_len2:1;
+			/* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+			u8 tx_ch0_cl_len4:1;
+
+			u16 rsvd1:4;			/* Reserved */
+
+			/* AFU MMIO RD received while PORT is in reset */
+			u8 mmio_rd_whilerst:1;
+			/* AFU MMIO WR received while PORT is in reset */
+			u8 mmio_wr_whilerst:1;
+
+			u16 rsvd2:5;			/* Reserved */
+
+			/* Tx Channel1: Overflow */
+			u8 tx_ch1_overflow:1;
+			/* Tx Channel1: Invalid request encoding */
+			u8 tx_ch1_invaldreq:1;
+			/* Tx Channel1: Request with cl_len=3 not supported */
+			u8 tx_ch1_cl_len3:1;
+			/* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+			u8 tx_ch1_cl_len2:1;
+			/* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+			u8 tx_ch1_cl_len4:1;
+
+			/* Tx Channel1: Insufficient data payload */
+			u8 tx_ch1_insuff_data:1;
+			/* Tx Channel1: Data payload overrun */
+			u8 tx_ch1_data_overrun:1;
+			/* Tx Channel1 : Incorrect address */
+			u8 tx_ch1_incorr_addr:1;
+			/* Tx Channel1 : NON-Zero SOP Detected */
+			u8 tx_ch1_nzsop:1;
+			/* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+			u8 tx_ch1_illegal_vcsel:1;
+
+			u8 rsvd3:6;			/* Reserved */
+
+			/* MMIO Read Timeout in AFU */
+			u8 mmioread_timeout:1;
+
+			/* Tx Channel2: FIFO Overflow */
+			u8 tx_ch2_fifo_overflow:1;
+
+			/* MMIO read is not matching pending request */
+			u8 unexp_mmio_resp:1;
+
+			u8 rsvd4:5;			/* Reserved */
+
+			/* Number of pending Requests: counter overflow */
+			u8 tx_req_counter_overflow:1;
+			/* Req with Address violating SMM Range */
+			u8 llpr_smrr_err:1;
+			/* Req with Address violating second SMM Range */
+			u8 llpr_smrr2_err:1;
+			/* Req with Address violating ME Stolen message */
+			u8 llpr_mesg_err:1;
+			/* Req with Address violating Generic Protected Range */
+			u8 genprot_range_err:1;
+			/* Req with Address violating Legacy Range low */
+			u8 legrange_low_err:1;
+			/* Req with Address violating Legacy Range High */
+			u8 legrange_high_err:1;
+			/* Req with Address violating VGA memory range */
+			u8 vgmem_range_err:1;
+			u8 page_fault_err:1;		/* Page fault */
+			u8 pmr_err:1;			/* PMR Error */
+			u8 ap6_event:1;			/* AP6 event */
+			/* VF FLR detected on Port with PF access control */
+			u8 vfflr_access_err:1;
+			u16 rsvd5:12;			/* Reserved */
+		};
+	};
+};
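+
+/*
+ * Note: decoding PORT_ERR_MASK above against the bit layout, the mask
+ * appears to cover every defined error bit of this register except the two
+ * MMIO-while-reset bits; writing it to the error mask register therefore
+ * masks the Tx channel, timeout and protection-range errors at once (see
+ * port_err_mask() in ifpga_feature_dev.c).
+ */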
+
+/*
+ * Port first error register; it does not contain all error bits of the
+ * error register.
+ */
+struct feature_port_first_err_key {
+	union {
+		u64 csr;
+		struct {
+			u8 tx_ch0_overflow:1;
+			u8 tx_ch0_invaldreq:1;
+			u8 tx_ch0_cl_len3:1;
+			u8 tx_ch0_cl_len2:1;
+			u8 tx_ch0_cl_len4:1;
+			u8 rsvd1:4;			/* Reserved */
+			u8 mmio_rd_whilerst:1;
+			u8 mmio_wr_whilerst:1;
+			u8 rsvd2:5;			/* Reserved */
+			u8 tx_ch1_overflow:1;
+			u8 tx_ch1_invaldreq:1;
+			u8 tx_ch1_cl_len3:1;
+			u8 tx_ch1_cl_len2:1;
+			u8 tx_ch1_cl_len4:1;
+			u8 tx_ch1_insuff_data:1;
+			u8 tx_ch1_data_overrun:1;
+			u8 tx_ch1_incorr_addr:1;
+			u8 tx_ch1_nzsop:1;
+			u8 tx_ch1_illegal_vcsel:1;
+			u8 rsvd3:6;			/* Reserved */
+			u8 mmioread_timeout:1;
+			u8 tx_ch2_fifo_overflow:1;
+			u8 rsvd4:6;			/* Reserved */
+			u8 tx_req_counter_overflow:1;
+			u32 rsvd5:23;			/* Reserved */
+		};
+	};
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+	u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+	u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+	u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+	union {
+		u64 csr;
+		struct {
+			u8  support_intr:1;
+			/* MSI-X vector table entry number */
+			u16 intr_vector_num:12;
+			u64 rsvd:51;            /* Reserved */
+		};
+	};
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+	struct feature_header header;
+	struct feature_port_err_key error_mask;
+	struct feature_port_err_key port_error;
+	struct feature_port_first_err_key port_first_error;
+	struct feature_port_malformed_req0 malreq0;
+	struct feature_port_malformed_req1 malreq1;
+	struct feature_port_debug port_debug;
+	struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+	union {
+		u64 csr;
+		struct {
+			/* Number of umsg allocated to this port */
+			u8 umsg_allocated;
+			/* Enable / Disable UMsg engine for this port */
+			u8 umsg_enable:1;
+			/* UMsg initialization status */
+			u8 umsg_init_complete:1;
+			/* IOMMU cannot translate the UMsg base address */
+			u8 umsg_trans_error:1;
+			u64 rsvd:53;		/* Reserved */
+		};
+	};
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+	union {
+		u64 csr;
+		struct {
+			u64 base_addr:48;	/* 48 bit physical address */
+			u16 rsvd;		/* Reserved */
+		};
+	};
+};
+
+struct feature_port_umsg_mode {
+	union {
+		u64 csr;
+		struct {
+			u32 umsg_hint_enable;	/* UMSG hint enable/disable */
+			u32 rsvd;		/* Reserved */
+		};
+	};
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+	struct feature_header header;
+	struct feature_port_umsg_cap capability;
+	struct feature_port_umsg_baseaddr baseaddr;
+	struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
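+
+/*
+ * Illustrative sketch (not part of this patch): the two poll constants above
+ * pace a wait for umsg_init_complete after the UMsg engine is enabled.
+ * 'umsg' is a hypothetical pointer to the mapped feature_port_umsg region;
+ * fpga_wait_register_field() is the polling helper used elsewhere in this
+ * base code.
+ *
+ *	struct feature_port_umsg_cap cap;
+ *
+ *	cap.csr = readq(&umsg->capability);
+ *	cap.umsg_enable = 1;
+ *	writeq(cap.csr, &umsg->capability);
+ *
+ *	cap.umsg_init_complete = 1;
+ *	if (fpga_wait_register_field(umsg_init_complete, cap,
+ *				     &umsg->capability, UMSG_EN_POLL_TIMEOUT,
+ *				     UMSG_EN_POLL_INVL))
+ *		return -ETIMEDOUT;
+ */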
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+	union {
+		u64 csr;
+		struct {
+			u16 intr_num:12;	/* Supported interrupts num */
+			/* First MSI-X vector table entry number */
+			u16 first_vec_num:12;
+			u64 rsvd:40;
+		};
+	};
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+	struct feature_header header;
+	struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only) */
+struct feature_port_stp_status {
+	union {
+		u64 csr;
+		struct {
+			/* SLD Hub end-point read/write timeout */
+			u8 sld_ep_timeout:1;
+			/* Remote STP in reset/disable */
+			u8 rstp_disabled:1;
+			u8 unsupported_read:1;
+			/* MMIO timeout detected and faked with a response */
+			u8 mmio_timeout:1;
+			u8 txfifo_count:4;
+			u8 rxfifo_count:4;
+			u8 txfifo_overflow:1;
+			u8 txfifo_underflow:1;
+			u8 rxfifo_overflow:1;
+			u8 rxfifo_underflow:1;
+			/* Number of MMIO write requests */
+			u16 write_requests;
+			/* Number of MMIO read requests */
+			u16 read_requests;
+			/* Number of MMIO read responses */
+			u16 read_responses;
+		};
+	};
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in the STP region are not touched by the driver but are
+ * mmapped to user space, so they are not defined in the data structure
+ * below; the actual region size is 0x18c per the spec.
+ */
+struct feature_port_stp {
+	struct feature_header header;
+	struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_DONE: FPGA PR done
+ */
+enum fpga_pr_states {
+	/* cannot determine state */
+	FPGA_PR_STATE_UNKNOWN,
+
+	/* write sequence: init, write, complete */
+	FPGA_PR_STATE_WRITE_INIT,
+	FPGA_PR_STATE_WRITE_INIT_ERR,
+	FPGA_PR_STATE_WRITE,
+	FPGA_PR_STATE_WRITE_ERR,
+	FPGA_PR_STATE_WRITE_COMPLETE,
+	FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+	/* FPGA PR done */
+	FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG	BIT(0)
+
+/**
+ * struct fpga_pr_info - information specific to an FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+	u32 flags;
+	u64 pr_err;
+	enum fpga_pr_states state;
+	int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_)			\
+static const char * const _name_[] = {			\
+	"PR operation error detected",			\
+	"PR CRC error detected",			\
+	"PR incompatible bitstream error detected",	\
+	"PR IP protocol error detected",		\
+	"PR FIFO overflow error detected",		\
+	"PR timeout error detected",			\
+	"PR secure load error detected",		\
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT   15000000
+
+#define PR_HOST_STATUS_IDLE	0
+#define PR_MAX_ERR_NUM	7
+
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
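+
+/*
+ * Illustrative sketch (not part of this patch), assuming PR error status bit
+ * i corresponds to pr_err_msg[i]: decoding a raw PR error word into the
+ * messages above.
+ *
+ *	static void log_pr_errors(u64 pr_err)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < PR_MAX_ERR_NUM; i++)
+ *			if (pr_err & (1ULL << i))
+ *				dev_err(NULL, "%s\n", pr_err_msg[i]);
+ *	}
+ */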
+
+/*
+ * The green bitstream (GBS) header must be byte-packed to match the
+ * real file format.
+ */
+struct bts_header {
+	u64 guid_h;
+	u64 guid_l;
+	u32 metadata_len;
+};
+
+#define GBS_GUID_H		0x414750466e6f6558
+#define GBS_GUID_L		0x31303076534247b7
+#define is_valid_bts(bts_hdr)				\
+	(((bts_hdr)->guid_h == GBS_GUID_H) &&		\
+	((bts_hdr)->guid_l == GBS_GUID_L))
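+
+/*
+ * Illustrative sketch (not part of this patch): validating a user-supplied
+ * green bitstream buffer before programming. 'buf' (a u8 pointer) and
+ * 'size' are hypothetical; the payload to push follows the header plus its
+ * metadata.
+ *
+ *	const struct bts_header *hdr = (const struct bts_header *)buf;
+ *
+ *	if (size < sizeof(*hdr) || !is_valid_bts(hdr))
+ *		return -EINVAL;
+ *
+ *	buf += sizeof(*hdr) + hdr->metadata_len;
+ *	size -= sizeof(*hdr) + hdr->metadata_len;
+ */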
+
+#endif /* _BASE_IFPGA_DEFINES_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
new file mode 100644
index 0000000..bc4708d
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -0,0 +1,808 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+	struct opae_adapter_data_pci *pci_data;
+	struct opae_manager *mgr;
+	struct opae_bridge *br;
+
+	void *ioaddr;
+	void *ioend;
+	uint64_t phys_addr;
+	int current_bar;
+
+	void *pfme_hdr;
+	enum fpga_id_type current_type;
+	int current_port_id;
+
+	struct ifpga_hw *hw;
+};
+
+struct feature_info {
+	const char *name;
+	uint32_t resource_size;
+	int feature_index;
+	int revision_id;
+	unsigned int vec_start;
+	unsigned int vec_cnt;
+
+	struct feature_ops *ops;
+};
+
+/* Indexed by FME feature IDs, which are defined in 'enum fme_feature_id'. */
+static struct feature_info fme_features[] = {
+	{
+		.name = FME_FEATURE_HEADER,
+		.resource_size = sizeof(struct feature_fme_header),
+		.feature_index = FME_FEATURE_ID_HEADER,
+		.revision_id = FME_HEADER_REVISION,
+		.ops = &fme_hdr_ops,
+	},
+	{
+		.name = FME_FEATURE_THERMAL_MGMT,
+		.resource_size = sizeof(struct feature_fme_thermal),
+		.feature_index = FME_FEATURE_ID_THERMAL_MGMT,
+		.revision_id = FME_THERMAL_MGMT_REVISION,
+		.ops = &fme_thermal_mgmt_ops,
+	},
+	{
+		.name = FME_FEATURE_POWER_MGMT,
+		.resource_size = sizeof(struct feature_fme_power),
+		.feature_index = FME_FEATURE_ID_POWER_MGMT,
+		.revision_id = FME_POWER_MGMT_REVISION,
+		.ops = &fme_power_mgmt_ops,
+	},
+	{
+		.name = FME_FEATURE_GLOBAL_IPERF,
+		.resource_size = sizeof(struct feature_fme_iperf),
+		.feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
+		.revision_id = FME_GLOBAL_IPERF_REVISION,
+		.ops = &fme_global_iperf_ops,
+	},
+	{
+		.name = FME_FEATURE_GLOBAL_ERR,
+		.resource_size = sizeof(struct feature_fme_err),
+		.feature_index = FME_FEATURE_ID_GLOBAL_ERR,
+		.revision_id = FME_GLOBAL_ERR_REVISION,
+		.ops = &fme_global_err_ops,
+	},
+	{
+		.name = FME_FEATURE_PR_MGMT,
+		.resource_size = sizeof(struct feature_fme_pr),
+		.feature_index = FME_FEATURE_ID_PR_MGMT,
+		.revision_id = FME_PR_MGMT_REVISION,
+		.ops = &fme_pr_mgmt_ops,
+	},
+	{
+		.name = FME_FEATURE_HSSI_ETH,
+		.resource_size = sizeof(struct feature_fme_hssi),
+		.feature_index = FME_FEATURE_ID_HSSI_ETH,
+		.revision_id = FME_HSSI_ETH_REVISION
+	},
+	{
+		.name = FME_FEATURE_GLOBAL_DPERF,
+		.resource_size = sizeof(struct feature_fme_dperf),
+		.feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
+		.revision_id = FME_GLOBAL_DPERF_REVISION,
+		.ops = &fme_global_dperf_ops,
+	}
+};
+
+static struct feature_info port_features[] = {
+	{
+		.name = PORT_FEATURE_HEADER,
+		.resource_size = sizeof(struct feature_port_header),
+		.feature_index = PORT_FEATURE_ID_HEADER,
+		.revision_id = PORT_HEADER_REVISION,
+		.ops = &port_hdr_ops,
+	},
+	{
+		.name = PORT_FEATURE_ERR,
+		.resource_size = sizeof(struct feature_port_error),
+		.feature_index = PORT_FEATURE_ID_ERROR,
+		.revision_id = PORT_ERR_REVISION,
+		.ops = &port_error_ops,
+	},
+	{
+		.name = PORT_FEATURE_UMSG,
+		.resource_size = sizeof(struct feature_port_umsg),
+		.feature_index = PORT_FEATURE_ID_UMSG,
+		.revision_id = PORT_UMSG_REVISION,
+		.ops = &port_umsg_ops,
+	},
+	{
+		.name = PORT_FEATURE_UINT,
+		.resource_size = sizeof(struct feature_port_uint),
+		.feature_index = PORT_FEATURE_ID_UINT,
+		.revision_id = PORT_UINT_REVISION,
+		.ops = &port_uint_ops,
+	},
+	{
+		.name = PORT_FEATURE_STP,
+		.resource_size = PORT_FEATURE_STP_REGION_SIZE,
+		.feature_index = PORT_FEATURE_ID_STP,
+		.revision_id = PORT_STP_REVISION,
+		.ops = &port_stp_ops,
+	},
+	{
+		.name = PORT_FEATURE_UAFU,
+		/* The UAFU feature size should be read from
+		 * PORT_CAP.MMIOSIZE; it is set while parsing the port device.
+		 */
+		.resource_size = 0,
+		.feature_index = PORT_FEATURE_ID_UAFU,
+		.revision_id = PORT_UAFU_REVISION
+	},
+};
+
+static u64 feature_id(void __iomem *start)
+{
+	struct feature_header header;
+
+	header.csr = readq(start);
+
+	switch (header.type) {
+	case FEATURE_TYPE_FIU:
+		return FEATURE_ID_HEADER;
+	case FEATURE_TYPE_PRIVATE:
+		return header.id;
+	case FEATURE_TYPE_AFU:
+		return FEATURE_ID_AFU;
+	}
+
+	WARN_ON(1);
+	return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+			   struct feature_info *finfo, void __iomem *start)
+{
+	struct ifpga_hw *hw = binfo->hw;
+	struct feature *feature = NULL;
+	int feature_idx = finfo->feature_index;
+	unsigned int vec_start = finfo->vec_start;
+	unsigned int vec_cnt = finfo->vec_cnt;
+	struct feature_irq_ctx *ctx = NULL;
+	int port_id, ret = 0;
+	unsigned int i;
+
+	if (binfo->current_type == FME_ID) {
+		feature = &hw->fme.sub_feature[feature_idx];
+		feature->parent = &hw->fme;
+	} else if (binfo->current_type == PORT_ID) {
+		port_id = binfo->current_port_id;
+		feature = &hw->port[port_id].sub_feature[feature_idx];
+		feature->parent = &hw->port[port_id];
+	} else {
+		return -EFAULT;
+	}
+
+	feature->state = IFPGA_FEATURE_ATTACHED;
+	feature->addr = start;
+	feature->id = feature_id(start);
+	feature->size = finfo->resource_size;
+	feature->name = finfo->name;
+	feature->revision = finfo->revision_id;
+	feature->ops = finfo->ops;
+	feature->phys_addr = binfo->phys_addr +
+				((u8 *)start - (u8 *)binfo->ioaddr);
+
+	if (vec_cnt) {
+		if (vec_start + vec_cnt <= vec_start)
+			return -EINVAL;
+
+		ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+		if (!ctx)
+			return -ENOMEM;
+
+		for (i = 0; i < vec_cnt; i++) {
+			ctx[i].eventfd = -1;
+			ctx[i].idx = vec_start + i;
+		}
+	}
+
+	feature->ctx = ctx;
+	feature->ctx_num = vec_cnt;
+	feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+	return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+			void __iomem *start, struct feature_info *finfo)
+{
+	struct feature_header *hdr = start;
+
+	if (((u8 *)binfo->ioend - (u8 *)start) < finfo->resource_size)
+		return -EINVAL;
+
+	if (finfo->revision_id != SKIP_REVISION_CHECK &&
+	    hdr->revision > finfo->revision_id) {
+		dev_err(binfo, "feature %s revision mismatch: default %x, found %x\n",
+			finfo->name, finfo->revision_id, hdr->revision);
+	}
+
+	return build_info_add_sub_feature(binfo, finfo, start);
+}
+
+/*
+ * UAFU GUID is dynamic, as it can change after the FME downloads a different
+ * Green Bitstream to the port, so we treat unknown GUIDs attached to a
+ * port's feature list as UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+	return binfo->current_type == PORT_ID;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+				   struct feature_header *hdr)
+{
+	enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+	struct ifpga_hw *hw = binfo->hw;
+	struct opae_accelerator *acc;
+	struct opae_acc_reg_region_info *resource;
+	void *start = (void *)hdr;
+	int ret;
+
+	if (port_features[id].resource_size) {
+		ret = create_feature_instance(binfo, hdr, &port_features[id]);
+	} else {
+		dev_err(binfo, "the uafu feature header is mis-configured.\n");
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	/* FIXME: need to figure out a better name */
+	resource = malloc(sizeof(*resource));
+	if (!resource)
+		return -ENOMEM;
+
+	resource->region[0].addr = start;
+	resource->region[0].phys_addr = binfo->phys_addr +
+			(uint8_t *)start - (uint8_t *)binfo->ioaddr;
+	resource->region[0].len = port_features[id].resource_size;
+	port_features[id].resource_size = 0;
+	resource->num_regions = 1;
+
+	acc = opae_accelerator_alloc(hw->adapter->name,
+				     &ifpga_acc_ops, resource);
+	if (!acc) {
+		free(resource);
+		return -ENOMEM;
+	}
+
+	/* check this accelerator is under which bridge */
+	if (binfo->br) {
+		acc->br = binfo->br;
+		acc->index = acc->br->id;
+		binfo->br = NULL;
+	}
+
+	opae_adapter_add_acc(hw->adapter, acc);
+
+	return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+			      struct feature_header *hdr)
+{
+	int ret;
+	struct feature_afu_header *afu_hdr, header;
+	u8 __iomem *start;
+	u8 __iomem *end = binfo->ioend;
+
+	start = (u8 __iomem *)hdr;
+	for (; start < end; start += header.next_afu) {
+		if (end - start <
+			(unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+			return -EINVAL;
+
+		hdr = (struct feature_header *)start;
+		afu_hdr = (struct feature_afu_header *)(hdr + 1);
+		header.csr = readq(&afu_hdr->csr);
+
+		if (feature_is_UAFU(binfo)) {
+			ret = parse_feature_port_uafu(binfo, hdr);
+			if (ret)
+				return ret;
+		}
+
+		if (!header.next_afu)
+			break;
+	}
+
+	return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+			     struct feature_header *start)
+{
+	struct ifpga_hw *hw = binfo->hw;
+	struct ifpga_fme_hw *fme = &hw->fme;
+	struct opae_manager *mgr;
+	int ret;
+
+	/* Found FME device */
+	fme->state = IFPGA_FME_IMPLEMENTED;
+	fme->parent = hw;
+	binfo->current_type = FME_ID;
+
+	spinlock_init(&fme->lock);
+
+	ret = create_feature_instance(binfo, start,
+				      &fme_features[FME_FEATURE_ID_HEADER]);
+
+	mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops, fme);
+	if (!mgr)
+		return -ENOMEM;
+
+	hw->adapter->mgr = mgr;
+	mgr->adapter = hw->adapter;
+	binfo->mgr = mgr;
+
+	return ret;
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+			      void __iomem *start)
+{
+	struct ifpga_hw *hw = binfo->hw;
+	struct feature_port_header *port_hdr;
+	struct feature_port_capability capability;
+	struct ifpga_port_hw *port;
+	struct opae_bridge *br;
+	unsigned int port_id;
+	int ret;
+
+	/* Get current port's id */
+	port_hdr = (struct feature_port_header *)start;
+	capability.csr = readq(&port_hdr->capability);
+	port_id = capability.port_number;
+
+	/* Found a Port device */
+	port = &hw->port[port_id];
+	binfo->current_port_id = port_id;
+	port->port_id = binfo->current_port_id;
+	port->parent = hw;
+	port->state = IFPGA_PORT_ATTACHED;
+	binfo->current_type = PORT_ID;
+
+	spinlock_init(&port->lock);
+
+	ret = create_feature_instance(binfo, start,
+				      &port_features[PORT_FEATURE_ID_HEADER]);
+	/* FIXME: find a better name */
+	br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops, port);
+	if (!br)
+		return -ENOMEM;
+
+	br->id = port_id;
+	binfo->br = br;
+
+	return ret;
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+			     void __iomem *start)
+{
+	enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+	struct feature_port_header *port_hdr;
+	struct feature_port_capability capability;
+	struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+	port_hdr = (struct feature_port_header *)start;
+	capability.csr = readq(&port_hdr->capability);
+	port_features[id].resource_size = (capability.mmio_size << 10);
+
+	/*
+	 * Per the SAS spec, the related port must be reset to enable the
+	 * UAFU; otherwise the whole MMIO space of the UAFU is invalid.
+	 */
+	if (port_features[id].resource_size)
+		fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+			     struct feature_header *hdr)
+{
+	struct feature_header header;
+	struct feature_fiu_header *fiu_hdr, fiu_header;
+	u8 __iomem *start = (u8 __iomem *)hdr;
+	int ret;
+
+	header.csr = readq(hdr);
+
+	switch (header.id) {
+	case FEATURE_FIU_ID_FME:
+		ret = parse_feature_fme(binfo, hdr);
+		binfo->pfme_hdr = hdr;
+		if (ret)
+			return ret;
+		break;
+	case FEATURE_FIU_ID_PORT:
+		ret = parse_feature_port(binfo, hdr);
+		enable_port_uafu(binfo, hdr);
+		if (ret)
+			return ret;
+
+		/* Check Port FIU's next_afu pointer to User AFU DFH */
+		fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+		fiu_header.csr = readq(&fiu_hdr->csr);
+
+		if (fiu_header.next_afu) {
+			start += fiu_header.next_afu;
+			ret = parse_feature_afus(binfo,
+					(struct feature_header *)start);
+			if (ret)
+				return ret;
+		} else {
+			dev_info(binfo, "No AFUs detected on Port\n");
+		}
+
+		break;
+	default:
+		dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+			 header.id);
+	}
+
+	return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+				void __iomem *start, struct feature_info *finfo)
+{
+	finfo->vec_start = 0;
+	finfo->vec_cnt = 0;
+
+	UNUSED(binfo);
+
+	if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
+		struct feature_port_uint *port_uint = start;
+		struct feature_port_uint_cap uint_cap;
+
+		uint_cap.csr = readq(&port_uint->capability);
+		if (uint_cap.intr_num) {
+			finfo->vec_start = uint_cap.first_vec_num;
+			finfo->vec_cnt = uint_cap.intr_num;
+		} else {
+			dev_debug(binfo, "UAFU doesn't support interrupt\n");
+		}
+
+	} else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
+		struct feature_port_error *port_err = start;
+		struct feature_port_err_capability port_err_cap;
+
+		port_err_cap.csr = readq(&port_err->error_capability);
+		if (port_err_cap.support_intr) {
+			finfo->vec_start = port_err_cap.intr_vector_num;
+			finfo->vec_cnt = 1;
+		} else {
+			dev_debug(binfo, "Port error doesn't support interrupt\n");
+		}
+
+	} else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
+		struct feature_fme_err *fme_err = start;
+		struct feature_fme_error_capability fme_err_cap;
+
+		fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+		if (fme_err_cap.support_intr) {
+			finfo->vec_start = fme_err_cap.intr_vector_num;
+			finfo->vec_cnt = 1;
+		} else {
+			dev_debug(binfo, "FME error doesn't support interrupt\n");
+		}
+	}
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+				     struct feature_header *hdr)
+{
+	struct feature_header header;
+
+	header.csr = readq(hdr);
+
+	if (header.id >= ARRAY_SIZE(fme_features)) {
+		dev_err(binfo, "FME feature id %x is not supported yet.\n",
+			header.id);
+		return 0;
+	}
+
+	parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
+
+	return create_feature_instance(binfo, hdr, &fme_features[header.id]);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+				      struct feature_header *hdr)
+{
+	struct feature_header header;
+	enum port_feature_id id;
+
+	header.csr = readq(hdr);
+	/*
+	 * The region of port feature IDs is [0x10, 0x13]; add 1 to reserve 0,
+	 * which is dedicated to the port header.
+	 */
+	id = (header.id & 0x000f) + 1;
+
+	if (id >= ARRAY_SIZE(port_features)) {
+		dev_err(binfo, "Port feature id %x is not supported yet.\n",
+			header.id);
+		return 0;
+	}
+
+	parse_feature_irqs(binfo, hdr, &port_features[id]);
+
+	return create_feature_instance(binfo, hdr, &port_features[id]);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+				 struct feature_header *hdr)
+{
+	struct feature_header header;
+
+	header.csr = readq(hdr);
+
+	switch (binfo->current_type) {
+	case FME_ID:
+		return parse_feature_fme_private(binfo, hdr);
+	case PORT_ID:
+		return parse_feature_port_private(binfo, hdr);
+	default:
+		dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+			header.id, binfo->current_type);
+	}
+	return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+			 struct feature_header *hdr)
+{
+	struct feature_header header;
+	int ret = 0;
+
+	header.csr = readq(hdr);
+
+	switch (header.type) {
+	case FEATURE_TYPE_AFU:
+		ret = parse_feature_afus(binfo, hdr);
+		break;
+	case FEATURE_TYPE_PRIVATE:
+		ret = parse_feature_private(binfo, hdr);
+		break;
+	case FEATURE_TYPE_FIU:
+		ret = parse_feature_fiu(binfo, hdr);
+		break;
+	default:
+		dev_err(binfo, "Feature Type %x is not supported.\n",
+			header.type);
+	}
+
+	return ret;
+}
+
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+	struct feature_header *hdr, header;
+	u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+	int ret = 0;
+
+	for (; start < end; start += header.next_header_offset) {
+		if (end - start < (unsigned int)sizeof(*hdr)) {
+			dev_err(binfo, "The region is too small to contain a feature.\n");
+			ret =  -EINVAL;
+			break;
+		}
+
+		hdr = (struct feature_header *)start;
+		ret = parse_feature(binfo, hdr);
+		if (ret)
+			break;
+
+		header.csr = readq(hdr);
+		if (!header.next_header_offset)
+			break;
+	}
+
+	return ret;
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+	struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+	if (!pci_data->region[bar].addr)
+		return -ENOMEM;
+
+	binfo->ioaddr = pci_data->region[bar].addr;
+	binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+	binfo->phys_addr = pci_data->region[bar].phys_addr;
+	binfo->current_bar = bar;
+
+	return 0;
+}
+
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+	struct feature_fme_header *fme_hdr;
+	struct feature_fme_port port;
+	int i = 0, ret = 0;
+
+	if (!binfo->pfme_hdr) {
+		dev_info(binfo, "VF is detected.\n");
+		return ret;
+	}
+
+	fme_hdr = binfo->pfme_hdr;
+
+	do {
+		port.csr = readq(&fme_hdr->port[i]);
+		if (!port.port_implemented)
+			break;
+
+		/* skip port which only could be accessed via VF */
+		if (port.afu_access_control == FME_AFU_ACCESS_VF)
+			continue;
+
+		ret = parse_switch_to(binfo, port.port_bar);
+		if (ret)
+			break;
+
+		ret = parse_feature_list(binfo,
+			(u8 __iomem *)binfo->ioaddr + port.port_offset);
+		if (ret)
+			break;
+	} while (++i < MAX_FPGA_PORT_NUM);
+
+	return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+	struct build_feature_devs_info *binfo;
+
+	binfo = zmalloc(sizeof(*binfo));
+	if (!binfo)
+		return binfo;
+
+	binfo->hw = hw;
+	binfo->pci_data = hw->pci_data;
+
+	/* fpga feature list starts from BAR 0 */
+	if (parse_switch_to(binfo, 0)) {
+		free(binfo);
+		return NULL;
+	}
+
+	return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+	free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+	struct ifpga_fme_hw *fme = &hw->fme;
+	struct ifpga_port_hw *port;
+	struct feature *feature;
+	int i, j;
+
+	dev_info(hw, "found fme_device, is in PF: %s\n",
+			is_ifpga_hw_pf(hw) ? "yes":"no");
+
+	for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+		feature = &fme->sub_feature[i];
+		if (feature->state != IFPGA_FEATURE_ATTACHED)
+			continue;
+
+		dev_info(hw, "%12s:	0x%llx - 0x%llx  - paddr: 0x%lx\n", feature->name,
+			 (unsigned long long)feature->addr,
+			 (unsigned long long)feature->addr + feature->size - 1,
+			 feature->phys_addr);
+	}
+
+	for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+		port = &hw->port[i];
+
+		if (port->state != IFPGA_PORT_ATTACHED)
+			continue;
+
+		dev_info(hw, "port device: %d\n", port->port_id);
+
+		for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
+			feature = &port->sub_feature[j];
+			if (feature->state != IFPGA_FEATURE_ATTACHED)
+				continue;
+
+			dev_info(hw, "%12s:	0x%llx - 0x%llx  - paddr:0x%lx\n", feature->name,
+				 (unsigned long long)feature->addr,
+				 (unsigned long long)feature->addr +
+				 feature->size - 1,
+				 feature->phys_addr);
+		}
+	}
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+	struct build_feature_devs_info *binfo;
+	int ret;
+
+	binfo = build_info_alloc_and_init(hw);
+	if (!binfo)
+		return -ENOMEM;
+
+	ret = parse_feature_list(binfo, binfo->ioaddr);
+	if (ret)
+		goto exit;
+
+	ret = parse_ports_from_fme(binfo);
+	if (ret)
+		goto exit;
+
+	ifpga_print_device_feature_list(hw);
+
+exit:
+	build_info_free(binfo);
+	return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+	int i;
+
+	fme_hw_init(&hw->fme);
+	for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
+		port_hw_init(&hw->port[i]);
+
+	return 0;
+}
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
new file mode 100644
index 0000000..8289a3a
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
+
+#endif /* _IFPGA_ENUMERATE_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
new file mode 100644
index 0000000..b641adb
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
@@ -0,0 +1,305 @@
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable the port by clearing the port soft reset bit, which is set by
+ * default. The AFU is unable to respond to any MMIO access while in reset.
+ * __fpga_port_enable should only be used after __fpga_port_disable.
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_control control;
+
+	WARN_ON(!port->disable_count);
+
+	if (--port->disable_count != 0)
+		return;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+	WARN_ON(!port_hdr);
+
+	control.csr = readq(&port_hdr->control);
+	control.port_sftrst = 0x0;
+	writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_control control;
+
+	if (port->disable_count++ != 0)
+		return 0;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+	WARN_ON(!port_hdr);
+
+	/* Set port soft reset */
+	control.csr = readq(&port_hdr->control);
+	control.port_sftrst = 0x1;
+	writeq(control.csr, &port_hdr->control);
+
+	/*
+	 * HW sets the ack bit to 1 when all outstanding requests have been
+	 * drained on this port and the minimum soft reset pulse width has
+	 * elapsed. The driver polls port_sftrst_ack to determine whether the
+	 * reset is done by HW.
+	 */
+	control.port_sftrst_ack = 1;
+
+	if (fpga_wait_register_field(port_sftrst_ack, control,
+				     &port_hdr->control, RST_POLL_TIMEOUT,
+				     RST_POLL_INVL)) {
+		dev_err(port, "timeout, fail to reset device\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, uuid *uuid)
+{
+	struct feature_port_header *port_hdr;
+	u64 guidl, guidh;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_UAFU);
+
+	spinlock_lock(&port->lock);
+	guidl = readq(&port_hdr->afu_header.guid.b[0]);
+	guidh = readq(&port_hdr->afu_header.guid.b[8]);
+	spinlock_unlock(&port->lock);
+
+	dev_debug(port, "%s: guidl=0x%lx, guidh=0x%lx\n", __func__, guidl, guidh);
+
+	memcpy(uuid->b, &guidl, sizeof(u64));
+	memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+	return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(void *addr, bool mask)
+{
+	struct feature_port_error *port_err;
+	struct feature_port_err_key err_mask;
+
+	port_err = addr;
+
+	if (mask)
+		err_mask.csr = PORT_ERR_MASK;
+	else
+		err_mask.csr = 0;
+
+	writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+static int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_error *port_err;
+	struct feature_port_err_key mask;
+	struct feature_port_first_err_key first;
+	struct feature_port_status status;
+	int ret = 0;
+
+	port_err = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_ERROR);
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	/*
+	 * Clear All Port Errors
+	 *
+	 * - Check for AP6 State
+	 * - Halt Port by keeping Port in reset
+	 * - Set PORT Error mask to all 1 to mask errors
+	 * - Clear all errors
+	 * - Set Port mask to all 0 to enable errors
+	 * - All errors start capturing new errors
+	 * - Enable Port by pulling the port out of reset
+	 */
+
+	/* If device is still in AP6 state, can not clear any error.*/
+	status.csr = readq(&port_hdr->status);
+	if (status.power_state == PORT_POWER_STATE_AP6) {
+		dev_err(port, "Could not clear errors, device in AP6 state.\n");
+		return -EBUSY;
+	}
+
+	/* Halt Port by keeping Port in reset */
+	ret = __fpga_port_disable(port);
+	if (ret)
+		return ret;
+
+	/* Mask all errors */
+	port_err_mask(port_err, true);
+
+	/* Clear errors if err input matches with current port errors.*/
+	mask.csr = readq(&port_err->port_error);
+
+	if (mask.csr == err) {
+		writeq(mask.csr, &port_err->port_error);
+
+		first.csr = readq(&port_err->port_first_error);
+		writeq(first.csr, &port_err->port_first_error);
+	} else {
+		ret = -EBUSY;
+	}
+
+	/* Clear mask */
+	port_err_mask(port_err, false);
+
+	/* Enable the Port by clear the reset */
+	__fpga_port_enable(port);
+
+	return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+	struct feature_port_error *port_err;
+	struct feature_port_err_key error;
+	u64 port_error;
+
+	port_err = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_ERROR);
+	port_error = error.csr = readq(&port_err->port_error);
+
+	dev_info(port, "read port error: 0x%lx\n", port_error);
+
+	return port_err_clear(port, port_error);
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+	struct feature *feature;
+	int i;
+
+	if (fme->state != IFPGA_FME_IMPLEMENTED)
+		return;
+
+	for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+		feature = &fme->sub_feature[i];
+		if (feature->state == IFPGA_FEATURE_ATTACHED &&
+			feature->ops && feature->ops->uinit)
+			feature->ops->uinit(feature);
+	}
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+	struct feature *feature;
+	int i, ret;
+
+	if (fme->state != IFPGA_FME_IMPLEMENTED)
+		return -EINVAL;
+
+	for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+		feature = &fme->sub_feature[i];
+		if (feature->state == IFPGA_FEATURE_ATTACHED &&
+			feature->ops && feature->ops->init) {
+			ret = feature->ops->init(feature);
+			if (ret) {
+				fme_hw_uinit(fme);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+	struct feature *feature;
+	int i;
+
+	for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+		feature = &port->sub_feature[i];
+		if (feature->state == IFPGA_FEATURE_ATTACHED &&
+			feature->ops && feature->ops->uinit)
+			feature->ops->uinit(feature);
+	}
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+	struct feature *feature;
+	int i, ret;
+
+	if (port->state == IFPGA_PORT_UNUSED)
+		return 0;
+
+	for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+		feature = &port->sub_feature[i];
+		if (feature->ops && feature->ops->init) {
+			ret = feature->ops->init(feature);
+			if (ret) {
+				port_hw_uinit(port);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * FIXME: the MSI-X vector count should be retrieved during PCI enumeration
+ * instead of using the hardcoded value below.
+ */
+#define FPGA_MSIX_VEC_COUNT	20
+/* irq set buffer length for interrupt */
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+				sizeof(int) * FPGA_MSIX_VEC_COUNT)
+
+/* Only MSI-X is supported for now. */
+static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,
+				  unsigned int count, s32 *fds)
+{
+	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+	struct vfio_irq_set *irq_set;
+	int len, ret;
+	int *fd_ptr;
+
+	len = sizeof(irq_set_buf);
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = len;
+	irq_set->count = count;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+				VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = vec_start;
+
+	fd_ptr = (int *)&irq_set->data;
+	memcpy(fd_ptr, fds, sizeof(int) * count);
+
+	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (ret)
+		printf("Error enabling MSI-X interrupts\n");
+
+	return ret;
+}
+
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+			unsigned int count, s32 *fds)
+{
+	struct feature_irq_ctx *ctx = feature->ctx;
+	unsigned int i;
+	int ret;
+
+	if (start >= feature->ctx_num || start + count > feature->ctx_num)
+		return -EINVAL;
+
+	/* Assume that each feature has a contiguous vector space in MSI-X. */
+	ret = vfio_msix_enable_block(feature->vfio_dev_fd,
+				     ctx[start].idx, count, fds);
+	if (!ret) {
+		for (i = 0; i < count; i++)
+			ctx[start + i].eventfd = fds[i];
+	}
+
+	return ret;
+}
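+
+/*
+ * Illustrative usage sketch (not part of this patch): binding eventfds to a
+ * feature's first two MSI-X vectors. eventfd(2) is the standard Linux
+ * syscall; error handling is elided.
+ *
+ *	#include <sys/eventfd.h>
+ *
+ *	s32 fds[2];
+ *	int ret;
+ *
+ *	fds[0] = eventfd(0, 0);
+ *	fds[1] = eventfd(0, 0);
+ *	ret = fpga_msix_set_block(feature, 0, 2, fds);
+ */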
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
new file mode 100644
index 0000000..b621405
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -0,0 +1,197 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+	if (!is_valid_port_id(hw, port_id))
+		return NULL;
+
+	return &hw->port[port_id];
+}
+
+#define ifpga_for_each_feature(hw, feature)		\
+	for ((feature) = (hw)->sub_feature;			\
+	   (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
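+
+/*
+ * Illustrative sketch (not part of this patch): the iterator above walks any
+ * object that embeds a sub_feature[] array, e.g. counting attached FME
+ * features. Note the loop bound is FME_FEATURE_ID_MAX even when a port is
+ * passed in, as in get_port_feature_by_id() below.
+ *
+ *	struct feature *feature;
+ *	int n = 0;
+ *
+ *	ifpga_for_each_feature(fme, feature)
+ *		if (feature->state == IFPGA_FEATURE_ATTACHED)
+ *			n++;
+ */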
+
+static inline struct feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+	struct feature *feature;
+
+	ifpga_for_each_feature(fme, feature) {
+		if (feature->id == id)
+			return feature;
+	}
+
+	return NULL;
+}
+
+static inline struct feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+	struct feature *feature;
+
+	ifpga_for_each_feature(port, feature) {
+		if (feature->id == id)
+			return feature;
+	}
+
+	return NULL;
+}
+
+static inline void  *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+	return fme->sub_feature[index].addr;
+}
+
+static inline void  *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+	return port->sub_feature[index].addr;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+	return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+	return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+	int ret;
+
+	spinlock_lock(&port->lock);
+	ret = __fpga_port_disable(port);
+	spinlock_unlock(&port->lock);
+	return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+	spinlock_lock(&port->lock);
+	__fpga_port_enable(port);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+	int ret;
+
+	ret = __fpga_port_disable(port);
+	if (ret)
+		return ret;
+
+	__fpga_port_enable(port);
+
+	return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+	int ret;
+
+	spinlock_lock(&port->lock);
+	ret = __fpga_port_reset(port);
+	spinlock_unlock(&port->lock);
+	return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+	   u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(void *addr, bool mask);
+
+extern struct feature_ops fme_hdr_ops;
+extern struct feature_ops fme_thermal_mgmt_ops;
+extern struct feature_ops fme_power_mgmt_ops;
+extern struct feature_ops fme_global_err_ops;
+extern struct feature_ops fme_pr_mgmt_ops;
+extern struct feature_ops fme_global_iperf_ops;
+extern struct feature_ops fme_global_dperf_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+	u32 start;
+	u32 count;
+	s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+
+extern struct feature_ops port_hdr_ops;
+extern struct feature_ops port_error_ops;
+extern struct feature_ops port_stp_ops;
+extern struct feature_ops port_umsg_ops;
+extern struct feature_ops port_uint_ops;
+
+int afu_port_umsg_enable(struct ifpga_port_hw *port, bool enable);
+int afu_port_umsg_set_mode(struct ifpga_port_hw *port, u32 mode);
+
+/* helper functions for feature ops */
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+			unsigned int count, s32 *fds);
+void port_check_reg(void __iomem *addr, const char *reg_name, u64 dflt);
+
+#endif /* _IFPGA_FEATURE_DEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
new file mode 100644
index 0000000..7e348df
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
@@ -0,0 +1,731 @@
+#include "ifpga_feature_dev.h"
+
+#define PWR_THRESHOLD_MAX       0x7F
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+	struct feature *feature;
+
+	if (!fme)
+		return -ENOENT;
+
+	feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+	if (feature && feature->ops && feature->ops->get_prop)
+		return feature->ops->get_prop(feature, prop);
+
+	return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+	struct feature *feature;
+
+	if (!fme)
+		return -ENOENT;
+
+	feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+	if (feature && feature->ops && feature->ops->set_prop)
+		return feature->ops->set_prop(feature, prop);
+
+	return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+	struct feature *feature;
+
+	if (!fme)
+		return -ENOENT;
+
+	feature = get_fme_feature_by_id(fme, feature_id);
+
+	if (feature && feature->ops && feature->ops->set_irq)
+		return feature->ops->set_irq(feature, irq_set);
+
+	return -ENOENT;
+}
+
+/* FME private feature: header */
+static int fme_hdr_init(struct feature *feature)
+{
+	struct feature_fme_header *fme_hdr;
+
+	fme_hdr = (struct feature_fme_header *)feature->addr;
+
+	dev_info(NULL, "FME HDR Init.\n");
+	dev_info(NULL, "FME cap %llx.\n",
+		 (unsigned long long)fme_hdr->capability.csr);
+
+	return 0;
+}
+
+static void fme_hdr_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+	struct feature_header header;
+
+	header.csr = readq(&fme_hdr->header);
+	*revision = header.revision;
+
+	return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+	struct feature_fme_capability fme_capability;
+
+	fme_capability.csr = readq(&fme_hdr->capability);
+	*ports_num = fme_capability.num_ports;
+
+	return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+	struct feature_fme_capability fme_capability;
+
+	fme_capability.csr = readq(&fme_hdr->capability);
+	*cache_size = fme_capability.cache_size;
+
+	return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+	struct feature_fme_capability fme_capability;
+
+	fme_capability.csr = readq(&fme_hdr->capability);
+	*version = fme_capability.fabric_verid;
+
+	return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+	struct feature_fme_capability fme_capability;
+
+	fme_capability.csr = readq(&fme_hdr->capability);
+	*socket_id = fme_capability.socket_id;
+
+	return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+				    u64 *bitstream_id)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	*bitstream_id = readq(&fme_hdr->bitstream_id);
+
+	return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+					  u64 *bitstream_metadata)
+{
+	struct feature_fme_header *fme_hdr
+		= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	*bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+	return 0;
+}
+
+static int
+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	switch (prop->prop_id) {
+	case FME_HDR_PROP_REVISION:
+		return fme_hdr_get_revision(fme, &prop->data);
+	case FME_HDR_PORTS_NUM:
+		return fme_hdr_get_ports_num(fme, &prop->data);
+	case FME_HDR_CACHE_SIZE:
+		return fme_hdr_get_cache_size(fme, &prop->data);
+	case FME_HDR_VERSION:
+		return fme_hdr_get_version(fme, &prop->data);
+	case FME_HDR_SOCKET_ID:
+		return fme_hdr_get_socket_id(fme, &prop->data);
+	case FME_HDR_BITSTREAM_ID:
+		return fme_hdr_get_bitstream_id(fme, &prop->data);
+	case FME_HDR_BITSTREAM_METADATA:
+		return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops fme_hdr_ops = {
+	.init = fme_hdr_init,
+	.uinit = fme_hdr_uinit,
+	.get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres1 = temp_threshold.tmp_thshold1;
+
+	return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_header *fme_hdr;
+	struct feature_fme_tmp_threshold tmp_threshold;
+	struct feature_fme_capability fme_capability;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_THERMAL_MGMT);
+	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	spinlock_lock(&fme->lock);
+	tmp_threshold.csr = readq(&fme_thermal->threshold);
+	fme_capability.csr = readq(&fme_hdr->capability);
+
+	if (fme_capability.lock_bit == 1) {
+		spinlock_unlock(&fme->lock);
+		return -EBUSY;
+	} else if (thres1 > 100) {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	} else if (thres1 == 0) {
+		tmp_threshold.tmp_thshold1_enable = 0;
+		tmp_threshold.tmp_thshold1 = thres1;
+	} else {
+		tmp_threshold.tmp_thshold1_enable = 1;
+		tmp_threshold.tmp_thshold1 = thres1;
+	}
+
+	writeq(tmp_threshold.csr, &fme_thermal->threshold);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres2 = temp_threshold.tmp_thshold2;
+
+	return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_header *fme_hdr;
+	struct feature_fme_tmp_threshold tmp_threshold;
+	struct feature_fme_capability fme_capability;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_THERMAL_MGMT);
+	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	spinlock_lock(&fme->lock);
+	tmp_threshold.csr = readq(&fme_thermal->threshold);
+	fme_capability.csr = readq(&fme_hdr->capability);
+
+	if (fme_capability.lock_bit == 1) {
+		spinlock_unlock(&fme->lock);
+		return -EBUSY;
+	} else if (thres2 > 100) {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	} else if (thres2 == 0) {
+		tmp_threshold.tmp_thshold2_enable = 0;
+		tmp_threshold.tmp_thshold2 = thres2;
+	} else {
+		tmp_threshold.tmp_thshold2_enable = 1;
+		tmp_threshold.tmp_thshold2 = thres2;
+	}
+
+	writeq(tmp_threshold.csr, &fme_thermal->threshold);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+					  u64 *thres_trip)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres_trip = temp_threshold.therm_trip_thshold;
+
+	return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+					      u64 *thres1_reached)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres1_reached = temp_threshold.thshold1_status;
+
+	return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+					      u64 *thres2_reached)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres2_reached = temp_threshold.thshold2_status;
+
+	return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+					     u64 *thres1_policy)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold temp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_threshold.csr = readq(&fme_thermal->threshold);
+	*thres1_policy = temp_threshold.thshold_policy;
+
+	return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+					     u64 thres1_policy)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold tmp_threshold;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_THERMAL_MGMT);
+
+	spinlock_lock(&fme->lock);
+	tmp_threshold.csr = readq(&fme_thermal->threshold);
+
+	if (thres1_policy == 0)
+		tmp_threshold.thshold_policy = 0;
+	else if (thres1_policy == 1)
+		tmp_threshold.thshold_policy = 1;
+	else {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	}
+
+	writeq(tmp_threshold.csr, &fme_thermal->threshold);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+	fme_thermal = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_THERMAL_MGMT);
+
+	temp_rdsensor_fmt1.csr = readq(&fme_thermal->rdsensor_fm1);
+	*temp = temp_rdsensor_fmt1.fpga_temp;
+
+	return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+	struct feature_fme_thermal *fme_thermal
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_THERMAL_MGMT);
+	struct feature_header header;
+
+	header.csr = readq(&fme_thermal->header);
+	*revision = header.revision;
+
+	return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD	0x1
+
+static int fme_thermal_mgmt_init(struct feature *feature)
+{
+	struct feature_fme_thermal *fme_thermal;
+	struct feature_fme_tmp_threshold_cap thermal_cap;
+
+	dev_info(NULL, "FME thermal mgmt Init.\n");
+
+	fme_thermal = (struct feature_fme_thermal *)feature->addr;
+	thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+	dev_info(NULL, "FME thermal cap %llx.\n",
+		 (unsigned long long)thermal_cap.csr);
+
+	if (thermal_cap.tmp_thshold_disabled)
+		feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+	return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+		return -ENOENT;
+
+	switch (prop->prop_id) {
+	case FME_THERMAL_PROP_THRESHOLD1:
+		return fme_thermal_set_threshold1(fme, prop->data);
+	case FME_THERMAL_PROP_THRESHOLD2:
+		return fme_thermal_set_threshold2(fme, prop->data);
+	case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+		return fme_thermal_set_threshold1_policy(fme, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+	    prop->prop_id != FME_THERMAL_PROP_TEMPERATURE)
+		return -ENOENT;
+
+	switch (prop->prop_id) {
+	case FME_THERMAL_PROP_THRESHOLD1:
+		return fme_thermal_get_threshold1(fme, &prop->data);
+	case FME_THERMAL_PROP_THRESHOLD2:
+		return fme_thermal_get_threshold2(fme, &prop->data);
+	case FME_THERMAL_PROP_THRESHOLD_TRIP:
+		return fme_thermal_get_threshold_trip(fme, &prop->data);
+	case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+		return fme_thermal_get_threshold1_reached(fme, &prop->data);
+	case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+		return fme_thermal_get_threshold2_reached(fme, &prop->data);
+	case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+		return fme_thermal_get_threshold1_policy(fme, &prop->data);
+	case FME_THERMAL_PROP_TEMPERATURE:
+		return fme_thermal_get_temperature(fme, &prop->data);
+	case FME_THERMAL_PROP_REVISION:
+		return fme_thermal_get_revision(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops fme_thermal_mgmt_ops = {
+	.init = fme_thermal_mgmt_init,
+	.uinit = fme_thermal_mgmt_uinit,
+	.get_prop = fme_thermal_get_prop,
+	.set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_status pm_status;
+
+	pm_status.csr = readq(&fme_power->status);
+
+	*consumed = pm_status.pwr_consumed;
+
+	return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	*threshold = pm_ap_threshold.threshold1;
+
+	return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	spinlock_lock(&fme->lock);
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	if (threshold <= PWR_THRESHOLD_MAX)
+		pm_ap_threshold.threshold1 = threshold;
+	else {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	}
+
+	writeq(pm_ap_threshold.csr, &fme_power->threshold);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	*threshold = pm_ap_threshold.threshold2;
+
+	return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	spinlock_lock(&fme->lock);
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	if (threshold <= PWR_THRESHOLD_MAX)
+		pm_ap_threshold.threshold2 = threshold;
+	else {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	}
+
+	writeq(pm_ap_threshold.csr, &fme_power->threshold);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+					 u64 *threshold_status)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	*threshold_status = pm_ap_threshold.threshold1_status;
+
+	return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+					 u64 *threshold_status)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+	pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+	*threshold_status = pm_ap_threshold.threshold2_status;
+
+	return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_status pm_status;
+
+	pm_status.csr = readq(&fme_power->status);
+
+	*rtl = pm_status.fpga_latency_report;
+
+	return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_xeon_limit xeon_limit;
+
+	xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+	if (!xeon_limit.enable)
+		xeon_limit.pwr_limit = 0;
+
+	*limit = xeon_limit.pwr_limit;
+
+	return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_POWER_MGMT);
+	struct feature_fme_pm_fpga_limit fpga_limit;
+
+	fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+	if (!fpga_limit.enable)
+		fpga_limit.pwr_limit = 0;
+
+	*limit = fpga_limit.pwr_limit;
+
+	return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+	struct feature_fme_power *fme_power
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_POWER_MGMT);
+	struct feature_header header;
+
+	header.csr = readq(&fme_power->header);
+	*revision = header.revision;
+
+	return 0;
+}
+
+static int fme_power_mgmt_init(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME power mgmt Init.\n");
+
+	return 0;
+}
+
+static void fme_power_mgmt_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	switch (prop->prop_id) {
+	case FME_PWR_PROP_CONSUMED:
+		return fme_pwr_get_consumed(fme, &prop->data);
+	case FME_PWR_PROP_THRESHOLD1:
+		return fme_pwr_get_threshold1(fme, &prop->data);
+	case FME_PWR_PROP_THRESHOLD2:
+		return fme_pwr_get_threshold2(fme, &prop->data);
+	case FME_PWR_PROP_THRESHOLD1_STATUS:
+		return fme_pwr_get_threshold1_status(fme, &prop->data);
+	case FME_PWR_PROP_THRESHOLD2_STATUS:
+		return fme_pwr_get_threshold2_status(fme, &prop->data);
+	case FME_PWR_PROP_RTL:
+		return fme_pwr_get_rtl(fme, &prop->data);
+	case FME_PWR_PROP_XEON_LIMIT:
+		return fme_pwr_get_xeon_limit(fme, &prop->data);
+	case FME_PWR_PROP_FPGA_LIMIT:
+		return fme_pwr_get_fpga_limit(fme, &prop->data);
+	case FME_PWR_PROP_REVISION:
+		return fme_pwr_get_revision(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	switch (prop->prop_id) {
+	case FME_PWR_PROP_THRESHOLD1:
+		return fme_pwr_set_threshold1(fme, prop->data);
+	case FME_PWR_PROP_THRESHOLD2:
+		return fme_pwr_set_threshold2(fme, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops fme_power_mgmt_ops = {
+	.init = fme_power_mgmt_init,
+	.uinit = fme_power_mgmt_uinit,
+	.get_prop = fme_power_mgmt_get_prop,
+	.set_prop = fme_power_mgmt_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
new file mode 100644
index 0000000..df28db0
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
@@ -0,0 +1,297 @@
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID	0xff
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+	struct feature_fme_dperf *dperf;
+	struct feature_fme_dfpmon_clk_ctr clk;
+
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+	clk.afu_interf_clock = readq(&dperf->clk);
+
+	*clock = clk.afu_interf_clock;
+	return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+	struct feature_fme_dperf *dperf;
+	struct feature_header header;
+
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+	header.csr = readq(&dperf->header);
+	*revision = header.revision;
+
+	return 0;
+}
+
+#define DPERF_TIMEOUT	30
+
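+/*
+ * With the port filter disabled the fabric counters aggregate all ports,
+ * so only the root perf object (port id 0xff) counts as enabled; with the
+ * filter on, only the currently selected port id matches.
+ */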
+static bool fabric_pobj_is_enabled(int port_id,
+				   struct feature_fme_dperf *dperf)
+{
+	struct feature_fme_dfpmon_fab_ctl ctl;
+
+	ctl.csr = readq(&dperf->fab_ctl);
+
+	if (ctl.port_filter == FAB_DISABLE_FILTER)
+		return port_id == PERF_OBJ_ROOT_ID;
+
+	return port_id == ctl.port_id;
+}
+
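+/*
+ * Reading a fabric counter is a handshake with the hardware: program the
+ * event code into fab_ctl, then poll fab_ctr until the same event code is
+ * reflected back, at which point fab_cnt holds a coherent value for that
+ * event. A disabled perf object short-circuits to zero.
+ */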
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+			       enum dperf_fab_events fab_event)
+{
+	struct feature_fme_dfpmon_fab_ctl ctl;
+	struct feature_fme_dfpmon_fab_ctr ctr;
+	struct feature_fme_dperf *dperf;
+	u64 counter = 0;
+
+	spinlock_lock(&fme->lock);
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+
+	/* if it is disabled, force the counter to return zero. */
+	if (!fabric_pobj_is_enabled(port_id, dperf))
+		goto exit;
+
+	ctl.csr = readq(&dperf->fab_ctl);
+	ctl.fab_evtcode = fab_event;
+	writeq(ctl.csr, &dperf->fab_ctl);
+
+	ctr.event_code = fab_event;
+
+	if (fpga_wait_register_field(event_code, ctr,
+				     &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+		spinlock_unlock(&fme->lock);
+		return -ETIMEDOUT;
+	}
+
+	ctr.csr = readq(&dperf->fab_ctr);
+	counter = ctr.fab_cnt;
+exit:
+	spinlock_unlock(&fme->lock);
+	return counter;
+}
+
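+/* Generate one fabric-counter read accessor per fabric event. */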
+#define FAB_PORT_SHOW(name, event)					\
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
+					 u8 port_id, u64 *counter)	\
+{									\
+	*counter = read_fabric_counter(fme, port_id, event);		\
+	return 0;							\
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+					 u8 port_id, u64 *enable)
+{
+	struct feature_fme_dperf *dperf;
+	int status;
+
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+
+	status = fabric_pobj_is_enabled(port_id, dperf);
+	*enable = (u64)status;
+
+	return 0;
+}
+
+/*
+ * Enabling the event counter for one port (or for all ports) in the
+ * fabric automatically disables whichever fabric event counter was
+ * enabled before.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+					 u8 port_id, u64 enable)
+{
+	struct feature_fme_dfpmon_fab_ctl ctl;
+	struct feature_fme_dperf *dperf;
+	bool state;
+
+	state = !!enable;
+
+	if (!state)
+		return -EINVAL;
+
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+
+	/* if it is already enabled. */
+	if (fabric_pobj_is_enabled(port_id, dperf))
+		return 0;
+
+	spinlock_lock(&fme->lock);
+	ctl.csr = readq(&dperf->fab_ctl);
+	if (port_id == PERF_OBJ_ROOT_ID)
+		ctl.port_filter = FAB_DISABLE_FILTER;
+	else {
+		ctl.port_filter = FAB_ENABLE_FILTER;
+		ctl.port_id = port_id;
+	}
+
+	writeq(ctl.csr, &dperf->fab_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+	struct feature_fme_dperf *dperf;
+	struct feature_fme_dfpmon_fab_ctl ctl;
+
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+	ctl.csr = readq(&dperf->fab_ctl);
+	*freeze = (u64)ctl.freeze;
+
+	return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+	struct feature_fme_dperf *dperf;
+	struct feature_fme_dfpmon_fab_ctl ctl;
+	bool state;
+
+	state = !!freeze;
+
+	spinlock_lock(&fme->lock);
+	dperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_DPERF);
+	ctl.csr = readq(&dperf->fab_ctl);
+	ctl.freeze = state;
+	writeq(ctl.csr, &dperf->fab_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+#define PERF_MAX_PORT_NUM	1
+
+static int fme_global_dperf_init(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME global_dperf Init.\n");
+
+	return 0;
+}
+
+static void fme_global_dperf_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
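+/*
+ * prop_id is a packed identifier: PROP_TOP selects the property group,
+ * PROP_SUB selects a port (or PERF_PROP_SUB_UNUSED for the root object)
+ * and PROP_ID names the individual property within the group.
+ */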
+static int fme_dperf_fab_get_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x1: /* FREEZE */
+		return fme_dperf_get_fab_freeze(fme, &prop->data);
+	case 0x2: /* PCIE0_READ */
+		return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+	case 0x3: /* PCIE0_WRITE */
+		return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+							  &prop->data);
+	case 0x4: /* MMIO_READ */
+		return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+	case 0x5: /* MMIO_WRITE */
+		return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+	case 0x6: /* ENABLE */
+		return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	if (sub != PERF_PROP_SUB_UNUSED)
+		return -ENOENT;
+
+	switch (id) {
+	case 0x1: /* CLOCK */
+		return fme_dperf_get_clock(fme, &prop->data);
+	case 0x2: /* REVISION */
+		return fme_dperf_get_revision(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+	switch (top) {
+	case PERF_PROP_TOP_FAB:
+		return fme_dperf_fab_get_prop(feature, prop);
+	case PERF_PROP_TOP_UNUSED:
+		return fme_dperf_root_get_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_dperf_fab_set_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x1: /* FREEZE - fab root only prop */
+		if (sub != PERF_PROP_SUB_UNUSED)
+			return -ENOENT;
+		return fme_dperf_set_fab_freeze(fme, prop->data);
+	case 0x6: /* ENABLE - fab both root and sub */
+		return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+	switch (top) {
+	case PERF_PROP_TOP_FAB:
+		return fme_dperf_fab_set_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops fme_global_dperf_ops = {
+	.init = fme_global_dperf_init,
+	.uinit = fme_global_dperf_uinit,
+	.get_prop = fme_global_dperf_get_prop,
+	.set_prop = fme_global_dperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
new file mode 100644
index 0000000..93f9995
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
@@ -0,0 +1,399 @@
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_error0 fme_error0;
+
+	fme_error0.csr = readq(&fme_err->fme_err);
+	*val = fme_error0.csr;
+
+	return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_first_error fme_first_err;
+
+	fme_first_err.csr = readq(&fme_err->fme_first_err);
+	*val = fme_first_err.err_reg_status;
+
+	return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_next_error fme_next_err;
+
+	fme_next_err.csr = readq(&fme_err->fme_next_err);
+	*val = fme_next_err.err_reg_status;
+
+	return 0;
+}
+
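+/*
+ * Error clearing is done under the mask: mask all reporting first, compare
+ * the caller's snapshot against the live register (-EBUSY if new errors
+ * arrived in between), clear the sticky bits by writing them back, then
+ * restore the default mask.
+ */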
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_error0 fme_error0;
+	struct feature_fme_first_error fme_first_err;
+	struct feature_fme_next_error fme_next_err;
+	int ret = 0;
+
+	spinlock_lock(&fme->lock);
+	writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
+
+	fme_error0.csr = readq(&fme_err->fme_err);
+	if (val != fme_error0.csr) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	fme_first_err.csr = readq(&fme_err->fme_first_err);
+	fme_next_err.csr = readq(&fme_err->fme_next_err);
+
+	writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
+	writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
+	       &fme_err->fme_first_err);
+	writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
+	       &fme_err->fme_next_err);
+
+exit:
+	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+	spinlock_unlock(&fme->lock);
+
+	return ret;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_header header;
+
+	header.csr = readq(&fme_err->header);
+	*val = header.revision;
+
+	return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_pcie0_error pcie0_err;
+
+	pcie0_err.csr = readq(&fme_err->pcie0_err);
+	*val = pcie0_err.csr;
+
+	return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_pcie0_error pcie0_err;
+	int ret = 0;
+
+	spinlock_lock(&fme->lock);
+	writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+	pcie0_err.csr = readq(&fme_err->pcie0_err);
+	if (val != pcie0_err.csr)
+		ret = -EBUSY;
+	else
+		writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+		       &fme_err->pcie0_err);
+
+	writeq(0UL, &fme_err->pcie0_err_mask);
+	spinlock_unlock(&fme->lock);
+
+	return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_pcie1_error pcie1_err;
+
+	pcie1_err.csr = readq(&fme_err->pcie1_err);
+	*val = pcie1_err.csr;
+
+	return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_pcie1_error pcie1_err;
+	int ret = 0;
+
+	spinlock_lock(&fme->lock);
+	writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+	pcie1_err.csr = readq(&fme_err->pcie1_err);
+	if (val != pcie1_err.csr)
+		ret = -EBUSY;
+	else
+		writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+		       &fme_err->pcie1_err);
+
+	writeq(0UL, &fme_err->pcie1_err_mask);
+	spinlock_unlock(&fme->lock);
+
+	return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+	ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+	*val = ras_nonfaterr.csr;
+
+	return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_ras_catfaterror ras_catfaterr;
+
+	ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+	*val = ras_catfaterr.csr;
+
+	return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_ras_error_inj ras_error_inj;
+
+	ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+	*val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+	return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+					      FME_FEATURE_ID_GLOBAL_ERR);
+	struct feature_fme_ras_error_inj ras_error_inj;
+
+	spinlock_lock(&fme->lock);
+	ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+	if (val <= FME_RAS_ERROR_INJ_MASK) {
+		ras_error_inj.csr = val;
+	} else {
+		spinlock_unlock(&fme->lock);
+		return -EINVAL;
+	}
+
+	writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+	struct feature_fme_err *fme_err
+		= get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_GLOBAL_ERR);
+
+	writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+	writeq(0UL, &fme_err->pcie0_err_mask);
+	writeq(0UL, &fme_err->pcie1_err_mask);
+	writeq(0UL, &fme_err->ras_nonfat_mask);
+	writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct feature *feature)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+
+	fme_error_enable(fme);
+
+	if (feature->ctx_num)
+		fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+	return 0;
+}
+
+static void fme_global_error_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+}
+
+static int fme_err_fme_err_get_prop(struct feature *feature,
+				    struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x1: /* ERRORS */
+		return fme_err_get_errors(fme, &prop->data);
+	case 0x2: /* FIRST_ERROR */
+		return fme_err_get_first_error(fme, &prop->data);
+	case 0x3: /* NEXT_ERROR */
+		return fme_err_get_next_error(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct feature *feature,
+				 struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x5: /* REVISION */
+		return fme_err_get_revision(fme, &prop->data);
+	case 0x6: /* PCIE0_ERRORS */
+		return fme_err_get_pcie0_errors(fme, &prop->data);
+	case 0x7: /* PCIE1_ERRORS */
+		return fme_err_get_pcie1_errors(fme, &prop->data);
+	case 0x8: /* NONFATAL_ERRORS */
+		return fme_err_get_nonfatal_errors(fme, &prop->data);
+	case 0x9: /* CATFATAL_ERRORS */
+		return fme_err_get_catfatal_errors(fme, &prop->data);
+	case 0xa: /* INJECT_ERRORS */
+		return fme_err_get_inject_errors(fme, &prop->data);
+	case 0xb: /* REVISION */
+		return fme_err_get_revision(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+	/* PROP_SUB is never used */
+	if (sub != PROP_SUB_UNUSED)
+		return -ENOENT;
+
+	switch (top) {
+	case ERR_PROP_TOP_FME_ERR:
+		return fme_err_fme_err_get_prop(feature, prop);
+	case ERR_PROP_TOP_UNUSED:
+		return fme_err_root_get_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct feature *feature,
+				    struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x4: /* CLEAR */
+		return fme_err_set_clear(fme, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct feature *feature,
+				 struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x6: /* PCIE0_ERRORS */
+		return fme_err_set_pcie0_errors(fme, prop->data);
+	case 0x7: /* PCIE1_ERRORS */
+		return fme_err_set_pcie1_errors(fme, prop->data);
+	case 0xa: /* INJECT_ERRORS */
+		return fme_err_set_inject_errors(fme, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+	/* PROP_SUB is never used */
+	if (sub != PROP_SUB_UNUSED)
+		return -ENOENT;
+
+	switch (top) {
+	case ERR_PROP_TOP_FME_ERR:
+		return fme_err_fme_err_set_prop(feature, prop);
+	case ERR_PROP_TOP_UNUSED:
+		return fme_err_root_set_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_err_set_irq(struct feature *feature, void *irq_set)
+{
+	struct fpga_fme_err_irq_set *err_irq_set =
+			(struct fpga_fme_err_irq_set *)irq_set;
+	struct ifpga_fme_hw *fme;
+	int ret;
+
+	fme = (struct ifpga_fme_hw *)feature->parent;
+
+	spinlock_lock(&fme->lock);
+	if (!(fme->capability & FPGA_FME_CAP_ERR_IRQ)) {
+		spinlock_unlock(&fme->lock);
+		return -ENODEV;
+	}
+
+	ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
+	spinlock_unlock(&fme->lock);
+
+	return ret;
+}
+
+struct feature_ops fme_global_err_ops = {
+	.init = fme_global_error_init,
+	.uinit = fme_global_error_uinit,
+	.get_prop = fme_global_error_get_prop,
+	.set_prop = fme_global_error_set_prop,
+	.set_irq = fme_global_err_set_irq,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
new file mode 100644
index 0000000..7186d9e
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
@@ -0,0 +1,711 @@
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID	0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_clk_ctr clk;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+				FME_FEATURE_ID_GLOBAL_IPERF);
+	clk.afu_interf_clock = readq(&iperf->clk);
+
+	*clock = clk.afu_interf_clock;
+	return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_header header;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+						FME_FEATURE_ID_GLOBAL_IPERF);
+	header.csr = readq(&iperf->header);
+	*revision = header.revision;
+
+	return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_ch_ctl ctl;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->ch_ctl);
+	*freeze = (u64)ctl.freeze;
+	return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_ch_ctl ctl;
+	bool state;
+
+	state = !!freeze;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->ch_ctl);
+	ctl.freeze = state;
+	writeq(ctl.csr, &iperf->ch_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+#define IPERF_TIMEOUT	30
+
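+/*
+ * Cache counters are split across two counter registers; after the
+ * channel and event are selected, both registers are read and their
+ * counts summed to produce the total for that event.
+ */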
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+				  u8 channel, enum iperf_cache_events event)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_ch_ctl ctl;
+	struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+	u64 counter;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_GLOBAL_IPERF);
+
+	/* set channel access type and cache event code. */
+	ctl.csr = readq(&iperf->ch_ctl);
+	ctl.cci_chsel = channel;
+	ctl.cache_event = event;
+	writeq(ctl.csr, &iperf->ch_ctl);
+
+	/* check the event type in the counter registers */
+	ctr0.event_code = event;
+
+	if (fpga_wait_register_field(event_code, ctr0,
+				     &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+		spinlock_unlock(&fme->lock);
+		return -ETIMEDOUT;
+	}
+
+	ctr0.csr = readq(&iperf->ch_ctr0);
+	ctr1.csr = readq(&iperf->ch_ctr1);
+	counter = ctr0.cache_counter + ctr1.cache_counter;
+	spinlock_unlock(&fme->lock);
+
+	return counter;
+}
+
+#define CACHE_SHOW(name, type, event)					\
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme,		\
+					u64 *counter)			\
+{									\
+	*counter = read_cache_counter(fme, type, event);		\
+	return 0;							\
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+	   IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+	   IPERF_CACHE_TAG_WR_PORT_CONTEN);
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+	struct feature_fme_ifpmon_vtd_ctl ctl;
+	struct feature_fme_iperf *iperf;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->vtd_ctl);
+	*freeze = (u64)ctl.freeze;
+
+	return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+	struct feature_fme_ifpmon_vtd_ctl ctl;
+	struct feature_fme_iperf *iperf;
+	bool state;
+
+	state = !!freeze;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->vtd_ctl);
+	ctl.freeze = state;
+	writeq(ctl.csr, &iperf->vtd_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+				      enum iperf_vtd_sip_events event)
+{
+	struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+	struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+	struct feature_fme_iperf *iperf;
+	u64 counter;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+	sip_ctl.vtd_evtcode = event;
+	writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
+	sip_ctr.event_code = event;
+
+	if (fpga_wait_register_field(event_code, sip_ctr,
+				     &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+		spinlock_unlock(&fme->lock);
+		return -ETIMEDOUT;
+	}
+
+	sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+	counter = sip_ctr.vtd_counter;
+	spinlock_unlock(&fme->lock);
+
+	return counter;
+}
+
+#define VTD_SIP_SHOW(name, event)					\
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme,	\
+						u64 *counter)		\
+{									\
+	*counter = read_iommu_sip_counter(fme, event);			\
+	return 0;							\
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
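+/*
+ * Per-port VT-d events are contiguous in the event encoding, so the
+ * effective event code is the base event plus the port id.
+ */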
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+			      enum iperf_vtd_events base_event)
+{
+	struct feature_fme_ifpmon_vtd_ctl ctl;
+	struct feature_fme_ifpmon_vtd_ctr ctr;
+	struct feature_fme_iperf *iperf;
+	enum iperf_vtd_events event = base_event + port_id;
+	u64 counter;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->vtd_ctl);
+	ctl.vtd_evtcode = event;
+	writeq(ctl.csr, &iperf->vtd_ctl);
+
+	ctr.event_code = event;
+
+	if (fpga_wait_register_field(event_code, ctr,
+				     &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+		spinlock_unlock(&fme->lock);
+		return -ETIMEDOUT;
+	}
+
+	ctr.csr = readq(&iperf->vtd_ctr);
+	counter = ctr.vtd_counter;
+	spinlock_unlock(&fme->lock);
+
+	return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event)					\
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme,	\
+				u8 port_id, u64 *counter)		\
+{									\
+	*counter = read_iommu_counter(fme, port_id, base_event);	\
+	return 0;							\
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+	struct feature_fme_ifpmon_fab_ctl ctl;
+
+	ctl.csr = readq(&iperf->fab_ctl);
+
+	if (ctl.port_filter == FAB_DISABLE_FILTER)
+		return port_id == PERF_OBJ_ROOT_ID;
+
+	return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+			       enum iperf_fab_events fab_event)
+{
+	struct feature_fme_ifpmon_fab_ctl ctl;
+	struct feature_fme_ifpmon_fab_ctr ctr;
+	struct feature_fme_iperf *iperf;
+	u64 counter = 0;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+
+	/* if it is disabled, force the counter to return zero. */
+	if (!fabric_pobj_is_enabled(port_id, iperf))
+		goto exit;
+
+	ctl.csr = readq(&iperf->fab_ctl);
+	ctl.fab_evtcode = fab_event;
+	writeq(ctl.csr, &iperf->fab_ctl);
+
+	ctr.event_code = fab_event;
+
+	if (fpga_wait_register_field(event_code, ctr,
+				     &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+		spinlock_unlock(&fme->lock);
+		return -ETIMEDOUT;
+	}
+
+	ctr.csr = readq(&iperf->fab_ctr);
+	counter = ctr.fab_cnt;
+exit:
+	spinlock_unlock(&fme->lock);
+	return counter;
+}
+
+#define FAB_PORT_SHOW(name, event)					\
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme,	\
+				u8 port_id, u64 *counter)		\
+{									\
+	*counter = read_fabric_counter(fme, port_id, event);		\
+	return 0;							\
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+					 u8 port_id, u64 *enable)
+{
+	struct feature_fme_iperf *iperf;
+	int status;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+
+	status = fabric_pobj_is_enabled(port_id, iperf);
+	*enable = (u64)status;
+
+	return 0;
+}
+
+/*
+ * Enabling the event counter for one port (or for all ports) in the
+ * fabric automatically disables whichever fabric event counter was
+ * enabled before.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+					 u8 port_id, u64 enable)
+{
+	struct feature_fme_ifpmon_fab_ctl ctl;
+	struct feature_fme_iperf *iperf;
+	bool state;
+
+	state = !!enable;
+
+	if (!state)
+		return -EINVAL;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+
+	/* if it is already enabled. */
+	if (fabric_pobj_is_enabled(port_id, iperf))
+		return 0;
+
+	spinlock_lock(&fme->lock);
+	ctl.csr = readq(&iperf->fab_ctl);
+	if (port_id == PERF_OBJ_ROOT_ID)
+		ctl.port_filter = FAB_DISABLE_FILTER;
+	else {
+		ctl.port_filter = FAB_ENABLE_FILTER;
+		ctl.port_id = port_id;
+	}
+
+	writeq(ctl.csr, &iperf->fab_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_fab_ctl ctl;
+
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->fab_ctl);
+	*freeze = (u64)ctl.freeze;
+
+	return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+	struct feature_fme_iperf *iperf;
+	struct feature_fme_ifpmon_fab_ctl ctl;
+	bool state;
+
+	state = !!freeze;
+
+	spinlock_lock(&fme->lock);
+	iperf = get_fme_feature_ioaddr_by_index(fme,
+					    FME_FEATURE_ID_GLOBAL_IPERF);
+	ctl.csr = readq(&iperf->fab_ctl);
+	ctl.freeze = state;
+	writeq(ctl.csr, &iperf->fab_ctl);
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+#define PERF_MAX_PORT_NUM	1
+#define FME_IPERF_CAP_IOMMU	0x1
+
+static int fme_global_iperf_init(struct feature *feature)
+{
+	struct ifpga_fme_hw *fme;
+	struct feature_fme_header *fme_hdr;
+	struct feature_fme_capability fme_capability;
+
+	dev_info(NULL, "FME global_iperf Init.\n");
+
+	fme = (struct ifpga_fme_hw *)feature->parent;
+	fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	/* check whether the iommu is supported on this device */
+	fme_capability.csr = readq(&fme_hdr->capability);
+	dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+		 (unsigned long long)fme_capability.csr);
+
+	if (fme_capability.iommu_support)
+		feature->cap |= FME_IPERF_CAP_IOMMU;
+
+	return 0;
+}
+
+static void fme_global_iperf_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	if (sub != PERF_PROP_SUB_UNUSED)
+		return -ENOENT;
+
+	switch (id) {
+	case 0x1: /* CLOCK */
+		return fme_iperf_get_clock(fme, &prop->data);
+	case 0x2: /* REVISION */
+		return fme_iperf_get_revision(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct feature *feature,
+				    struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	if (sub != PERF_PROP_SUB_UNUSED)
+		return -ENOENT;
+
+	switch (id) {
+	case 0x1: /* FREEZE */
+		return fme_iperf_get_cache_freeze(fme, &prop->data);
+	case 0x2: /* READ_HIT */
+		return fme_iperf_get_cache_read_hit(fme, &prop->data);
+	case 0x3: /* READ_MISS */
+		return fme_iperf_get_cache_read_miss(fme, &prop->data);
+	case 0x4: /* WRITE_HIT */
+		return fme_iperf_get_cache_write_hit(fme, &prop->data);
+	case 0x5: /* WRITE_MISS */
+		return fme_iperf_get_cache_write_miss(fme, &prop->data);
+	case 0x6: /* HOLD_REQUEST */
+		return fme_iperf_get_cache_hold_request(fme, &prop->data);
+	case 0x7: /* TX_REQ_STALL */
+		return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+	case 0x8: /* RX_REQ_STALL */
+		return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+	case 0x9: /* RX_EVICTION */
+		return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+	case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+		return fme_iperf_get_cache_data_write_port_contention(fme,
+							&prop->data);
+	case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+		return fme_iperf_get_cache_tag_write_port_contention(fme,
+							&prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct feature *feature,
+				       struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x1: /* FREEZE */
+		return fme_iperf_get_vtd_freeze(fme, &prop->data);
+	case 0x2: /* IOTLB_4K_HIT */
+		return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+	case 0x3: /* IOTLB_2M_HIT */
+		return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+	case 0x4: /* IOTLB_1G_HIT */
+		return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+	case 0x5: /* SLPWC_L3_HIT */
+		return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+	case 0x6: /* SLPWC_L4_HIT */
+		return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+	case 0x7: /* RCC_HIT */
+		return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+	case 0x8: /* IOTLB_4K_MISS */
+		return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+	case 0x9: /* IOTLB_2M_MISS */
+		return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+	case 0xa: /* IOTLB_1G_MISS */
+		return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+	case 0xb: /* SLPWC_L3_MISS */
+		return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+	case 0xc: /* SLPWC_L4_MISS */
+		return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+	case 0xd: /* RCC_MISS */
+		return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
+				      struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+	if (sub > PERF_MAX_PORT_NUM)
+		return -ENOENT;
+
+	switch (id) {
+	case 0xe: /* READ_TRANSACTION */
+		return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+							       &prop->data);
+	case 0xf: /* WRITE_TRANSACTION */
+		return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+								&prop->data);
+	case 0x10: /* DEVTLB_READ_HIT */
+		return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+							      &prop->data);
+	case 0x11: /* DEVTLB_WRITE_HIT */
+		return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+							       &prop->data);
+	case 0x12: /* DEVTLB_4K_FILL */
+		return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+							     &prop->data);
+	case 0x13: /* DEVTLB_2M_FILL */
+		return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+							     &prop->data);
+	case 0x14: /* DEVTLB_1G_FILL */
+		return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+							     &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+	if (sub == PERF_PROP_SUB_UNUSED)
+		return fme_iperf_vtd_root_get_prop(feature, prop);
+
+	return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	/* FREEZE is a root-only property; the rest exist at both levels */
+	switch (id) {
+	case 0x1: /* FREEZE */
+		if (sub != PERF_PROP_SUB_UNUSED)
+			return -ENOENT;
+		return fme_iperf_get_fab_freeze(fme, &prop->data);
+	case 0x2: /* PCIE0_READ */
+		return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+							 &prop->data);
+	case 0x3: /* PCIE0_WRITE */
+		return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+							  &prop->data);
+	case 0x4: /* PCIE1_READ */
+		return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+							 &prop->data);
+	case 0x5: /* PCIE1_WRITE */
+		return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+							  &prop->data);
+	case 0x6: /* UPI_READ */
+		return fme_iperf_get_fab_port_upi_read(fme, sub,
+						       &prop->data);
+	case 0x7: /* UPI_WRITE */
+		return fme_iperf_get_fab_port_upi_write(fme, sub,
+							&prop->data);
+	case 0x8: /* MMIO_READ */
+		return fme_iperf_get_fab_port_mmio_read(fme, sub,
+							&prop->data);
+	case 0x9: /* MMIO_WRITE */
+		return fme_iperf_get_fab_port_mmio_write(fme, sub,
+							 &prop->data);
+	case 0xa: /* ENABLE */
+		return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+	switch (top) {
+	case PERF_PROP_TOP_CACHE:
+		return fme_iperf_cache_get_prop(feature, prop);
+	case PERF_PROP_TOP_VTD:
+		return fme_iperf_vtd_get_prop(feature, prop);
+	case PERF_PROP_TOP_FAB:
+		return fme_iperf_fab_get_prop(feature, prop);
+	case PERF_PROP_TOP_UNUSED:
+		return fme_iperf_root_get_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_iperf_cache_set_prop(struct feature *feature,
+				    struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+		return fme_iperf_set_cache_freeze(fme, prop->data);
+
+	return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+		return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+	return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct feature *feature,
+				  struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme = feature->parent;
+	u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+	u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	switch (id) {
+	case 0x1: /* FREEZE */
+		if (sub != PERF_PROP_SUB_UNUSED)
+			return -ENOENT;
+		return fme_iperf_set_fab_freeze(fme, prop->data);
+	case 0xa: /* ENABLE */
+		return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct feature *feature,
+				     struct feature_prop *prop)
+{
+	u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+	switch (top) {
+	case PERF_PROP_TOP_CACHE:
+		return fme_iperf_cache_set_prop(feature, prop);
+	case PERF_PROP_TOP_VTD:
+		return fme_iperf_vtd_set_prop(feature, prop);
+	case PERF_PROP_TOP_FAB:
+		return fme_iperf_fab_set_prop(feature, prop);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops fme_global_iperf_ops = {
+	.init = fme_global_iperf_init,
+	.uinit = fme_global_iperf_uinit,
+	.get_prop = fme_global_iperf_get_prop,
+	.set_prop = fme_global_iperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
new file mode 100644
index 0000000..2a5a320
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
@@ -0,0 +1,364 @@
+#include "ifpga_feature_dev.h"
+
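+/*
+ * PR data is pushed to the hardware in 64-byte beats when pr_bandwidth is
+ * 64; copy512() uses an AVX-512 load and a non-temporal store so the
+ * bitstream data bypasses the CPU cache on its way to the PR data region.
+ */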
+#if defined(RTE_ARCH_X86) && defined(RTE_MACHINE_CPUFLAG_AVX512F)
+static inline void copy512(const void *src, void *dst)
+{
+	asm volatile("vmovdqu64 (%0), %%zmm0;"
+		     "vmovntdq %%zmm0, (%1);"
+		     :
+		     : "r"(src), "r"(dst));
+}
+#else
+static inline void copy512(const void *src, void *dst)
+{
+	UNUSED(src);
+	UNUSED(dst);
+	WARN_ON(1);
+}
+#endif
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+	struct feature_fme_pr_status fme_pr_status;
+	unsigned long err_code;
+	u64 fme_pr_error;
+
+	fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+	if (!fme_pr_status.pr_status)
+		return 0;
+
+	err_code = readq(&fme_pr->ccip_fme_pr_err);
+	fme_pr_error = err_code;
+	writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+	return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+			     struct fpga_pr_info *info)
+{
+	struct feature_fme_pr *fme_pr;
+	struct feature_fme_pr_ctl fme_pr_ctl;
+	struct feature_fme_pr_status fme_pr_status;
+
+	fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+						 FME_FEATURE_ID_PR_MGMT);
+	if (!fme_pr)
+		return -EINVAL;
+
+	if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+		return -EINVAL;
+
+	dev_info(fme_dev, "resetting PR before initiating a new PR request\n");
+
+	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+	fme_pr_ctl.pr_reset = 1;
+	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+	fme_pr_ctl.pr_reset_ack = 1;
+
+	if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+		&fme_pr->ccip_fme_pr_control, PR_WAIT_TIMEOUT, 1)) {
+		dev_err(fme_dev, "timeout waiting for PR reset ack\n");
+		return -ETIMEDOUT;
+	}
+
+	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+	fme_pr_ctl.pr_reset = 0;
+	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+	dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+	fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+	if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+		&fme_pr->ccip_fme_pr_status, PR_WAIT_TIMEOUT, 1)) {
+		dev_err(fme_dev, "timeout waiting for PR resource to become ready\n");
+		return -ETIMEDOUT;
+	}
+
+	dev_info(fme_dev, "checking for previous PR errors\n");
+	pr_err_handle(fme_pr);
+	return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+			int port_id, const char *buf, size_t count,
+			struct fpga_pr_info *info)
+{
+	struct feature_fme_pr *fme_pr;
+	struct feature_fme_pr_ctl fme_pr_ctl;
+	struct feature_fme_pr_status fme_pr_status;
+	struct feature_fme_pr_data fme_pr_data;
+	int delay, pr_credit;
+	int ret = 0;
+
+	fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+						 FME_FEATURE_ID_PR_MGMT);
+	if (!fme_pr)
+		return -EINVAL;
+
+	dev_info(fme_dev, "set PR port ID and start request\n");
+
+	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+	fme_pr_ctl.pr_regionid = port_id;
+	fme_pr_ctl.pr_start_req = 1;
+	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+	dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+	fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+	pr_credit = fme_pr_status.pr_credit;
+
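+	/*
+	 * Credit-based flow control: each write to the PR data register
+	 * consumes one credit; when credits run low, busy-wait (bounded by
+	 * PR_WAIT_TIMEOUT) for the hardware to replenish them.
+	 */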
+	while (count > 0) {
+		delay = 0;
+		while (pr_credit <= 1) {
+			if (delay++ > PR_WAIT_TIMEOUT) {
+				dev_err(fme_dev, "timeout waiting for PR credits\n");
+
+				info->pr_err = pr_err_handle(fme_pr);
+				return info->pr_err ? -EIO : -ETIMEDOUT;
+			}
+			udelay(1);
+
+			fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+			pr_credit = fme_pr_status.pr_credit;
+		}
+
+		if (count >= fme_dev->pr_bandwidth) {
+			switch (fme_dev->pr_bandwidth) {
+			case 4:
+				fme_pr_data.rsvd = 0;
+				fme_pr_data.pr_data_raw = *((const u32 *)buf);
+				writeq(fme_pr_data.csr,
+				       &fme_pr->ccip_fme_pr_data);
+				break;
+			case 64:
+				copy512(buf, &fme_pr->fme_pr_data1);
+				break;
+			default:
+				ret = -EFAULT;
+				goto done;
+			}
+
+			buf += fme_dev->pr_bandwidth;
+			count -= fme_dev->pr_bandwidth;
+			pr_credit--;
+		} else {
+			WARN_ON(1);
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+				 struct fpga_pr_info *info)
+{
+	struct feature_fme_pr *fme_pr;
+	struct feature_fme_pr_ctl fme_pr_ctl;
+
+	fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+						 FME_FEATURE_ID_PR_MGMT);
+
+	fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+	fme_pr_ctl.pr_push_complete = 1;
+	writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+	dev_info(fme_dev, "green bitstream push complete\n");
+	dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+	fme_pr_ctl.pr_start_req = 0;
+
+	if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+		&fme_pr->ccip_fme_pr_control, PR_WAIT_TIMEOUT, 1)) {
+		printf("maximum try.\n");
+		return -ETIMEDOUT;
+	}
+
+	dev_info(fme_dev, "PR operation complete, checking status\n");
+	info->pr_err = pr_err_handle(fme_pr);
+	if (info->pr_err)
+		return -EIO;
+
+	dev_info(fme_dev, "PR done successfully\n");
+	return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+			    struct fpga_pr_info *info, const char *buf,
+			    size_t count)
+{
+	int ret;
+
+	info->state = FPGA_PR_STATE_WRITE_INIT;
+	ret = fme_pr_write_init(fme_dev, info);
+	if (ret) {
+		dev_err(fme_dev, "Error preparing FPGA for writing\n");
+		info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+		return ret;
+	}
+
+	/*
+	 * Write the FPGA image to the FPGA.
+	 */
+	info->state = FPGA_PR_STATE_WRITE;
+	ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+	if (ret) {
+		dev_err(fme_dev, "Error while writing image data to FPGA\n");
+		info->state = FPGA_PR_STATE_WRITE_ERR;
+		return ret;
+	}
+
+	/*
+	 * After all the FPGA image has been written, do the device specific
+	 * steps to finish and set the FPGA into operating mode.
+	 */
+	info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+	ret = fme_pr_write_complete(fme_dev, info);
+	if (ret) {
+		dev_err(fme_dev, "Error after writing image data to FPGA\n");
+		info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+		return ret;
+	}
+	info->state = FPGA_PR_STATE_DONE;
+
+	return 0;
+}
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+		  u64 *status)
+{
+	struct feature_fme_header *fme_hdr;
+	struct feature_fme_capability fme_capability;
+	struct ifpga_fme_hw *fme = &hw->fme;
+	struct fpga_pr_info info;
+	struct ifpga_port_hw *port;
+	int ret = 0;
+
+	if (!buffer || size == 0)
+		return -EINVAL;
+	if (fme->state != IFPGA_FME_IMPLEMENTED)
+		return -EINVAL;
+
+	/*
+	 * Padding extra zeros to align PR buffer with PR bandwidth, HW will
+	 * ignore these zeros automatically.
+	 */
+	size = ALIGN(size, fme->pr_bandwidth);
+
+	/* get fme header region */
+	fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+						  FME_FEATURE_ID_HEADER);
+	if (!fme_hdr)
+		return -EINVAL;
+
+	/* check port id */
+	fme_capability.csr = readq(&fme_hdr->capability);
+	if (port_id >= fme_capability.num_ports) {
+		dev_err(fme, "port_id exceeds the number of ports\n");
+		return -EINVAL;
+	}
+
+	memset(&info, 0, sizeof(struct fpga_pr_info));
+	info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+	info.port_id = port_id;
+
+	spinlock_lock(&fme->lock);
+
+	/* get port device by port_id */
+	port = &hw->port[port_id];
+
+	/* Disable Port before PR */
+	fpga_port_disable(port);
+
+	ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
+
+	*status = info.pr_err;
+
+	/* Re-enable Port after PR finished */
+	fpga_port_enable(port);
+	spinlock_unlock(&fme->lock);
+
+	return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
+{
+	struct bts_header *bts_hdr;
+	void *buf;
+	struct ifpga_port_hw *port;
+	int ret;
+
+	if (!buffer || size == 0) {
+		dev_err(hw, "invalid parameter\n");
+		return -EINVAL;
+	}
+
+	bts_hdr = (struct bts_header *)buffer;
+
+	if (is_valid_bts(bts_hdr)) {
+		dev_info(hw, "this is a valid bitsteam..\n");
+		size -= (sizeof(struct bts_header) +
+				     bts_hdr->metadata_len);
+		buf = (u8 *)buffer + sizeof(struct bts_header) +
+			       bts_hdr->metadata_len;
+	} else {
+		return -EINVAL;
+	}
+
+	/* clear port errors before doing PR */
+	port = &hw->port[port_id];
+	ret = port_clear_error(port);
+	if (ret) {
+		dev_err(hw, "port cannot clear error\n");
+		return -EINVAL;
+	}
+
+	return fme_pr(hw, port_id, buf, size, status);
+}
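+
+/*
+ * Example (illustrative sketch, not part of this patch): a caller holding
+ * an ifpga_hw handle could trigger PR on port 0 as below; bts_buf and
+ * bts_size are assumed to be supplied by the application.
+ *
+ *	u64 status = 0;
+ *	int ret = do_pr(hw, 0, bts_buf, bts_size, &status);
+ *
+ *	if (ret)
+ *		dev_err(hw, "PR failed: %d, status 0x%llx\n", ret,
+ *			(unsigned long long)status);
+ */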
+
+static int fme_pr_mgmt_init(struct feature *feature)
+{
+	struct feature_fme_pr *fme_pr;
+	struct feature_header fme_pr_header;
+	struct ifpga_fme_hw *fme;
+
+	dev_info(NULL, "FME PR MGMT Init.\n");
+
+	fme = (struct ifpga_fme_hw *)feature->parent;
+
+	fme_pr = (struct feature_fme_pr *)feature->addr;
+
+	fme_pr_header.csr = readq(&fme_pr->header);
+	if (fme_pr_header.revision == 2) {
+		dev_info(NULL, "using 512-bit PR\n");
+		fme->pr_bandwidth = 64;
+	} else {
+		dev_info(NULL, "using 32-bit PR\n");
+		fme->pr_bandwidth = 4;
+	}
+
+	return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct feature_ops fme_pr_mgmt_ops = {
+	.init = fme_pr_mgmt_init,
+	.uinit = fme_pr_mgmt_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
new file mode 100644
index 0000000..a4257f8
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
@@ -0,0 +1,145 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+
+enum ifpga_feature_state {
+	IFPGA_FEATURE_UNUSED = 0,
+	IFPGA_FEATURE_ATTACHED,
+};
+
+struct feature_irq_ctx {
+	int eventfd;
+	int idx;
+};
+
+struct feature {
+	enum ifpga_feature_state state;
+	const char *name;
+	u64 id;
+	u8 *addr;
+	uint64_t phys_addr;
+	u32 size;
+	int revision;
+	u64 cap;
+	int vfio_dev_fd;
+	struct feature_irq_ctx *ctx;
+	unsigned int ctx_num;
+
+	void *parent;		/* to parent hw data structure */
+
+	struct feature_ops *ops;/* callback to this private feature */
+};
+
+struct feature_ops {
+	int (*init)(struct feature *feature);
+	void (*uinit)(struct feature *feature);
+	int (*test)(struct feature *feature);
+	int (*get_prop)(struct feature *feature, struct feature_prop *prop);
+	int (*set_prop)(struct feature *feature, struct feature_prop *prop);
+	int (*set_irq)(struct feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+	IFPGA_FME_UNUSED = 0,
+	IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+	enum ifpga_fme_state state;
+
+	struct feature sub_feature[FME_FEATURE_ID_MAX];
+	spinlock_t lock;	/* protect hardware access */
+
+	void *parent;		/* pointer to ifpga_hw */
+
+	/* provided by HEADER feature */
+	u32 port_num;
+	uuid bitstream_id;
+	u64 bitstream_md;
+	size_t pr_bandwidth;
+	u32 socket_id;
+	u32 fabric_version_id;
+	u32 cache_size;
+
+	u32 capability;
+};
+
+enum ifpga_port_state {
+	IFPGA_PORT_UNUSED = 0,
+	IFPGA_PORT_ATTACHED,
+	IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+	enum ifpga_port_state state;
+
+	struct feature sub_feature[PORT_FEATURE_ID_MAX];
+	spinlock_t lock;	/* protect access to hw */
+
+	void *parent;		/* pointer to ifpga_hw */
+
+	int port_id;		/* provided by HEADER feature */
+	uuid afu_id;		/* provided by User AFU feature */
+
+	unsigned int disable_count;
+
+	u32 capability;
+	u32 num_umsgs;	/* The number of allocated umsgs */
+	u32 num_uafu_irqs;	/* The number of uafu interrupts */
+};
+
+struct ifpga_hw {
+	struct opae_adapter *adapter;
+	struct opae_adapter_data_pci *pci_data;
+
+	struct ifpga_fme_hw fme;
+	struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+	return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+	if (port_id >= MAX_FPGA_PORT_NUM ||
+		(hw->port[port_id].state != IFPGA_PORT_ATTACHED))
+		return false;
+	return true;
+}
+#endif /* _IFPGA_HW_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
new file mode 100644
index 0000000..32a38f2
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -0,0 +1,699 @@
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+	struct feature *feature;
+
+	if (!port)
+		return -ENOENT;
+
+	feature = get_port_feature_by_id(port, prop->feature_id);
+
+	if (feature && feature->ops && feature->ops->get_prop)
+		return feature->ops->get_prop(feature, prop);
+
+	return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+	struct feature *feature;
+
+	if (!port)
+		return -ENOENT;
+
+	feature = get_port_feature_by_id(port, prop->feature_id);
+
+	if (feature && feature->ops && feature->ops->set_prop)
+		return feature->ops->set_prop(feature, prop);
+
+	return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+	struct feature *feature;
+
+	if (!port)
+		return -ENOENT;
+
+	feature = get_port_feature_by_id(port, feature_id);
+
+	if (feature && feature->ops && feature->ops->set_irq)
+		return feature->ops->set_irq(feature, irq_set);
+
+	return -ENOENT;
+}
+
+
+void port_check_reg(void __iomem *addr,
+		    const char *reg_name, u64 dflt)
+{
+	u64 value = readq(addr);
+
+	UNUSED(reg_name);
+
+	if (value != dflt)
+		dev_debug(NULL, "%s: incorrect value 0x%llx vs defautl 0x%llx\n",
+			  reg_name, (unsigned long long)value,
+			  (unsigned long long)dflt);
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+	struct feature_port_header *port_hdr
+		= get_port_feature_ioaddr_by_index(port,
+						   PORT_FEATURE_ID_HEADER);
+	struct feature_header header;
+
+	header.csr = readq(&port_hdr->header);
+
+	*revision = header.revision;
+
+	return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_capability capability;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	capability.csr = readq(&port_hdr->capability);
+	*idx = capability.port_number;
+
+	return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_control control;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	control.csr = readq(&port_hdr->control);
+	*val = control.latency_tolerance;
+
+	return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_status status;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	status.csr = readq(&port_hdr->status);
+	spinlock_unlock(&port->lock);
+
+	*val = status.ap1_event;
+
+	return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_status status;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	status.csr = readq(&port_hdr->status);
+	status.ap1_event = val;
+	writeq(status.csr, &port_hdr->status);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_status status;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	status.csr = readq(&port_hdr->status);
+	spinlock_unlock(&port->lock);
+
+	*val = status.ap2_event;
+
+	return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_status status;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	status.csr = readq(&port_hdr->status);
+	status.ap2_event = val;
+	writeq(status.csr, &port_hdr->status);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+	struct feature_port_status status;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	status.csr = readq(&port_hdr->status);
+	spinlock_unlock(&port->lock);
+
+	*val = status.power_state;
+
+	return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	*val = readq(&port_hdr->user_clk_freq_cmd0);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	writeq(val, &port_hdr->user_clk_freq_cmd0);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	*val = readq(&port_hdr->user_clk_freq_cmd1);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	writeq(val, &port_hdr->user_clk_freq_cmd1);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	*val = readq(&port_hdr->user_clk_freq_sts0);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+	struct feature_port_header *port_hdr;
+
+	port_hdr = get_port_feature_ioaddr_by_index(port,
+						    PORT_FEATURE_ID_HEADER);
+
+	spinlock_lock(&port->lock);
+	*val = readq(&port_hdr->user_clk_freq_sts1);
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static struct feature_port_header hdr_dflt = {
+	.port_mailbox		= 0x0000000000000000,
+	.scratchpad		= 0x0000000000000000,
+	.capability = {
+		.csr		= 0x0000000100010000,
+	},
+	.control = {
+		/* Port Reset Bit is cleared in PCIe driver */
+		.csr		= 0x0000000000000004,
+	},
+	.status = {
+		.csr		= 0x0000000000000000,
+	},
+	.rsvd2			= 0x0000000000000000,
+	.user_clk_freq_cmd0	= 0x0000000000000000,
+	.user_clk_freq_cmd1	= 0x0000000000000000,
+	.user_clk_freq_sts0	= 0x0000000000000000,
+	.user_clk_freq_sts1	= 0x0000000000000000,
+};
+
+static int port_hdr_test(struct feature *feature)
+{
+	struct feature_port_header *port_hdr =
+		(struct feature_port_header *)feature->addr;
+
+	/* Check if default value of hardware registers matches with spec */
+	port_check_reg(&port_hdr->port_mailbox,
+			"hdr:port_mailbox", hdr_dflt.port_mailbox);
+	port_check_reg(&port_hdr->scratchpad,
+			"hdr:scratchpad", hdr_dflt.scratchpad);
+	port_check_reg(&port_hdr->capability,
+			"hdr:capability", hdr_dflt.capability.csr);
+	port_check_reg(&port_hdr->control,
+			"hdr:control", hdr_dflt.control.csr);
+	port_check_reg(&port_hdr->status,
+			"hdr:status", hdr_dflt.status.csr);
+	port_check_reg(&port_hdr->rsvd2,
+			"hdr:rsvd2", hdr_dflt.rsvd2);
+	port_check_reg(&port_hdr->user_clk_freq_cmd0,
+			"hdr:user_clk_cmd0", hdr_dflt.user_clk_freq_cmd0);
+	port_check_reg(&port_hdr->user_clk_freq_cmd1,
+			"hdr:user_clk_cmd1", hdr_dflt.user_clk_freq_cmd1);
+	port_check_reg(&port_hdr->user_clk_freq_sts0,
+			"hdr:user_clk_sts0", hdr_dflt.user_clk_freq_sts0);
+	port_check_reg(&port_hdr->user_clk_freq_sts1,
+			"hdr:user_clk_sts1", hdr_dflt.user_clk_freq_sts1);
+
+	dev_debug(NULL, "%s finished\n", __func__);
+
+	return 0;
+}
+
+static int port_hdr_init(struct feature *feature)
+{
+	struct ifpga_port_hw *port = feature->parent;
+
+	dev_info(NULL, "port hdr Init.\n");
+
+	fpga_port_reset(port);
+
+	return 0;
+}
+
+static void port_hdr_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_port_hw *port = feature->parent;
+
+	switch (prop->prop_id) {
+	case PORT_HDR_REVISION:
+		return port_get_revision(port, &prop->data);
+	case PORT_HDR_PORTIDX:
+		return port_get_portidx(port, &prop->data);
+	case PORT_HDR_LATENCY_TOLERANCE:
+		return port_get_latency_tolerance(port, &prop->data);
+	case PORT_HDR_AP1_EVENT:
+		return port_get_ap1_event(port, &prop->data);
+	case PORT_HDR_AP2_EVENT:
+		return port_get_ap2_event(port, &prop->data);
+	case PORT_HDR_POWER_STATE:
+		return port_get_power_state(port, &prop->data);
+	case PORT_HDR_USERCLK_FREQCMD:
+		return port_get_userclk_freqcmd(port, &prop->data);
+	case PORT_HDR_USERCLK_FREQCNTRCMD:
+		return port_get_userclk_freqcntrcmd(port, &prop->data);
+	case PORT_HDR_USERCLK_FREQSTS:
+		return port_get_userclk_freqsts(port, &prop->data);
+	case PORT_HDR_USERCLK_CNTRSTS:
+		return port_get_userclk_freqcntrsts(port, &prop->data);
+	}
+
+	return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct feature *feature,
+				   struct feature_prop *prop)
+{
+	struct ifpga_port_hw *port = feature->parent;
+
+	switch (prop->prop_id) {
+	case PORT_HDR_AP1_EVENT:
+		return port_set_ap1_event(port, prop->data);
+	case PORT_HDR_AP2_EVENT:
+		return port_set_ap2_event(port, prop->data);
+	case PORT_HDR_USERCLK_FREQCMD:
+		return port_set_userclk_freqcmd(port, prop->data);
+	case PORT_HDR_USERCLK_FREQCNTRCMD:
+		return port_set_userclk_freqcntrcmd(port, prop->data);
+	}
+
+	return -ENOENT;
+}
+
+struct feature_ops port_hdr_ops = {
+	.init = port_hdr_init,
+	.uinit = port_hdr_uinit,
+	.test = port_hdr_test,
+	.get_prop = port_hdr_get_prop,
+	.set_prop = port_hdr_set_prop,
+};
+
+static struct feature_port_umsg umsg_dflt = {
+	.capability = {
+		.csr		= 0x0000000000000008,
+	},
+	.baseaddr = {
+		.csr		= 0x0000000000000000,
+	},
+	.mode = {
+		.csr		= 0x0000000000000000,
+	},
+};
+
+static int port_umsg_test(struct feature *feature)
+{
+	struct feature_port_umsg *port_umsg =
+		(struct feature_port_umsg *)feature->addr;
+
+	port_check_reg(&port_umsg->capability,
+				"umsg:capaiblity", umsg_dflt.capability.csr);
+	port_check_reg(&port_umsg->baseaddr,
+				"umsg:baseaddr", umsg_dflt.baseaddr.csr);
+	port_check_reg(&port_umsg->mode,
+				"umsg:mode", umsg_dflt.mode.csr);
+
+	dev_debug(NULL, "%s finished\n", __func__);
+	return 0;
+}
+
+static u8 port_umsg_get_num(struct ifpga_port_hw *port)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_cap capability;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+						PORT_FEATURE_ID_UMSG);
+
+	capability.csr = readq(&port_umsg->capability);
+
+	return capability.umsg_allocated;
+}
+
+static u64 port_umsg_get_addr(struct ifpga_port_hw *port)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_baseaddr baseaddr;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+					PORT_FEATURE_ID_UMSG);
+
+	baseaddr.csr = readq(&port_umsg->baseaddr);
+
+	return baseaddr.base_addr;
+}
+
+static int port_umsg_enable(struct ifpga_port_hw *port, bool enable)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_cap capability;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+					PORT_FEATURE_ID_UMSG);
+
+	capability.csr = readq(&port_umsg->capability);
+
+	/* Return directly if UMSG is already enabled/disabled */
+	if ((enable && capability.umsg_enable) ||
+			!(enable || capability.umsg_enable))
+		return 0;
+
+	capability.umsg_enable = enable;
+	writeq(capability.csr, &port_umsg->capability);
+
+	/*
+	 * Each time the umsg engine is enabled or disabled, the driver polls
+	 * the init_complete bit for confirmation.
+	 */
+	capability.umsg_init_complete = !!enable;
+
+	if (fpga_wait_register_field(umsg_init_complete, capability,
+				     &port_umsg->capability,
+				     UMSG_EN_POLL_TIMEOUT, UMSG_EN_POLL_INVL)) {
+		dev_err(port, "timeout, failed to %s umsg\n",
+			enable ? "enable" : "disable");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static bool port_umsg_is_enabled(struct ifpga_port_hw *port)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_cap capability;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+						PORT_FEATURE_ID_UMSG);
+
+	capability.csr = readq(&port_umsg->capability);
+
+	return capability.umsg_enable;
+}
+
+static void port_umsg_set_mode(struct ifpga_port_hw *port, u32 mode)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_mode umode;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+						PORT_FEATURE_ID_UMSG);
+
+	umode.csr = readq(&port_umsg->mode);
+	umode.umsg_hint_enable = mode;
+	writeq(umode.csr, &port_umsg->mode);
+}
+
+static void port_umsg_set_addr(struct ifpga_port_hw *port, u64 iova)
+{
+	struct feature_port_umsg *port_umsg;
+	struct feature_port_umsg_baseaddr baseaddr;
+
+	port_umsg = get_port_feature_ioaddr_by_index(port,
+					PORT_FEATURE_ID_UMSG);
+
+	baseaddr.csr = readq(&port_umsg->baseaddr);
+	baseaddr.base_addr = iova;
+	writeq(baseaddr.csr, &port_umsg->baseaddr);
+}
+
+int afu_port_umsg_enable(struct ifpga_port_hw *port, bool enable)
+{
+	if (enable && port_umsg_get_addr(port))
+		return -EIO;
+	if (port_umsg_enable(port, enable))
+		return -ENODEV;
+
+	return 0;
+}
+
+int afu_port_umsg_set_mode(struct ifpga_port_hw *port, u32 mode)
+{
+	u8 num_umsgs = port_umsg_get_num(port);
+
+	if (mode >> num_umsgs) {
+		dev_err(port, "invaild UMsg config hint_bitmap\n");
+		return -EINVAL;
+	}
+
+	port_umsg_set_mode(port, mode);
+
+	return 0;
+}
+
+static int __maybe_unused afu_port_umsg_set_addr(struct ifpga_port_hw *port,
+						 u64 iova)
+{
+	u8 num_umsgs = port_umsg_get_num(port);
+	u64 size = num_umsgs * PAGE_SIZE;
+
+	/* Make sure base addr is configured only when umsg is disabled */
+	if (port_umsg_is_enabled(port)) {
+		dev_err(port, "umsg is still enabled\n");
+		return -EIO;
+	}
+
+	if (iova) {
+		/* Check input, only accept page-aligned region for umsg */
+		if (!PAGE_ALIGNED(iova))
+			return -EINVAL;
+
+		/* Check overflow */
+		if (iova + size < iova)
+			return -EINVAL;
+
+		port_umsg_set_addr(port, iova);
+	} else {
+		/* Read current iova from hardware */
+		iova = port_umsg_get_addr(port);
+		if (!iova)
+			return 0;
+
+		/* Check overflow */
+		if ((iova + size < iova))
+			return -EINVAL;
+
+		port_umsg_set_addr(port, 0);
+	}
+
+	return 0;
+}
+
+static int port_umsg_init(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "port umsg Init.\n");
+
+	return 0;
+}
+
+static void port_umsg_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "port umsg uinit.\n");
+}
+
+struct feature_ops port_umsg_ops = {
+	.init = port_umsg_init,
+	.uinit = port_umsg_uinit,
+	.test = port_umsg_test,
+};
+
+static struct feature_port_stp stp_dflt = {
+	.stp_status = {
+		.csr		= 0x0000000000000000,
+	},
+};
+
+static int port_stp_test(struct feature *feature)
+{
+	struct feature_port_stp *port_stp =
+		(struct feature_port_stp *)feature->addr;
+
+	port_check_reg(&port_stp->stp_status,
+				"stp:stp_csr", stp_dflt.stp_status.csr);
+
+	dev_debug(NULL, "%s finished\n", __func__);
+	return 0;
+}
+
+static int port_stp_init(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "port stp Init.\n");
+
+	return 0;
+}
+
+static void port_stp_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "port stp uinit.\n");
+}
+
+struct feature_ops port_stp_ops = {
+	.init = port_stp_init,
+	.uinit = port_stp_uinit,
+	.test = port_stp_test,
+};
+
+static int port_uint_init(struct feature *feature)
+{
+	struct ifpga_port_hw *port = feature->parent;
+
+	dev_info(NULL, "PORT UINT Init.\n");
+
+	spinlock_lock(&port->lock);
+	if (feature->ctx_num) {
+		port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+		port->num_uafu_irqs = feature->ctx_num;
+	}
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static void port_uint_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+
+	dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+static int port_uint_set_irq(struct feature *feature, void *irq_set)
+{
+	struct fpga_uafu_irq_set *uafu_irq_set = irq_set;
+	struct ifpga_port_hw *port = feature->parent;
+	int ret;
+
+	spinlock_lock(&port->lock);
+	if (!(port->capability & FPGA_PORT_CAP_UAFU_IRQ)) {
+		spinlock_unlock(&port->lock);
+		return -ENODEV;
+	}
+
+	ret = fpga_msix_set_block(feature, uafu_irq_set->start,
+				  uafu_irq_set->count, uafu_irq_set->evtfds);
+	spinlock_unlock(&port->lock);
+
+	return ret;
+}
+
+struct feature_ops port_uint_ops = {
+	.init = port_uint_init,
+	.uinit = port_uint_uinit,
+	.set_irq = port_uint_set_irq,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
new file mode 100644
index 0000000..9439936
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -0,0 +1,112 @@
+#include "ifpga_feature_dev.h"
+
+static struct feature_port_error err_dflt = {
+	.error_mask = {
+		.csr		= 0x0000000000000000,
+	},
+	.port_error = {
+		.csr		= 0x0000000000000000,
+	},
+	.port_first_error = {
+		.csr		= 0x0000000000000000,
+	},
+	.malreq0 = {
+		.header_lsb	= 0x0000000000000000,
+	},
+	.malreq1 = {
+		.header_msb	= 0x0000000000000000,
+	},
+	.port_debug = {
+		.port_debug	= 0x0000000000000000,
+	},
+};
+
+static int port_err_test(struct feature *feature)
+{
+	struct feature_port_error *port_err =
+		(struct feature_port_error *)feature->addr;
+
+	port_check_reg(&port_err->error_mask,
+		       "err:error_mask", err_dflt.error_mask.csr);
+	port_check_reg(&port_err->port_error,
+		       "err:port_error", err_dflt.port_error.csr);
+	port_check_reg(&port_err->port_first_error,
+		       "err:port_first_err", err_dflt.port_first_error.csr);
+	port_check_reg(&port_err->malreq0,
+		       "err:malreq0", err_dflt.malreq0.header_lsb);
+	port_check_reg(&port_err->malreq1,
+		       "err:malreq1", err_dflt.malreq1.header_msb);
+	port_check_reg(&port_err->port_debug,
+		       "err:port_debug", err_dflt.port_debug.port_debug);
+
+	dev_debug(NULL, "%s finished\n", __func__);
+	return 0;
+}
+
+static int port_error_init(struct feature *feature)
+{
+	struct ifpga_port_hw *port = feature->parent;
+
+	dev_info(NULL, "port error Init.\n");
+
+	spinlock_lock(&port->lock);
+	port_err_mask(feature->addr, false);
+	if (feature->ctx_num)
+		port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+static void port_error_uinit(struct feature *feature)
+{
+	UNUSED(feature);
+}
+
+static int port_error_get_prop(struct feature *feature,
+			       struct feature_prop *prop)
+{
+	UNUSED(feature);
+	UNUSED(prop);
+
+	return 0;
+}
+
+static int port_error_set_prop(struct feature *feature,
+			       struct feature_prop *prop)
+{
+	UNUSED(feature);
+	UNUSED(prop);
+
+	return 0;
+}
+
+static int port_error_set_irq(struct feature *feature, void *irq_set)
+{
+	struct fpga_port_err_irq_set *err_irq_set =
+			(struct fpga_port_err_irq_set *)irq_set;
+	struct ifpga_port_hw *port;
+	int ret;
+
+	port = (struct ifpga_port_hw *)feature->parent;
+
+	spinlock_lock(&port->lock);
+	if (!(port->capability & FPGA_PORT_CAP_ERR_IRQ)) {
+		spinlock_unlock(&port->lock);
+		return -ENODEV;
+	}
+
+	ret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);
+	spinlock_unlock(&port->lock);
+
+	return ret;
+}
+
+struct feature_ops port_error_ops = {
+	.init = port_error_init,
+	.uinit = port_error_uinit,
+	.test = port_err_test,
+	.get_prop = port_error_get_prop,
+	.set_prop = port_error_set_prop,
+	.set_irq = port_error_set_irq,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.c b/drivers/raw/ifpga_rawdev/base/opae_debug.c
new file mode 100644
index 0000000..cba963b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.c
@@ -0,0 +1,95 @@
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+	opae_log("=====%s=====\n", __func__);
+	opae_log("OPAE Manger %s\n", mgr->name);
+	opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+	opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+	opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+	opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+	opae_log("=====%s=====\n", __func__);
+	opae_log("OPAE Bridge %s\n", br->name);
+	opae_log("OPAE Bridge ID = %d\n", br->id);
+	opae_log("OPAE Bridge OPs = %p\n", br->ops);
+	opae_log("OPAE Bridge Private Data = %p\n", br->data);
+	opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+	opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+	opae_log("=====%s=====\n", __func__);
+	opae_log("OPAE Accelerator %s\n", acc->name);
+	opae_log("OPAE Accelerator Index = %d\n", acc->index);
+	opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+	opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+	opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+	opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+	opae_log("==========================\n");
+
+	if (acc->br)
+		opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+	struct opae_adapter_data *d = data;
+	struct opae_adapter_data_pci *d_pci;
+	struct opae_reg_region *r;
+	int i;
+
+	opae_log("=====%s=====\n", __func__);
+
+	switch (d->type) {
+	case OPAE_FPGA_PCI:
+		d_pci = (struct opae_adapter_data_pci *)d;
+
+		opae_log("OPAE Adapter Type = PCI\n");
+		opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+		opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+
+		for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+			r = &d_pci->region[i];
+			opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+				 i, (unsigned long long)r->phys_addr,
+				 (unsigned long long)r->len, r->addr);
+		}
+		break;
+	case OPAE_FPGA_NET:
+		break;
+	}
+
+	opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+	struct opae_accelerator *acc;
+
+	opae_log("=====%s=====\n", __func__);
+	opae_log("OPAE Adapter %s\n", adapter->name);
+	opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+	opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+	opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+	if (verbose) {
+		if (adapter->mgr)
+			opae_manager_dump(adapter->mgr);
+
+		opae_adapter_for_each_acc(adapter, acc)
+			opae_accelerator_dump(acc);
+
+		if (adapter->data)
+			opae_adapter_data_dump(adapter->data);
+	}
+
+	opae_log("==========================\n");
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.h b/drivers/raw/ifpga_rawdev/base/opae_debug.h
new file mode 100644
index 0000000..49fbd37
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.h
@@ -0,0 +1,15 @@
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fmt, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
new file mode 100644
index 0000000..3de11b2
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
@@ -0,0 +1,355 @@
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+	struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+	if (!br)
+		return NULL;
+
+	br->name = name;
+	br->ops = ops;
+	br->data = data;
+
+	opae_log("%s %p\n", __func__, br);
+
+	return br;
+}
+
+/**
+ * opae_bridge_reset -  reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+	if (br && br->ops && br->ops->reset)
+		return br->ops->reset(br);
+
+	opae_log("%s no ops\n", __func__);
+
+	return -ENODEV;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+		       void *data)
+{
+	struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+	if (!acc)
+		return NULL;
+
+	acc->name = name;
+	acc->ops = ops;
+	acc->data = data;
+
+	opae_log("%s %p\n", __func__, acc);
+
+	return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width in bytes, e.g. 4 bytes = 32-bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+		      u64 offset, unsigned int byte, void *data)
+{
+	struct opae_acc_reg_region_info *resource;
+	struct opae_reg_region *region;
+
+	/* check inputs and overflow */
+	if (!acc || !data || offset + byte <= offset)
+		return -EINVAL;
+
+	if (acc->ops && acc->ops->read)
+		return acc->ops->read(acc, region_idx, offset, byte, data);
+
+	/* if no ops, then decode it as register region for normal mmio */
+	if (!acc->data)
+		return -EINVAL;
+
+	resource = acc->data;
+	if (region_idx >= resource->num_regions)
+		return -EINVAL;
+
+	region = &resource->region[region_idx];
+	if (offset + byte > region->len)
+		return -EINVAL;
+
+	switch (byte) {
+	case 8:
+		*(u64 *)data = opae_readq(region->addr + offset);
+		break;
+	case 4:
+		*(u32 *)data = opae_readl(region->addr + offset);
+		break;
+	case 2:
+		*(u16 *)data = opae_readw(region->addr + offset);
+		break;
+	case 1:
+		*(u8 *)data = opae_readb(region->addr + offset);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
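+
+/*
+ * Example (illustrative only): reading a 64-bit CSR at offset 0x0 of
+ * region 0, where "acc" is assumed to come from opae_adapter_get_acc():
+ *
+ *	u64 csr;
+ *
+ *	if (!opae_acc_reg_read(acc, 0, 0x0, 8, &csr))
+ *		opae_log("csr = 0x%llx\n", (unsigned long long)csr);
+ */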
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width in bytes, e.g. 4 bytes = 32-bit write.
+ * @data: data stored the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+		       u64 offset, unsigned int byte, void *data)
+{
+	struct opae_acc_reg_region_info *resource;
+	struct opae_reg_region *region;
+
+	/* check inputs and overflow */
+	if (!acc || !data || offset + byte <= offset)
+		return -EINVAL;
+
+	if (acc->ops && acc->ops->write)
+		return acc->ops->write(acc, region_idx, offset, byte, data);
+
+	/* if no ops, then decode it as register region for normal mmio */
+	if (!acc->data)
+		return -EINVAL;
+
+	resource = acc->data;
+	if (region_idx >= resource->num_regions)
+		return -EINVAL;
+
+	region = &resource->region[region_idx];
+	if (offset + byte > region->len)
+		return -EINVAL;
+
+	/* normal mmio case */
+	switch (byte) {
+	case 8:
+		opae_writeq(*(u64 *)data, region->addr + offset);
+		break;
+	case 4:
+		opae_writel(*(u32 *)data, region->addr + offset);
+		break;
+	case 2:
+		opae_writew(*(u16 *)data, region->addr + offset);
+		break;
+	case 1:
+		opae_writeb(*(u8 *)data, region->addr + offset);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
+{
+	struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+	if (!mgr)
+		return NULL;
+
+	mgr->name = name;
+	mgr->ops = ops;
+	mgr->data = data;
+
+	opae_log("%s %p\n", __func__, mgr);
+
+	return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
+		       u64 *status)
+{
+	if (mgr && mgr->ops && mgr->ops->flash)
+		return mgr->ops->flash(mgr, id, buf, size, status);
+
+	return -ENODEV;
+}
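+
+/*
+ * Example (sketch): flashing a PR bitstream into region/port 0 through a
+ * manager obtained via opae_adapter_get_mgr(); "buf" and "size" are
+ * assumed to be provided by the caller.
+ *
+ *	u64 status = 0;
+ *
+ *	if (opae_manager_flash(mgr, 0, buf, size, &status))
+ *		opae_log("flash failed, status 0x%llx\n",
+ *			 (unsigned long long)status);
+ */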
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+	struct opae_adapter_data *data;
+	int size;
+
+	switch (type) {
+	case OPAE_FPGA_PCI:
+		size = sizeof(struct opae_adapter_data_pci);
+		break;
+	case OPAE_FPGA_NET:
+		size = sizeof(struct opae_adapter_data_net);
+		break;
+	default:
+		size = sizeof(struct opae_adapter_data);
+		break;
+	}
+
+	data = opae_zmalloc(size);
+	if (!data)
+		return NULL;
+
+	data->type = type;
+
+	return data;
+}
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+	struct opae_adapter_data *data;
+
+	if (!adapter || !adapter->data)
+		return NULL;
+
+	data = adapter->data;
+
+	if (data->type == OPAE_FPGA_PCI)
+		return &ifpga_adapter_ops;
+
+	return NULL;
+}
+
+/**
+ * opae_adapter_alloc - alloc opae_adapter data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: opae_adapter on success, otherwise NULL.
+ */
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
+{
+	struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
+
+	if (!adapter)
+		return NULL;
+
+	TAILQ_INIT(&adapter->acc_list);
+	adapter->data = data;
+	adapter->name = name;
+	adapter->ops = match_ops(adapter);
+
+	return adapter;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+	int ret = -ENODEV;
+
+	if (adapter && adapter->ops && adapter->ops->enumerate)
+		ret = adapter->ops->enumerate(adapter);
+
+	if (!ret)
+		opae_adapter_dump(adapter, 1);
+
+	return ret;
+}
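+
+/*
+ * Example (sketch of the expected PCI call flow; the device name is
+ * hypothetical and region/ID setup is elided):
+ *
+ *	struct opae_adapter_data_pci *data =
+ *		opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ *	struct opae_adapter *adapter;
+ *	struct opae_accelerator *acc;
+ *
+ *	... fill in data->region[], vendor/device IDs ...
+ *
+ *	adapter = opae_adapter_alloc("ifpga_0000:5e:00.0", data);
+ *	if (adapter && !opae_adapter_enumerate(adapter)) {
+ *		acc = opae_adapter_get_acc(adapter, 0);
+ *		... use acc ...
+ *		opae_adapter_destroy(adapter);
+ *	}
+ *	opae_adapter_free(adapter);
+ *	opae_adapter_data_free(data);
+ */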
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+	if (adapter && adapter->ops && adapter->ops->destroy)
+		adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * Return: pointer to the matched accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+	struct opae_accelerator *acc = NULL;
+
+	if (!adapter)
+		return NULL;
+
+	opae_adapter_for_each_acc(adapter, acc)
+		if (acc->index == acc_id)
+			return acc;
+
+	return NULL;
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
new file mode 100644
index 0000000..49fc25b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
@@ -0,0 +1,235 @@
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+	OPAE_FPGA_PCI,
+	OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+
+/*
+ * opae_manager has a pointer to its parent adapter, as it may be able to
+ * manage all components on this FPGA device (adapter). If that is not the
+ * case, do not set this adapter pointer, which limits the opae_manager ops
+ * to the manager itself.
+ */
+struct opae_manager {
+	const char *name;
+	struct opae_adapter *adapter;
+	struct opae_manager_ops *ops;
+	void *data;
+};
+
+/* FIXME: add more management ops, e.g. power/thermal */
+struct opae_manager_ops {
+	int (*flash)(struct opae_manager *mgr, int id, void *buffer,
+		     u32 size, u64 *status);
+};
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
+		       u32 size, u64 *status);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+	const char *name;
+	int id;
+	struct opae_accelerator *acc;
+	struct opae_bridge_ops *ops;
+	void *data;
+};
+
+struct opae_bridge_ops {
+	int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Accelerator Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has a pointer to its upstream bridge (port).
+ * If the same user is allowed to do PR on its own accelerator, set the
+ * manager pointer during enumeration. Otherwise, PR can only be done via
+ * the manager in another module / thread / service / application, for
+ * better protection.
+ */
+struct opae_accelerator {
+	TAILQ_ENTRY(opae_accelerator) node;
+	const char *name;
+	int index;
+	struct opae_bridge *br;
+	struct opae_manager *mgr;
+	struct opae_accelerator_ops *ops;
+	void *data;
+};
+
+struct opae_accelerator_ops {
+	int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+		    u64 offset, unsigned int byte, void *data);
+	int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+		     u64 offset, unsigned int byte, void *data);
+	int (*recv_package)(struct opae_accelerator *acc,
+		int command, void *package, u32 size);
+	int (*send_package)(struct opae_accelerator *acc,
+		int command, void *package, u32 size);
+	int (*set_irq)(struct opae_accelerator *acc,
+		       u32 start, u32 count, s32 evtfds[]);
+};
+
+struct opae_reg_region {
+	u64 phys_addr;
+	u64 len;
+	u8 *addr;	/* must be virtual address, if no specific ops */
+};
+
+#define ACC_MAX_REGION 4
+
+struct opae_acc_reg_region_info {
+	struct opae_reg_region region[ACC_MAX_REGION];
+	unsigned int num_regions;
+};
+
+static inline struct opae_acc_reg_region_info *
+opae_accelerator_get_info(struct opae_accelerator *acc)
+{
+	return acc ? acc->data : NULL;
+}
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+		       void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+	return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+	return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+		      u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+		       u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+	opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+	opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+	opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+	opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+	opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+	opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+	opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+	opae_acc_reg_write(acc, region, offset, 1, data)
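+
+/*
+ * For example, "opae_acc_reg_read64(acc, 0, 0x28, &val)" reads a 64-bit
+ * register at offset 0x28 of region 0 (the offset here is illustrative).
+ */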
+
+/* for Vista Glacier */
+int opae_acc_send_package(struct opae_accelerator *acc, int command,
+			  void *package, u32 size);
+int opae_acc_recv_package(struct opae_accelerator *acc, int command,
+			  void *package, u32 size);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+	enum opae_adapter_type type;
+};
+
+struct opae_adapter_data_pci {
+	enum opae_adapter_type type;
+	u16 device_id;
+	u16 vendor_id;
+	struct opae_reg_region region[PCI_MAX_RESOURCE];
+	int vfio_dev_fd;  /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+	enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+	int (*enumerate)(struct opae_adapter *adapter);
+	void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adapter, acc) \
+	TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+	const char *name;
+	struct opae_manager *mgr;
+	struct opae_accelerator_list acc_list;
+	struct opae_adapter_ops *ops;
+	void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+	return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+					struct opae_accelerator *acc)
+{
+	TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+					   struct opae_accelerator *acc)
+{
+	TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
+#endif /* _OPAE_HW_API_H_*/
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
new file mode 100644
index 0000000..50bc69e
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
@@ -0,0 +1,120 @@
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+				struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme;
+
+	if (!mgr || !mgr->data)
+		return -EINVAL;
+
+	fme = mgr->data;
+
+	return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+				struct feature_prop *prop)
+{
+	struct ifpga_fme_hw *fme;
+
+	if (!mgr || !mgr->data)
+		return -EINVAL;
+
+	fme = mgr->data;
+
+	return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+				struct fpga_fme_info *fme_info)
+{
+	struct ifpga_fme_hw *fme;
+
+	if (!mgr || !mgr->data || !fme_info)
+		return -EINVAL;
+
+	fme = mgr->data;
+
+	spinlock_lock(&fme->lock);
+	fme_info->capability = fme->capability;
+	spinlock_unlock(&fme->lock);
+
+	return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+				   struct fpga_fme_err_irq_set *err_irq_set)
+{
+	struct ifpga_fme_hw *fme;
+
+	if (!mgr || !mgr->data)
+		return -EINVAL;
+
+	fme = mgr->data;
+
+	return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+			     FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+			       struct feature_prop *prop)
+{
+	struct ifpga_port_hw *port;
+
+	if (!br || !br->data)
+		return -EINVAL;
+
+	port = br->data;
+
+	return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+			      port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+			       struct feature_prop *prop)
+{
+	struct ifpga_port_hw *port;
+
+	if (!br || !br->data)
+		return -EINVAL;
+
+	port = br->data;
+
+	return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+			      port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+			       struct fpga_port_info *port_info)
+{
+	struct ifpga_port_hw *port;
+
+	if (!br || !br->data || !port_info)
+		return -EINVAL;
+
+	port = br->data;
+
+	spinlock_lock(&port->lock);
+	port_info->capability = port->capability;
+	port_info->num_umsgs = port->num_umsgs;
+	port_info->num_uafu_irqs = port->num_uafu_irqs;
+	spinlock_unlock(&port->lock);
+
+	return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+				  struct fpga_port_err_irq_set *err_irq_set)
+{
+	struct ifpga_port_hw *port;
+
+	if (!br || !br->data)
+		return -EINVAL;
+
+	port = br->data;
+
+	return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+			     PORT_FEATURE_ID_ERROR, err_irq_set);
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
new file mode 100644
index 0000000..f54412d
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
@@ -0,0 +1,253 @@
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+	u64 feature_id;
+	u64 prop_id;
+	u64 data;
+};
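+
+/*
+ * Example (illustrative): reading the port index through the PORT header
+ * feature, assuming "br" is the OPAE bridge of that port:
+ *
+ *	struct feature_prop prop = {
+ *		.feature_id = IFPGA_PORT_FEATURE_ID_HEADER,
+ *		.prop_id = PORT_HDR_PORTIDX,
+ *	};
+ *
+ *	if (!opae_bridge_ifpga_get_prop(br, &prop))
+ *		opae_log("port index = %llu\n",
+ *			 (unsigned long long)prop.data);
+ */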
+
+#define IFPGA_FIU_ID_FME	0x0
+#define IFPGA_FIU_ID_PORT	0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER		0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT	0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT		0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF	0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR		0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT		0x5
+#define IFPGA_FME_FEATURE_ID_HSSI		0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF	0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER		0x0
+#define IFPGA_PORT_FEATURE_ID_AFU		0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR		0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG		0x11
+#define IFPGA_PORT_FEATURE_ID_UINT		0x12
+#define IFPGA_PORT_FEATURE_ID_STP		0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * (~0x0) means this field is unused.
+ */
+#define PROP_TOP	GENMASK(31, 24)
+#define PROP_TOP_UNUSED	0xff
+#define PROP_SUB	GENMASK(23, 16)
+#define PROP_SUB_UNUSED	0xff
+#define PROP_ID		GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+	(SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+	 SET_FIELD(PROP_ID, _id))
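+
+/*
+ * For example, a FAB counter scoped to sub-port 2 encodes as
+ * PROP(0x3, 0x2, 0x2): top 0x03 in bits 31:24, sub 0x02 in bits 23:16
+ * and id 0x0002 in bits 15:0, i.e. the value 0x03020002.
+ */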
+
+/* FME header feature's properties */
+#define FME_HDR_PROP_REVISION		0x1	/* RDONLY */
+#define FME_HDR_PORTS_NUM		0x2	/* RDONLY */
+#define FME_HDR_CACHE_SIZE		0x3	/* RDONLY */
+#define FME_HDR_VERSION			0x4	/* RDONLY */
+#define FME_HDR_SOCKET_ID		0x5	/* RDONLY */
+#define FME_HDR_BITSTREAM_ID		0x6	/* RDONLY */
+#define FME_HDR_BITSTREAM_METADATA	0x7	/* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) 		PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED		PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR		0x1
+#define ERR_PROP_ROOT(_id)		ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id)		ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS		ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR	ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR		ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR		ERR_PROP_FME_ERR(0x4)	/* WO */
+#define FME_ERR_PROP_REVISION		ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS	ERR_PROP_ROOT(0x6)	/* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS	ERR_PROP_ROOT(0x7)	/* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS	ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS	ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS	ERR_PROP_ROOT(0xa)	/* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1		0x1	/* RW */
+#define FME_THERMAL_PROP_THRESHOLD2		0x2	/* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP		0x3	/* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED	0x4	/* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED	0x5	/* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY	0x6	/* RW */
+#define FME_THERMAL_PROP_TEMPERATURE		0x7	/* RDONLY */
+#define FME_THERMAL_PROP_REVISION		0x8	/* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED			0x1	/* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1			0x2	/* RW */
+#define FME_PWR_PROP_THRESHOLD2			0x3	/* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS		0x4	/* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS		0x5	/* RDONLY */
+#define FME_PWR_PROP_RTL			0x6	/* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT			0x7	/* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT			0x8	/* RDONLY */
+#define FME_PWR_PROP_REVISION			0x9	/* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE			0x1
+#define PERF_PROP_TOP_VTD			0x2
+#define PERF_PROP_TOP_FAB			0x3
+#define PERF_PROP_TOP_UNUSED			PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED			PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id)		PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id)		PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id)	PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id)		PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id)	PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id)		PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK			PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION			PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE		PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT		PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS		PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT		PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS		PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST	PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL	PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL	PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION	PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION	PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION	PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE		PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT	PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT	PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT	PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT	PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT	PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT		PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS	PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS	PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS	PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS	PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS	PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS		PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n)	PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n)	PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n)	PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n)	PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n)	PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n)	PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n)	PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE		PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ		PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n)	PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE		PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n)	PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ		PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n)	PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE		PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n)	PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ		PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n)	PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE		PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n)	PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ		PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n)	PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE		PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n)	PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE		PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n)	PERF_PROP_FAB(n, 0xa)   /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK			PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION			PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE		PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ		PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n)	PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE		PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n)	PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ		PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n)	PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE		PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n)	PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE		PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n)	PERF_PROP_FAB(n, 0x6)   /* RW */
+
+/* PORT hdr feature's properties */
+#define PORT_HDR_REVISION		0x1	/* RDONLY */
+#define PORT_HDR_PORTIDX		0x2	/* RDONLY */
+#define PORT_HDR_LATENCY_TOLERANCE	0x3	/* RDONLY */
+#define PORT_HDR_AP1_EVENT		0x4	/* RW */
+#define PORT_HDR_AP2_EVENT		0x5	/* RW */
+#define PORT_HDR_POWER_STATE		0x6	/* RDONLY */
+#define PORT_HDR_USERCLK_FREQCMD	0x7	/* RW */
+#define PORT_HDR_USERCLK_FREQCNTRCMD	0x8	/* RW */
+#define PORT_HDR_USERCLK_FREQSTS	0x9	/* RDONLY */
+#define PORT_HDR_USERCLK_CNTRSTS	0xa	/* RDONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+				struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+				struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+			       struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+			       struct feature_prop *prop);
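
[Not part of the patch: a minimal usage sketch of the property entry
points above, assuming struct feature_prop carries a u32 prop_id and a
u64 data word as declared earlier in this header. It reads the FME
error status and acknowledges it through the write-only clear property;
fme_dump_and_clear_errors is a hypothetical helper name.]

static int fme_dump_and_clear_errors(struct opae_manager *mgr)
{
	struct feature_prop prop;
	int ret;

	/* Read the accumulated FME error bits. */
	prop.prop_id = FME_ERR_PROP_ERRORS;
	ret = opae_manager_ifpga_get_prop(mgr, &prop);
	if (ret)
		return ret;

	dev_info(NULL, "FME errors: 0x%llx\n",
		 (unsigned long long)prop.data);

	/* FME_ERR_PROP_CLEAR is write-only: write the observed bits
	 * back to acknowledge them.
	 */
	prop.prop_id = FME_ERR_PROP_CLEAR;
	return opae_manager_ifpga_set_prop(mgr, &prop);
}

[The bridge-side opae_bridge_ifpga_get_prop/set_prop pair works the
same way for port properties such as PORT_HDR_REVISION.]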
+
+/*
+ * Retrieve information about the fpga fme.
+ * Driver fills the info in provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+	u32 capability;		/* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ	(1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+				struct fpga_fme_info *fme_info);
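
[Not part of the patch: a sketch of the capability check a caller would
do before arming the FME error interrupt; fme_supports_err_irq is a
hypothetical helper name.]

static int fme_supports_err_irq(struct opae_manager *mgr)
{
	struct fpga_fme_info info;
	int ret;

	ret = opae_manager_ifpga_get_info(mgr, &info);
	if (ret)
		return ret;

	/* Nonzero if the FME advertises error-interrupt support. */
	return !!(info.capability & FPGA_FME_CAP_ERR_IRQ);
}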
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+	s32 evtfd;		/* Eventfd handler */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+				   struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+	u32 capability;	/* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ	(1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ	(1 << 1) /* Support uafu error interrupt */
+	u32 num_umsgs;	/* The number of allocated umsgs */
+	u32 num_uafu_irqs;	/* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+			       struct fpga_port_info *port_info);
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+	s32 evtfd;		/* Eventfd handler */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+				  struct fpga_port_err_irq_set *err_irq_set);
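
[Not part of the patch: a sketch of binding an eventfd to the port
error interrupt, gated on the capability bit above. The eventfd, e.g.
from eventfd(2), is owned by the caller; port_arm_err_irq is a
hypothetical helper name and -ENOTSUP assumes <errno.h> is available.]

static int port_arm_err_irq(struct opae_bridge *br, int evtfd)
{
	struct fpga_port_info info;
	struct fpga_port_err_irq_set irq_set;
	int ret;

	ret = opae_bridge_ifpga_get_info(br, &info);
	if (ret)
		return ret;
	if (!(info.capability & FPGA_PORT_CAP_ERR_IRQ))
		return -ENOTSUP;

	irq_set.evtfd = evtfd;
	return opae_bridge_ifpga_set_err_irq(br, &irq_set);
}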
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
new file mode 100644
index 0000000..e40fd83
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
@@ -0,0 +1,87 @@
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#include <asm/types.h>
+
+#define __iomem
+
+typedef uint8_t		u8;
+typedef int8_t		s8;
+typedef uint16_t	u16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+typedef uint64_t	u64;
+#ifndef __cplusplus
+typedef int		bool;
+#endif
+typedef uint64_t	dma_addr_t;
+
+#define FALSE	0
+#define TRUE	1
+#define false	0
+#define true	1
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
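
[Not part of the patch: a round trip through the helpers above.
EXAMPLE_FIELD is a hypothetical mask; GENMASK_ULL(19, 16) covers bits
19..16, __builtin_ffsll locates the low bit, so SET_FIELD shifts the
value up to bit 16 and GET_FIELD shifts it back down.]

/* Hypothetical field occupying bits 19..16 of a 64-bit register. */
#define EXAMPLE_FIELD	GENMASK_ULL(19, 16)

static inline u64 example_field_roundtrip(u64 reg, u64 val)
{
	reg &= ~EXAMPLE_FIELD;			/* clear the old field */
	reg |= SET_FIELD(EXAMPLE_FIELD, val);	/* place val at bit 16 */
	return GET_FIELD(EXAMPLE_FIELD, reg);	/* yields val & 0xf */
}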
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef OPAE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ASSERT(x) do {\
+	if (!(x)) \
+		osdep_panic("osdep_panic: " #x); \
+} while (0)
+#define BUG_ON(x) ASSERT(!(x))
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+	int ret = !!(x); \
+	if (unlikely(ret)) \
+		pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
new file mode 100644
index 0000000..baadfd3
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,69 @@
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() do {			\
+	asm volatile ("" : : : "memory");	\
+} while (0)
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+	uint8_t val;
+	val = *(const volatile uint8_t *)addr;
+	io_rmb();
+	return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+	uint16_t val;
+	val = *(const volatile uint16_t *)addr;
+	io_rmb();
+	return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+	uint32_t val;
+	val = *(const volatile uint32_t *)addr;
+	io_rmb();
+	return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+	uint64_t val;
+	val = *(const volatile uint64_t *)addr;
+	io_rmb();
+	return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+	io_wmb();
+	*(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+	io_wmb();
+	*(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+	io_wmb();
+	*(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+	io_wmb();
+	*(volatile uint64_t *)addr = value;
+}
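
[Not part of the patch: a sketch of a read-modify-write on a
hypothetical 64-bit CSR using the raw accessors above. The io_rmb()/
io_wmb() calls inside the helpers are compiler barriers only, which is
typically sufficient to order same-CPU accesses to uncached MMIO
mappings on x86; csr_set_bits is a hypothetical helper name.]

static inline void csr_set_bits(volatile void *csr, uint64_t bits)
{
	uint64_t v = opae_readq(csr);	/* load current register value */

	opae_writeq(v | bits, csr);	/* write back with bits set */
}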
+
+#define opae_free(addr) free(addr)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
new file mode 100644
index 0000000..f018c2d
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,41 @@
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#define dev_printf(level, fmt, args...) \
+	RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(__VA_ARGS__)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#endif
-- 
1.8.3.1


