[dpdk-dev] [PATCH v7 2/7] qede: add core driver

Rasesh Mody rasesh.mody at qlogic.com
Wed Apr 27 16:18:37 CEST 2016


The QLogic Everest Driver for Ethernet (QEDE) Poll Mode Driver (PMD) is
the DPDK-specific module for the QLogic FastLinQ QL4xxxx 25G/40G CNA family
of adapters, as well as their virtual functions (VF) in an SR-IOV context.

This patch adds the QEDE PMD, which interacts with the base driver and
initializes the HW.

This patch also includes:
 - eth_dev_ops callbacks
 - Rx/Tx support for the driver
 - link default configuration
 - change link property
 - link up/down/update notifications
 - vlan offload and filtering capability
 - device/function/port statistics
 - QEDE NIC guide and updated overview.rst

Note that follow-on commits contain the code for the features mentioned
in the documents but not implemented in this patch.

Signed-off-by: Harish Patil <harish.patil at qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody at qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko at qlogic.com>
---
 MAINTAINERS                               |    1 +
 doc/guides/nics/index.rst                 |    1 +
 doc/guides/nics/overview.rst              |   84 +-
 doc/guides/nics/qede.rst                  |  315 ++++++++
 drivers/net/qede/Makefile                 |   12 +
 drivers/net/qede/qede_eth_if.h            |  176 +++++
 drivers/net/qede/qede_ethdev.c            | 1028 +++++++++++++++++++++++++
 drivers/net/qede/qede_ethdev.h            |  159 ++++
 drivers/net/qede/qede_if.h                |  155 ++++
 drivers/net/qede/qede_logs.h              |   90 +++
 drivers/net/qede/qede_main.c              |  545 +++++++++++++
 drivers/net/qede/qede_rxtx.c              | 1192 +++++++++++++++++++++++++++++
 drivers/net/qede/qede_rxtx.h              |  179 +++++
 drivers/net/qede/rte_pmd_qede_version.map |    4 +
 14 files changed, 3899 insertions(+), 42 deletions(-)
 create mode 100644 doc/guides/nics/qede.rst
 create mode 100644 drivers/net/qede/qede_eth_if.h
 create mode 100644 drivers/net/qede/qede_ethdev.c
 create mode 100644 drivers/net/qede/qede_ethdev.h
 create mode 100644 drivers/net/qede/qede_if.h
 create mode 100644 drivers/net/qede/qede_logs.h
 create mode 100644 drivers/net/qede/qede_main.c
 create mode 100644 drivers/net/qede/qede_rxtx.c
 create mode 100644 drivers/net/qede/qede_rxtx.h
 create mode 100644 drivers/net/qede/rte_pmd_qede_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b673cc7..ba4053a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -337,6 +337,7 @@ M: Harish Patil <harish.patil at qlogic.com>
 M: Rasesh Mody <rasesh.mody at qlogic.com>
 M: Sony Chacko <sony.chacko at qlogic.com>
 F: drivers/net/qede/
+F: doc/guides/nics/qede.rst
 
 RedHat virtio
 M: Huawei Xie <huawei.xie at intel.com>
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 769f677..0b13698 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -48,6 +48,7 @@ Network Interface Controller Drivers
     mlx4
     mlx5
     nfp
+    qede
     szedata2
     virtio
     vhost
diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst
index f08039e..a309752 100644
--- a/doc/guides/nics/overview.rst
+++ b/doc/guides/nics/overview.rst
@@ -74,40 +74,40 @@ Most of these differences are summarized below.
 
 .. table:: Features availability in networking drivers
 
-   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
-   Feature              a b b b c e e e i i i i i i i i i i f f f f m m m n n p r s v v v v x
-                        f n n o x 1 n n 4 4 4 4 g g x x x x m m m m l l p f u c i z h i i m e
-                        p x x n g 0 a i 0 0 0 0 b b g g g g 1 1 1 1 x x i p l a n e o r r x n
-                        a 2 2 d b 0   c e e e e   v b b b b 0 0 0 0 4 5 p   l p g d s t t n v
-                        c x x i e 0       . v v   f e e e e k k k k     e         a t i i e i
-                        k   v n           . f f       . v v   . v v               t   o o t r
-                        e   f g           .   .       . f f   . f f               a     . 3 t
-                        t                 v   v       v   v   v   v               2     v
-                                          e   e       e   e   e   e                     e
-                                          c   c       c   c   c   c                     c
-   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+   Feature              a b b b c e e e i i i i i i i i i i f f f f m m m n n p q q r s v v v v x
+                        f n n o x 1 n n 4 4 4 4 g g x x x x m m m m l l p f u c e e i z h i i m e
+                        p x x n g 0 a i 0 0 0 0 b b g g g g 1 1 1 1 x x i p l a d d n e o r r x n
+                        a 2 2 d b 0   c e e e e   v b b b b 0 0 0 0 4 5 p   l p e e g d s t t n v
+                        c x x i e 0       . v v   f e e e e k k k k     e         v   a t i i e i
+                        k   v n           . f f       . v v   . v v               f   t   o o t r
+                        e   f g           .   .       . f f   . f f                   a     . 3 t
+                        t                 v   v       v   v   v   v                   2     v
+                                          e   e       e   e   e   e                         e
+                                          c   c       c   c   c   c                         c
+   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
    Speed capabilities
-   Link status            Y Y   Y Y   Y Y Y     Y   Y Y Y Y         Y Y           Y Y Y Y
-   Link status event      Y Y     Y     Y Y     Y   Y Y             Y Y             Y
-   Queue status event                                                               Y
+   Link status            Y Y   Y Y   Y Y Y     Y   Y Y Y Y         Y Y         Y Y   Y Y Y Y
+   Link status event      Y Y     Y     Y Y     Y   Y Y             Y Y         Y Y     Y
+   Queue status event                                                                   Y
    Rx interrupt                   Y     Y Y Y Y Y Y Y Y Y Y Y Y Y Y
-   Queue start/stop             Y   Y Y Y Y Y Y     Y Y     Y Y Y Y Y Y           Y   Y Y
+   Queue start/stop             Y   Y Y Y Y Y Y     Y Y     Y Y Y Y Y Y               Y   Y Y
    MTU update                   Y Y Y           Y   Y Y Y Y         Y Y
-   Jumbo frame                  Y Y Y Y Y Y Y Y Y   Y Y Y Y Y Y Y Y Y Y       Y
-   Scattered Rx                 Y Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y           Y   Y
+   Jumbo frame                  Y Y Y Y Y Y Y Y Y   Y Y Y Y Y Y Y Y Y Y       Y Y Y
+   Scattered Rx                 Y Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y               Y   Y
    LRO                                              Y Y Y Y
    TSO                          Y   Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y
-   Promiscuous mode       Y Y   Y Y   Y Y Y Y Y Y Y Y Y     Y Y     Y Y           Y   Y Y
-   Allmulticast mode            Y Y     Y Y Y Y Y Y Y Y Y Y Y Y     Y Y           Y   Y Y
-   Unicast MAC filter     Y Y     Y   Y Y Y Y Y Y Y Y Y Y Y Y Y     Y Y               Y Y
-   Multicast MAC filter   Y Y         Y Y Y Y Y             Y Y     Y Y               Y Y
-   RSS hash                     Y   Y Y Y Y Y Y Y   Y Y Y Y Y Y Y Y Y Y
+   Promiscuous mode       Y Y   Y Y   Y Y Y Y Y Y Y Y Y     Y Y     Y Y         Y Y   Y   Y Y
+   Allmulticast mode            Y Y     Y Y Y Y Y Y Y Y Y Y Y Y     Y Y         Y Y   Y   Y Y
+   Unicast MAC filter     Y Y     Y   Y Y Y Y Y Y Y Y Y Y Y Y Y     Y Y         Y Y       Y Y
+   Multicast MAC filter   Y Y         Y Y Y Y Y             Y Y     Y Y         Y Y       Y Y
+   RSS hash                     Y   Y Y Y Y Y Y Y   Y Y Y Y Y Y Y Y Y Y         Y Y
    RSS key update                   Y   Y Y Y Y Y   Y Y Y Y Y Y Y Y   Y
    RSS reta update                  Y   Y Y Y Y Y   Y Y Y Y Y Y Y Y   Y
    VMDq                                 Y Y     Y   Y Y     Y Y
-   SR-IOV                   Y       Y   Y Y     Y   Y Y             Y Y
+   SR-IOV                   Y       Y   Y Y     Y   Y Y             Y Y           Y
    DCB                                  Y Y     Y   Y Y
-   VLAN filter                    Y   Y Y Y Y Y Y Y Y Y Y Y Y Y     Y Y               Y Y
+   VLAN filter                    Y   Y Y Y Y Y Y Y Y Y Y Y Y Y     Y Y         Y Y       Y Y
    Ethertype filter                     Y Y     Y   Y Y
    N-tuple filter                               Y   Y Y
    SYN filter                                   Y   Y Y
@@ -115,38 +115,38 @@ Most of these differences are summarized below.
    Flexible filter                              Y
    Hash filter                          Y Y Y Y
    Flow director                        Y Y         Y Y               Y
-   Flow control                 Y Y     Y Y     Y   Y Y
+   Flow control                 Y Y     Y Y     Y   Y Y                         Y Y
    Rate limitation                                  Y Y
    Traffic mirroring                    Y Y         Y Y
-   CRC offload                  Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y   Y
-   VLAN offload                 Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y   Y
+   CRC offload                  Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y   Y         Y Y
+   VLAN offload                 Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y   Y         Y Y
    QinQ offload                   Y     Y   Y   Y Y Y   Y
    L3 checksum offload          Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y Y Y
    L4 checksum offload          Y Y Y Y Y   Y   Y Y Y   Y   Y Y Y Y Y Y
    Inner L3 checksum                Y   Y   Y       Y   Y           Y
    Inner L4 checksum                Y   Y   Y       Y   Y           Y
-   Packet type parsing          Y     Y Y   Y   Y Y Y   Y   Y Y Y Y Y Y
+   Packet type parsing          Y     Y Y   Y   Y Y Y   Y   Y Y Y Y Y Y         Y Y
    Timesync                             Y Y     Y   Y Y
-   Basic stats            Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y   Y Y Y Y
+   Basic stats            Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y Y Y   Y Y Y Y
    Extended stats                   Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y                   Y Y
-   Stats per queue              Y                   Y Y     Y Y Y Y Y Y           Y   Y Y
+   Stats per queue              Y                   Y Y     Y Y Y Y Y Y         Y Y   Y   Y Y
    EEPROM dump                                  Y   Y Y
    Registers dump                               Y Y Y Y Y Y
-   Multiprocess aware                   Y Y Y Y     Y Y Y Y Y Y Y Y Y Y       Y
-   BSD nic_uio                  Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y                   Y Y
-   Linux UIO              Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y                   Y Y
-   Linux VFIO                   Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y                   Y Y
-   Other kdrv                                                       Y Y           Y
-   ARMv7                                                                      Y       Y Y
-   ARMv8                                                                      Y       Y Y
+   Multiprocess aware                   Y Y Y Y     Y Y Y Y Y Y Y Y Y Y       Y Y Y
+   BSD nic_uio                  Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y                       Y Y
+   Linux UIO              Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y             Y Y       Y Y
+   Linux VFIO                   Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y                       Y Y
+   Other kdrv                                                       Y Y               Y
+   ARMv7                                                                      Y           Y Y
+   ARMv8                                                                      Y           Y Y
    Power8                                                           Y Y       Y
    TILE-Gx                                                                    Y
-   x86-32                       Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y     Y Y Y
-   x86-64                 Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y   Y Y Y Y
-   Usage doc              Y Y   Y     Y                             Y Y       Y   Y   Y
+   x86-32                       Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y         Y Y Y
+   x86-64                 Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y       Y Y Y   Y Y Y Y
+   Usage doc              Y Y   Y     Y                             Y Y       Y Y Y   Y   Y
    Design doc
    Perf doc
-   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+   ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 
 .. Note::
 
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
new file mode 100644
index 0000000..6f2d9f2
--- /dev/null
+++ b/doc/guides/nics/qede.rst
@@ -0,0 +1,315 @@
+..  BSD LICENSE
+    Copyright (c) 2016 QLogic Corporation
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions
+    are met:
+
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+    * Neither the name of QLogic Corporation nor the names of its
+    contributors may be used to endorse or promote products derived
+    from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+QEDE Poll Mode Driver
+======================
+
+The QEDE poll mode driver library (**librte_pmd_qede**) implements support
+for the **QLogic FastLinQ QL4xxxx 25G/40G CNA** family of adapters, as well
+as their virtual functions (VF) in an SR-IOV context. It is supported on
+several standard Linux distributions such as RHEL 7.x, SLES 12.x and Ubuntu,
+and is compile-tested under FreeBSD.
+
+More information can be found at `QLogic Corporation's Website
+<http://www.qlogic.com>`_.
+
+Supported Features
+------------------
+
+- Unicast/Multicast filtering
+- Promiscuous mode
+- Allmulti mode
+- Port hardware statistics
+- Jumbo frames (using single buffer)
+- VLAN offload - Filtering and stripping
+- Stateless checksum offloads (IPv4/TCP/UDP)
+- Multiple Rx/Tx queues (queue-pairs)
+- RSS (with default table/key)
+- TSS
+- Multiple MAC addresses
+- Default pause flow control
+- SR-IOV VF
+
+Non-supported Features
+----------------------
+
+- Scatter-Gather Rx/Tx frames
+- User configurable RETA table/key
+- Unequal number of Rx/Tx queues
+- MTU change (dynamic)
+- SR-IOV PF
+- Tunneling offloads
+- Reload of the PMD after a non-graceful termination
+
+Supported QLogic Adapters
+-------------------------
+
+- QLogic FastLinQ QL4xxxx 25G/40G CNAs
+
+Prerequisites
+-------------
+
+- Requires firmware version **8.7.x** and management firmware
+  version **8.7.x or higher**. The firmware may be available inbox in
+  certain newer Linux distributions under the standard directory,
+  e.g. ``/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin``
+  (a quick check is shown below).
+
+- If the required firmware files are not available then visit
+  `QLogic Driver Download Center <http://driverdownloads.qlogic.com>`_.
+
+- This driver relies on the external zlib library (``-lz``) for
+  decompressing the firmware file.
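+
+For example, to confirm that an inbox firmware file is present::
+
+   ls /lib/firmware/qed/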
+
+Performance note
+~~~~~~~~~~~~~~~~
+
+- For better performance, it is recommended to use 4K or higher RX/TX
+  rings, as shown below.
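+
+When launching ``testpmd``, for example, this corresponds to passing the
+descriptor count options used in the Sample Application Notes below::
+
+   --rxd=4096 --txd=4096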
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``.config`` file. Please note that
+enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_PMD`` (default **y**)
+
+  Toggle compilation of the QEDE PMD.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO`` (default **n**)
+
+  Toggle display of generic debugging messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRV`` (default **n**)
+
+  Toggle display of ecore related messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX`` (default **n**)
+
+  Toggle display of transmit fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX`` (default **n**)
+
+  Toggle display of receive fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
+
+  Gives the absolute path of the firmware file.
+  ``E.g.: "/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin"``
+  An empty string indicates that the driver will pick up the firmware file
+  from the default location.
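+
+For example, to build the PMD with informational debug messages enabled, set
+the following in the ``.config`` file before compiling::
+
+   CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=y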
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile QEDE PMD for Linux x86_64 gcc target, run the following ``make``
+command::
+
+   cd <DPDK-source-directory>
+   make config T=x86_64-native-linuxapp-gcc install
+
+To compile QEDE PMD for Linux x86_64 clang target, run the following ``make``
+command::
+
+   cd <DPDK-source-directory>
+   make config T=x86_64-native-linuxapp-clang install
+
+To compile QEDE PMD for FreeBSD x86_64 clang target, run the following ``gmake``
+command::
+
+   cd <DPDK-source-directory>
+   gmake config T=x86_64-native-bsdapp-clang install
+
+To compile QEDE PMD for FreeBSD x86_64 gcc target, run the following ``gmake``
+command::
+
+   cd <DPDK-source-directory>
+   gmake config T=x86_64-native-bsdapp-gcc install \
+        -Wl,-rpath=/usr/local/lib/gcc48 CC=gcc48
+
+
+Sample Application Notes
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section demonstrates how to launch ``testpmd`` with QLogic QL4xxxx
+devices managed by ``librte_pmd_qede`` on a Linux operating system.
+
+#. Request huge pages:
+
+   .. code-block:: console
+
+      echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+#. Load ``igb_uio`` driver:
+
+   .. code-block:: console
+
+      insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+
+#. Bind the QLogic QL4xxxx adapters to the ``igb_uio`` driver loaded in
+   the previous step:
+
+   .. code-block:: console
+
+      ./tools/dpdk_nic_bind.py --bind igb_uio 0000:84:00.0 0000:84:00.1 \
+                                              0000:84:00.2 0000:84:00.3
+
+#. Start ``testpmd`` with basic parameters (enable
+   ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=y`` to view informational messages):
+
+   .. code-block:: console
+
+      testpmd -c 0xff1 -n 4 -- -i --nb-cores=8 --portmask=0xf --rxd=4096 \
+      --txd=4096 --txfreet=4068 --enable-rx-cksum --rxq=4 --txq=4 \
+      --rss-ip --rss-udp
+
+      [...]
+
+    EAL: PCI device 0000:84:00.0 on NUMA socket 1
+    EAL:   probe driver: 1077:1634 rte_qede_pmd
+    EAL:   Not managed by a supported kernel driver, skipped
+    EAL: PCI device 0000:84:00.1 on NUMA socket 1
+    EAL:   probe driver: 1077:1634 rte_qede_pmd
+    EAL:   Not managed by a supported kernel driver, skipped
+    EAL: PCI device 0000:88:00.0 on NUMA socket 1
+    EAL:   probe driver: 1077:1656 rte_qede_pmd
+    EAL:   PCI memory mapped at 0x7f738b200000
+    EAL:   PCI memory mapped at 0x7f738b280000
+    EAL:   PCI memory mapped at 0x7f738b300000
+    PMD: Chip details : BB1
+    PMD: Driver version : QEDE PMD 8.7.9.0_1.0.0
+    PMD: Firmware version : 8.7.7.0
+    PMD: Management firmware version : 8.7.8.0
+    PMD: Firmware file : /lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_common_dev_init:macaddr \
+                                                        00:0e:1e:d2:09:9c
+      [...]
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 0 num_desc 4096 \
+                                                tx_free_thresh 4068 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 1 num_desc 4096 \
+                                                tx_free_thresh 4068 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 2 num_desc 4096 \
+                                                 tx_free_thresh 4068 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 3 num_desc 4096 \
+                                                 tx_free_thresh 4068 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 0 num_desc 4096 \
+                                                rx_buf_size=2148 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 1 num_desc 4096 \
+                                                rx_buf_size=2148 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 2 num_desc 4096 \
+                                                rx_buf_size=2148 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 3 num_desc 4096 \
+                                                rx_buf_size=2148 socket 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_dev_start:port 0
+    [QEDE PMD: (84:00.0:dpdk-port-0)]qede_dev_start:link status: down
+      [...]
+    Checking link statuses...
+    Port 0 Link Up - speed 25000 Mbps - full-duplex
+    Port 1 Link Up - speed 25000 Mbps - full-duplex
+    Port 2 Link Up - speed 25000 Mbps - full-duplex
+    Port 3 Link Up - speed 25000 Mbps - full-duplex
+    Done
+    testpmd>
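+
+   Forwarding can then be started and port statistics inspected from the
+   ``testpmd`` prompt, for example:
+
+   .. code-block:: console
+
+      testpmd> start
+      testpmd> show port stats all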
+
+
+SR-IOV: Prerequisites and Sample Application Notes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section provides instructions for configuring SR-IOV on a Linux OS.
+
+**Note**: ``librte_pmd_qede`` will be used to bind to the SR-IOV VF device,
+while the native Linux kernel driver (qede) will function as the SR-IOV PF
+driver.
+
+#. Verify that the SR-IOV and ARI capabilities are enabled on the adapter
+   using ``lspci``:
+
+   .. code-block:: console
+
+      lspci -s <slot> -vvv
+
+   Example output:
+
+   .. code-block:: console
+
+      [...]
+      Capabilities: [1b8 v1] Alternative Routing-ID Interpretation (ARI)
+      [...]
+      Capabilities: [1c0 v1] Single Root I/O Virtualization (SR-IOV)
+      [...]
+      Kernel driver in use: igb_uio
+
+#. Load the kernel module:
+
+   .. code-block:: console
+
+      modprobe qede
+
+   Example output:
+
+   .. code-block:: console
+
+      systemd-udevd[4848]: renamed network interface eth0 to ens5f0
+      systemd-udevd[4848]: renamed network interface eth1 to ens5f1
+
+#. Bring up the PF ports:
+
+   .. code-block:: console
+
+      ifconfig ens5f0 up
+      ifconfig ens5f1 up
+
+#. Create VF device(s):
+
+   Echo the number of VFs to be created into the ``sriov_numvfs`` sysfs
+   entry of the parent PF.
+
+   For example:
+
+   .. code-block:: console
+
+      echo 2 > /sys/devices/pci0000:00/0000:00:03.0/0000:81:00.0/sriov_numvfs
+
+
+#. Assign VF MAC address:
+
+   Assign a MAC address to the VF using the iproute2 utility. The syntax is::
+
+      ip link set <PF iface> vf <VF id> mac <macaddr>
+
+   For example:
+
+   .. code-block:: console
+
+      ip link set ens5f0 vf 0 mac 52:54:00:2f:9d:e8
+
+
+#. PCI Passthrough:
+
+   The VF devices may be passed through to the guest VM using ``virt-manager``
+   or ``virsh``. The QEDE PMD should be used to bind the VF devices inside the
+   guest VM, following the instructions outlined in the Sample Application
+   Notes above.
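+
+   For example, assuming a hypothetical VF PCI address of ``0000:00:05.0``
+   inside the guest, the VF can be bound with the same tool used earlier:
+
+   .. code-block:: console
+
+      ./tools/dpdk_nic_bind.py --bind igb_uio 0000:00:05.0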
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index febd41d..75e0d29 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -6,9 +6,18 @@
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+#
+# library name
+#
+LIB = librte_pmd_qede.a
+
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 
+EXPORT_MAP := rte_pmd_qede_version.map
+
+LIBABIVER := 1
+
 #
 # OS
 #
@@ -72,6 +81,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
 SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
 
 # dependent libs:
 DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
new file mode 100644
index 0000000..77438c8
--- /dev/null
+++ b/drivers/net/qede/qede_eth_if.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_ETH_IF_H
+#define _QEDE_ETH_IF_H
+
+#include "qede_if.h"
+
+/* forward declaration */
+struct eth_slow_path_rx_cqe;
+
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+#define QED_ETH_INTERFACE_VERSION       609
+
+#define QEDE_MAX_MCAST_FILTERS		64
+
+enum qed_filter_rx_mode_type {
+	QED_FILTER_RX_MODE_TYPE_REGULAR,
+	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+	QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+	QED_FILTER_XCAST_TYPE_ADD,
+	QED_FILTER_XCAST_TYPE_DEL,
+	QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+enum qed_filter_type {
+	QED_FILTER_TYPE_UCAST,
+	QED_FILTER_TYPE_MCAST,
+	QED_FILTER_TYPE_RX_MODE,
+	QED_MAX_FILTER_TYPES,
+};
+
+struct qed_dev_eth_info {
+	struct qed_dev_info common;
+
+	uint8_t num_queues;
+	uint8_t num_tc;
+
+	struct ether_addr port_mac;
+	uint8_t num_vlan_filters;
+	uint32_t num_mac_addrs;
+};
+
+struct qed_update_vport_rss_params {
+	uint16_t rss_ind_table[128];
+	uint32_t rss_key[10];
+};
+
+struct qed_stop_rxq_params {
+	uint8_t rss_id;
+	uint8_t rx_queue_id;
+	uint8_t vport_id;
+	bool eq_completion_only;
+};
+
+struct qed_update_vport_params {
+	uint8_t vport_id;
+	uint8_t update_vport_active_flg;
+	uint8_t vport_active_flg;
+	uint8_t update_inner_vlan_removal_flg;
+	uint8_t inner_vlan_removal_flg;
+	uint8_t update_tx_switching_flg;
+	uint8_t tx_switching_flg;
+	uint8_t update_accept_any_vlan_flg;
+	uint8_t accept_any_vlan;
+	uint8_t update_rss_flg;
+	struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_start_vport_params {
+	bool remove_inner_vlan;
+	bool handle_ptp_pkts;
+	bool gro_enable;
+	bool drop_ttl0;
+	uint8_t vport_id;
+	uint16_t mtu;
+	bool clear_stats;
+};
+
+struct qed_stop_txq_params {
+	uint8_t rss_id;
+	uint8_t tx_queue_id;
+};
+
+struct qed_filter_ucast_params {
+	enum qed_filter_xcast_params_type type;
+	uint8_t vlan_valid;
+	uint16_t vlan;
+	uint8_t mac_valid;
+	unsigned char mac[ETHER_ADDR_LEN];
+};
+
+struct qed_filter_mcast_params {
+	enum qed_filter_xcast_params_type type;
+	uint8_t num;
+	unsigned char mac[QEDE_MAX_MCAST_FILTERS][ETHER_ADDR_LEN];
+};
+
+union qed_filter_type_params {
+	enum qed_filter_rx_mode_type accept_flags;
+	struct qed_filter_ucast_params ucast;
+	struct qed_filter_mcast_params mcast;
+};
+
+struct qed_filter_params {
+	enum qed_filter_type type;
+	union qed_filter_type_params filter;
+};
+
+struct qed_eth_ops {
+	const struct qed_common_ops *common;
+
+	int (*fill_dev_info)(struct ecore_dev *edev,
+			     struct qed_dev_eth_info *info);
+
+	int (*vport_start)(struct ecore_dev *edev,
+			   struct qed_start_vport_params *params);
+
+	int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
+
+	int (*vport_update)(struct ecore_dev *edev,
+			    struct qed_update_vport_params *params);
+
+	int (*q_rx_start)(struct ecore_dev *cdev,
+			  uint8_t rss_id, uint8_t rx_queue_id,
+			  uint8_t vport_id, uint16_t sb,
+			  uint8_t sb_index, uint16_t bd_max_bytes,
+			  dma_addr_t bd_chain_phys_addr,
+			  dma_addr_t cqe_pbl_addr,
+			  uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+
+	int (*q_rx_stop)(struct ecore_dev *edev,
+			 struct qed_stop_rxq_params *params);
+
+	int (*q_tx_start)(struct ecore_dev *edev,
+			  uint8_t rss_id, uint16_t tx_queue_id,
+			  uint8_t vport_id, uint16_t sb,
+			  uint8_t sb_index,
+			  dma_addr_t pbl_addr,
+			  uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+
+	int (*q_tx_stop)(struct ecore_dev *edev,
+			 struct qed_stop_txq_params *params);
+
+	int (*eth_cqe_completion)(struct ecore_dev *edev,
+				  uint8_t rss_id,
+				  struct eth_slow_path_rx_cqe *cqe);
+
+	int (*fastpath_stop)(struct ecore_dev *edev);
+
+	void (*get_vport_stats)(struct ecore_dev *edev,
+				struct ecore_eth_stats *stats);
+
+	int (*filter_config)(struct ecore_dev *edev,
+			     struct qed_filter_params *params);
+};
+
+/* externs */
+
+extern const struct qed_common_ops qed_common_ops_pass;
+
+void qed_put_eth_ops(void);
+
+int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+				 enum qed_filter_rx_mode_type type);
+
+#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
new file mode 100644
index 0000000..12e02b2
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.c
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+
+/* Globals */
+static const struct qed_eth_ops *qed_ops;
+static const char *drivername = "qede pmd";
+
+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
+{
+	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+}
+
+static void
+qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+		DP_ERR(edev, "rte_intr_enable failed\n");
+}
+
+static void
+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+{
+	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
+	qdev->num_tc = qdev->dev_info.num_tc;
+	qdev->ops = qed_ops;
+}
+
+static void qede_print_adapter_info(struct qede_dev *qdev)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_dev_info *info = &qdev->dev_info.common;
+	static char ver_str[QED_DRV_VER_STR_SIZE];
+
+	DP_INFO(edev, "*********************************\n");
+	DP_INFO(edev, " Chip details : %s%d\n",
+		ECORE_IS_BB(edev) ? "BB" : "AH",
+		CHIP_REV_IS_A0(edev) ? 0 : 1);
+
+	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
+		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
+		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
+	strcpy(qdev->drv_ver, ver_str);
+	DP_INFO(edev, " Driver version : %s\n", ver_str);
+
+	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
+		info->fw_rev, info->fw_eng);
+	DP_INFO(edev, " Firmware version : %s\n", ver_str);
+
+	sprintf(ver_str, "%d.%d.%d.%d",
+		(info->mfw_rev >> 24) & 0xff,
+		(info->mfw_rev >> 16) & 0xff,
+		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
+	DP_INFO(edev, " Management firmware version : %s\n", ver_str);
+
+	DP_INFO(edev, " Firmware file : %s\n", fw_file);
+
+	DP_INFO(edev, "*********************************\n");
+}
+
+static int
+qede_set_ucast_rx_mac(struct qede_dev *qdev,
+		      enum qed_filter_xcast_params_type opcode,
+		      uint8_t mac[ETHER_ADDR_LEN])
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_filter_params filter_cmd;
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_UCAST;
+	filter_cmd.filter.ucast.type = opcode;
+	filter_cmd.filter.ucast.mac_valid = 1;
+	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
+	return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+		  uint32_t index, __rte_unused uint32_t pool)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (index >= qdev->dev_info.num_mac_addrs) {
+		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+		       index, qdev->dev_info.num_mac_addrs);
+		return;
+	}
+
+	/* Adding macaddr even though promiscuous mode is set */
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
+
+	/* Add MAC filters according to the unicast secondary macs */
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+				   mac_addr->addr_bytes);
+	if (rc)
+		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
+}
+
+static void
+qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct ether_addr mac_addr;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (index >= qdev->dev_info.num_mac_addrs) {
+		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+		       index, qdev->dev_info.num_mac_addrs);
+		return;
+	}
+
+	/* Use the index maintained by rte */
+	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
+				   mac_addr.addr_bytes);
+	if (rc)
+		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
+}
+
+static void
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
+
+	/* First remove the primary mac */
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
+				   qdev->primary_mac.addr_bytes);
+
+	if (rc) {
+		DP_ERR(edev, "Unable to remove current macaddr,"
+			     " reverting to previous default mac\n");
+		ether_addr_copy(&qdev->primary_mac,
+				&eth_dev->data->mac_addrs[0]);
+		return;
+	}
+
+	/* Add new MAC */
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+				   mac_addr->addr_bytes);
+
+	if (rc)
+		DP_ERR(edev, "Unable to add new default mac\n");
+	else
+		ether_addr_copy(mac_addr, &qdev->primary_mac);
+}
+
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_update_vport_params params = {
+		.vport_id = 0,
+		.accept_any_vlan = action,
+		.update_accept_any_vlan_flg = 1,
+	};
+	int rc;
+
+	/* Proceed only if action actually needs to be performed */
+	if (qdev->accept_any_vlan == action)
+		return;
+
+	rc = qdev->ops->vport_update(edev, &params);
+	if (rc) {
+		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+		       action ? "enable" : "disable");
+	} else {
+		DP_INFO(edev, "%s accept-any-vlan\n",
+			action ? "enabled" : "disabled");
+		qdev->accept_any_vlan = action;
+	}
+}
+
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	/* TODO: - QED_FILTER_TYPE_UCAST */
+	enum qed_filter_rx_mode_type accept_flags =
+			QED_FILTER_RX_MODE_TYPE_REGULAR;
+	struct qed_filter_params rx_mode;
+	int rc;
+
+	/* Configure the struct for the Rx mode */
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
+				   eth_dev->data->mac_addrs[0].addr_bytes);
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	} else {
+		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+					   eth_dev->data->
+					   mac_addrs[0].addr_bytes);
+		if (rc) {
+			DP_ERR(edev, "Unable to add filter\n");
+			return;
+		}
+	}
+
+	/* take care of VLAN mode */
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+		qede_config_accept_any_vlan(qdev, true);
+	} else if (!qdev->non_configured_vlans) {
+		/* If we don't have non-configured VLANs and promisc
+		 * is not set, then check if we need to disable
+		 * accept_any_vlan mode.
+		 * Because in this case, accept_any_vlan mode is set
+		 * as part of IFF_PROMISC flag handling.
+		 */
+		qede_config_accept_any_vlan(qdev, false);
+	}
+	rx_mode.filter.accept_flags = accept_flags;
+	rc = qdev->ops->filter_config(edev, &rx_mode);
+	if (rc)
+		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
+}
+
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
+
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = 0;
+	vport_update_params.update_inner_vlan_removal_flg = 1;
+	vport_update_params.inner_vlan_removal_flg = set_stripping;
+	rc = qdev->ops->vport_update(edev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+			(void)qede_vlan_stripping(eth_dev, 1);
+		else
+			(void)qede_vlan_stripping(eth_dev, 0);
+	}
+
+	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
+		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
+				  enum qed_filter_xcast_params_type opcode,
+				  uint16_t vid)
+{
+	struct qed_filter_params filter_cmd;
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_UCAST;
+	filter_cmd.filter.ucast.type = opcode;
+	filter_cmd.filter.ucast.vlan_valid = 1;
+	filter_cmd.filter.ucast.vlan = vid;
+
+	return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+				uint16_t vlan_id, int on)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+	int rc;
+
+	if (vlan_id != 0 &&
+	    qdev->configured_vlans == dev_info->num_vlan_filters) {
+		DP_NOTICE(edev, false, "Reached max VLAN filter limit,"
+				     " enabling accept_any_vlan\n");
+		qede_config_accept_any_vlan(qdev, true);
+		return 0;
+	}
+
+	if (on) {
+		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
+					    vlan_id);
+		if (rc)
+			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
+			       rc);
+		else
+			if (vlan_id != 0)
+				qdev->configured_vlans++;
+	} else {
+		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
+					    vlan_id);
+		if (rc)
+			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
+			       vlan_id, rc);
+		else
+			if (vlan_id != 0)
+				qdev->configured_vlans--;
+	}
+
+	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
+			vlan_id, on, rc, qdev->configured_vlans);
+
+	return rc;
+}
+
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
+		DP_NOTICE(edev, false,
+			  "Unequal number of rx/tx queues "
+			  "is not supported RX=%u TX=%u\n",
+			  eth_dev->data->nb_rx_queues,
+			  eth_dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+
+	qdev->num_rss = eth_dev->data->nb_rx_queues;
+
+	/* Initial state */
+	qdev->state = QEDE_CLOSE;
+
+	/* Sanity checks and throw warnings */
+
+	if (rxmode->enable_scatter == 1) {
+		DP_ERR(edev, "RX scattered packets are not supported\n");
+		return -EINVAL;
+	}
+
+	if (rxmode->enable_lro == 1) {
+		DP_INFO(edev, "LRO is not supported\n");
+		return -EINVAL;
+	}
+
+	if (!rxmode->hw_strip_crc)
+		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+
+	if (!rxmode->hw_ip_checksum)
+		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
+			      "in hw\n");
+
+
+	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+		QEDE_RSS_CNT(qdev), qdev->num_tc);
+
+	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
+		" port %u first_on_engine %d\n",
+		edev->hwfns[0].my_id,
+		edev->hwfns[0].rel_pf_id,
+		edev->hwfns[0].abs_pf_id,
+		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
+
+	return 0;
+}
+
+/* Info about HW descriptor ring limitations */
+static const struct rte_eth_desc_lim qede_rx_desc_lim = {
+	.nb_max = NUM_RX_BDS_MAX,
+	.nb_min = 128,
+	.nb_align = 128	/* lowest common multiple */
+};
+
+static const struct rte_eth_desc_lim qede_tx_desc_lim = {
+	.nb_max = NUM_TX_BDS_MAX,
+	.nb_min = 256,
+	.nb_align = 256
+};
+
+static void
+qede_dev_info_get(struct rte_eth_dev *eth_dev,
+		  struct rte_eth_dev_info *dev_info)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
+					      QEDE_ETH_OVERHEAD);
+	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
+	dev_info->rx_desc_lim = qede_rx_desc_lim;
+	dev_info->tx_desc_lim = qede_tx_desc_lim;
+	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
+	dev_info->max_tx_queues = dev_info->max_rx_queues;
+	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
+	dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
+	dev_info->driver_name = qdev->drv_ver;
+	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.txq_flags = QEDE_TXQ_FLAGS,
+	};
+
+	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+				     DEV_RX_OFFLOAD_IPV4_CKSUM |
+				     DEV_RX_OFFLOAD_UDP_CKSUM |
+				     DEV_RX_OFFLOAD_TCP_CKSUM);
+	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+				     DEV_TX_OFFLOAD_IPV4_CKSUM |
+				     DEV_TX_OFFLOAD_UDP_CKSUM |
+				     DEV_TX_OFFLOAD_TCP_CKSUM);
+
+	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	uint16_t link_duplex;
+	struct qed_link_output link;
+	struct rte_eth_link *curr = &eth_dev->data->dev_link;
+
+	memset(&link, 0, sizeof(struct qed_link_output));
+	qdev->ops->common->get_link(edev, &link);
+
+	/* Link Speed */
+	curr->link_speed = link.speed;
+
+	/* Link Mode */
+	switch (link.duplex) {
+	case QEDE_DUPLEX_HALF:
+		link_duplex = ETH_LINK_HALF_DUPLEX;
+		break;
+	case QEDE_DUPLEX_FULL:
+		link_duplex = ETH_LINK_FULL_DUPLEX;
+		break;
+	case QEDE_DUPLEX_UNKNOWN:
+	default:
+		link_duplex = -1;
+	}
+	curr->link_duplex = link_duplex;
+
+	/* Link Status */
+	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+	/* AN */
+	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+
+	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
+		curr->link_speed, curr->link_duplex,
+		curr->link_autoneg, curr->link_status);
+
+	/* return 0 means link status changed, -1 means not changed */
+	return ((curr->link_status == link.link_up) ? -1 : 0);
+}
+
+static void
+qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
+		     enum qed_filter_rx_mode_type accept_flags)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_filter_params rx_mode;
+
+	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
+
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+	rx_mode.filter.accept_flags = accept_flags;
+	qdev->ops->filter_config(edev, &rx_mode);
+}
+
+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+	qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+		qede_rx_mode_setting(eth_dev,
+				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+	else
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static void qede_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* dev_stop() shall cleanup fp resources in hw but without releasing
+	 * dma memories and sw structures so that dev_start() can be called
+	 * by the app without reconfiguration. However, in dev_close() we
+	 * can release all the resources and device can be brought up newly
+	 */
+	if (qdev->state != QEDE_STOP)
+		qede_dev_stop(eth_dev);
+	else
+		DP_INFO(edev, "Device is already stopped\n");
+
+	qede_free_mem_load(qdev);
+
+	qede_free_fp_arrays(qdev);
+
+	qede_dev_set_link_state(eth_dev, false);
+
+	qdev->ops->common->slowpath_stop(edev);
+
+	qdev->ops->common->remove(edev);
+
+	rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+
+	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+				     qede_interrupt_handler, (void *)eth_dev);
+
+	qdev->state = QEDE_CLOSE;
+}
+
+static void
+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_eth_stats stats;
+
+	qdev->ops->get_vport_stats(edev, &stats);
+
+	/* RX Stats */
+	eth_stats->ipackets = stats.rx_ucast_pkts +
+	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+
+	eth_stats->ibytes = stats.rx_ucast_bytes +
+	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+
+	eth_stats->ierrors = stats.rx_crc_errors +
+	    stats.rx_align_errors +
+	    stats.rx_carrier_errors +
+	    stats.rx_oversize_packets +
+	    stats.rx_jabbers + stats.rx_undersize_packets;
+
+	eth_stats->rx_nombuf = stats.no_buff_discards;
+
+	eth_stats->imissed = stats.mftag_filter_discards +
+	    stats.mac_filter_discards +
+	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+
+	/* TX stats */
+	eth_stats->opackets = stats.tx_ucast_pkts +
+	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+
+	eth_stats->obytes = stats.tx_ucast_bytes +
+	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+
+	eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+	DP_INFO(edev,
+		"no_buff_discards=%" PRIu64 ""
+		" mac_filter_discards=%" PRIu64 ""
+		" brb_truncates=%" PRIu64 ""
+		" brb_discards=%" PRIu64 "\n",
+		stats.no_buff_discards,
+		stats.mac_filter_discards,
+		stats.brb_truncates, stats.brb_discards);
+}
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_params link_params;
+	int rc;
+
+	DP_INFO(edev, "setting link state %d\n", link_up);
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = link_up;
+	rc = qdev->ops->common->set_link(edev, &link_params);
+	if (rc != ECORE_SUCCESS)
+		DP_ERR(edev, "Unable to set link state %d\n", link_up);
+
+	return rc;
+}
+
+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_set_link_state(eth_dev, true);
+}
+
+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_set_link_state(eth_dev, false);
+}
+
+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+	enum qed_filter_rx_mode_type type =
+	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+	qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+	else
+		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_fc_conf *fc_conf)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_output current_link;
+	struct qed_link_params params;
+
+	memset(&current_link, 0, sizeof(current_link));
+	qdev->ops->common->get_link(edev, &current_link);
+
+	memset(&params, 0, sizeof(params));
+	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+	if (fc_conf->autoneg) {
+		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
+			DP_ERR(edev, "Autoneg not supported\n");
+			return -EINVAL;
+		}
+		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+	}
+
+	/* Pause is assumed to be supported (SUPPORTED_Pause) */
+	if (fc_conf->mode == RTE_FC_FULL)
+		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
+					QED_LINK_PAUSE_RX_ENABLE);
+	if (fc_conf->mode == RTE_FC_TX_PAUSE)
+		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+	if (fc_conf->mode == RTE_FC_RX_PAUSE)
+		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+	params.link_up = true;
+	(void)qdev->ops->common->set_link(edev, &params);
+
+	return 0;
+}
+
+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+			      struct rte_eth_fc_conf *fc_conf)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	qdev->ops->common->get_link(edev, &current_link);
+
+	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+		fc_conf->autoneg = true;
+
+	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
+					 QED_LINK_PAUSE_TX_ENABLE))
+		fc_conf->mode = RTE_FC_FULL;
+	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+		fc_conf->mode = RTE_FC_RX_PAUSE;
+	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+		fc_conf->mode = RTE_FC_TX_PAUSE;
+	else
+		fc_conf->mode = RTE_FC_NONE;
+
+	return 0;
+}
+
+static const uint32_t *
+qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+		return ptypes;
+
+	return NULL;
+}
+
+static const struct eth_dev_ops qede_eth_dev_ops = {
+	.dev_configure = qede_dev_configure,
+	.dev_infos_get = qede_dev_info_get,
+	.rx_queue_setup = qede_rx_queue_setup,
+	.rx_queue_release = qede_rx_queue_release,
+	.tx_queue_setup = qede_tx_queue_setup,
+	.tx_queue_release = qede_tx_queue_release,
+	.dev_start = qede_dev_start,
+	.dev_set_link_up = qede_dev_set_link_up,
+	.dev_set_link_down = qede_dev_set_link_down,
+	.link_update = qede_link_update,
+	.promiscuous_enable = qede_promiscuous_enable,
+	.promiscuous_disable = qede_promiscuous_disable,
+	.allmulticast_enable = qede_allmulticast_enable,
+	.allmulticast_disable = qede_allmulticast_disable,
+	.dev_stop = qede_dev_stop,
+	.dev_close = qede_dev_close,
+	.stats_get = qede_get_stats,
+	.mac_addr_add = qede_mac_addr_add,
+	.mac_addr_remove = qede_mac_addr_remove,
+	.mac_addr_set = qede_mac_addr_set,
+	.vlan_offload_set = qede_vlan_offload_set,
+	.vlan_filter_set = qede_vlan_filter_set,
+	.flow_ctrl_set = qede_flow_ctrl_set,
+	.flow_ctrl_get = qede_flow_ctrl_get,
+	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+};
+
+static void qede_update_pf_params(struct ecore_dev *edev)
+{
+	struct ecore_pf_params pf_params;
+	/* 32 rx + 32 tx */
+	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
+	pf_params.eth_pf_params.num_cons = 64;
+	qed_ops->common->update_pf_params(edev, &pf_params);
+}
+
+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
+{
+	struct rte_pci_device *pci_dev;
+	struct rte_pci_addr pci_addr;
+	struct qede_dev *adapter;
+	struct ecore_dev *edev;
+	struct qed_dev_eth_info dev_info;
+	struct qed_slowpath_params params;
+	uint32_t qed_ver;
+	static bool do_once = true;
+	uint8_t bulletin_change;
+	uint8_t vf_mac[ETHER_ADDR_LEN];
+	uint8_t is_mac_forced;
+	bool is_mac_exist;
+	/* Fix up ecore debug level */
+	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
+	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+	uint32_t max_mac_addrs;
+	int rc;
+
+	/* Extract key data structures */
+	adapter = eth_dev->data->dev_private;
+	edev = &adapter->edev;
+	pci_addr = eth_dev->pci_dev->addr;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+		 pci_addr.bus, pci_addr.devid, pci_addr.function,
+		 eth_dev->data->port_id);
+
+	eth_dev->rx_pkt_burst = qede_recv_pkts;
+	eth_dev->tx_pkt_burst = qede_xmit_pkts;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		DP_NOTICE(edev, false,
+			  "Skipping device init from secondary process\n");
+		return 0;
+	}
+
+	pci_dev = eth_dev->pci_dev;
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+	DP_INFO(edev, "Starting qede probe\n");
+
+	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
+				    dp_module, dp_level, is_vf);
+
+	if (rc != 0) {
+		DP_ERR(edev, "qede probe failed rc %d\n", rc);
+		return -ENODEV;
+	}
+
+	qede_update_pf_params(edev);
+
+	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+				   qede_interrupt_handler, (void *)eth_dev);
+
+	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+		DP_ERR(edev, "rte_intr_enable() failed\n");
+		return -ENODEV;
+	}
+
+	/* Start the Slowpath-process */
+	memset(&params, 0, sizeof(struct qed_slowpath_params));
+	params.int_mode = ECORE_INT_MODE_MSIX;
+	params.drv_major = QEDE_MAJOR_VERSION;
+	params.drv_minor = QEDE_MINOR_VERSION;
+	params.drv_rev = QEDE_REVISION_VERSION;
+	params.drv_eng = QEDE_ENGINEERING_VERSION;
+	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+
+	rc = qed_ops->common->slowpath_start(edev, &params);
+	if (rc) {
+		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+		return -ENODEV;
+	}
+
+	rc = qed_ops->fill_dev_info(edev, &dev_info);
+	if (rc) {
+		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
+		qed_ops->common->slowpath_stop(edev);
+		qed_ops->common->remove(edev);
+		return -ENODEV;
+	}
+
+	qede_alloc_etherdev(adapter, &dev_info);
+
+	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+
+	if (!is_vf)
+		adapter->dev_info.num_mac_addrs =
+			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
+					    ECORE_MAC);
+	else
+		adapter->dev_info.num_mac_addrs = 1;
+
+	/* Allocate memory for storing MAC addr */
+	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
+					(ETHER_ADDR_LEN *
+					adapter->dev_info.num_mac_addrs),
+					RTE_CACHE_LINE_SIZE);
+
+	if (eth_dev->data->mac_addrs == NULL) {
+		DP_ERR(edev, "Failed to allocate MAC address\n");
+		qed_ops->common->slowpath_stop(edev);
+		qed_ops->common->remove(edev);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+				hw_info.hw_mac_addr,
+				&eth_dev->data->mac_addrs[0]);
+
+	eth_dev->dev_ops = &qede_eth_dev_ops;
+
+	if (do_once) {
+		qede_print_adapter_info(adapter);
+		do_once = false;
+	}
+
+	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+		  adapter->primary_mac.addr_bytes[0],
+		  adapter->primary_mac.addr_bytes[1],
+		  adapter->primary_mac.addr_bytes[2],
+		  adapter->primary_mac.addr_bytes[3],
+		  adapter->primary_mac.addr_bytes[4],
+		  adapter->primary_mac.addr_bytes[5]);
+
+	return rc;
+}
+
+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+	return qede_common_dev_init(eth_dev, 1);
+}
+
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+	return qede_common_dev_init(eth_dev, 0);
+}
+
+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
+{
+	/* only uninitialize in the primary process */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	/* safe to close dev here */
+	qede_dev_close(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+
+	if (eth_dev->data->mac_addrs)
+		rte_free(eth_dev->data->mac_addrs);
+
+	eth_dev->data->mac_addrs = NULL;
+
+	return 0;
+}
+
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_common_uninit(eth_dev);
+}
+
+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	return qede_dev_common_uninit(eth_dev);
+}
+
+static struct rte_pci_id pci_id_qedevf_map[] = {
+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+	{
+		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+	},
+	{
+		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+	},
+	{.vendor_id = 0,}
+};
+
+static struct rte_pci_id pci_id_qede_map[] = {
+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+	},
+	{.vendor_id = 0,}
+};
+
+static struct eth_driver rte_qedevf_pmd = {
+	.pci_drv = {
+		    .name = "rte_qedevf_pmd",
+		    .id_table = pci_id_qedevf_map,
+		    .drv_flags =
+		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		    },
+	.eth_dev_init = qedevf_eth_dev_init,
+	.eth_dev_uninit = qedevf_eth_dev_uninit,
+	.dev_private_size = sizeof(struct qede_dev),
+};
+
+static struct eth_driver rte_qede_pmd = {
+	.pci_drv = {
+		    .name = "rte_qede_pmd",
+		    .id_table = pci_id_qede_map,
+		    .drv_flags =
+		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		    },
+	.eth_dev_init = qede_eth_dev_init,
+	.eth_dev_uninit = qede_eth_dev_uninit,
+	.dev_private_size = sizeof(struct qede_dev),
+};
+
+static int
+rte_qedevf_pmd_init(const char *name __rte_unused,
+		    const char *params __rte_unused)
+{
+	rte_eth_driver_register(&rte_qedevf_pmd);
+
+	return 0;
+}
+
+static int
+rte_qede_pmd_init(const char *name __rte_unused,
+		  const char *params __rte_unused)
+{
+	rte_eth_driver_register(&rte_qede_pmd);
+
+	return 0;
+}
+
+static struct rte_driver rte_qedevf_driver = {
+	.type = PMD_PDEV,
+	.init = rte_qedevf_pmd_init
+};
+
+static struct rte_driver rte_qede_driver = {
+	.type = PMD_PDEV,
+	.init = rte_qede_pmd_init
+};
+
+PMD_REGISTER_DRIVER(rte_qede_driver);
+PMD_REGISTER_DRIVER(rte_qedevf_driver);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
new file mode 100644
index 0000000..3700ee6
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_ETHDEV_H_
+#define _QEDE_ETHDEV_H_
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+
+/* ecore includes */
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_sp_api.h"
+#include "base/ecore_mcp_api.h"
+#include "base/ecore_hsi_common.h"
+#include "base/ecore_int_api.h"
+#include "base/ecore_chain.h"
+#include "base/ecore_status.h"
+#include "base/ecore_hsi_eth.h"
+#include "base/ecore_dev_api.h"
+
+#include "qede_logs.h"
+#include "qede_if.h"
+#include "qede_eth_if.h"
+
+#include "qede_rxtx.h"
+
+#define qede_stringify1(x...)		#x
+#define qede_stringify(x...)		qede_stringify1(x)
+
+/* Driver versions */
+#define QEDE_PMD_VER_PREFIX		"QEDE PMD"
+#define QEDE_PMD_VERSION_MAJOR		1
+#define QEDE_PMD_VERSION_MINOR		0
+#define QEDE_PMD_VERSION_REVISION	6
+#define QEDE_PMD_VERSION_PATCH		1
+
+#define QEDE_MAJOR_VERSION		8
+#define QEDE_MINOR_VERSION		7
+#define QEDE_REVISION_VERSION		9
+#define QEDE_ENGINEERING_VERSION	0
+
+#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) "."	\
+		qede_stringify(QEDE_MINOR_VERSION) "."			\
+		qede_stringify(QEDE_REVISION_VERSION) "."		\
+		qede_stringify(QEDE_ENGINEERING_VERSION)
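+
+/* Illustrative expansion (with the version numbers defined above):
+ * QEDE_DRV_MODULE_VERSION evaluates to the string "8.7.9.0".
+ */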
+
+#define QEDE_RSS_INDIR_INITED     (1 << 0)
+#define QEDE_RSS_KEY_INITED       (1 << 1)
+#define QEDE_RSS_CAPS_INITED      (1 << 2)
+
+#define QEDE_MAX_RSS_CNT(edev)  ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev)  ((edev)->dev_info.num_queues * \
+					(edev)->dev_info.num_tc)
+
+#define QEDE_RSS_CNT(edev)	((edev)->num_rss)
+#define QEDE_TSS_CNT(edev)	((edev)->num_rss * (edev)->num_tc)
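+
+/* Illustrative: with num_rss = 4 and num_tc = 2, QEDE_RSS_CNT is 4 and
+ * QEDE_TSS_CNT is 8 (one Tx queue per traffic class per RSS queue).
+ */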
+
+#define QEDE_DUPLEX_FULL	1
+#define QEDE_DUPLEX_HALF	2
+#define QEDE_DUPLEX_UNKNOWN     0xff
+
+#define QEDE_SUPPORTED_AUTONEG (1 << 6)
+#define QEDE_SUPPORTED_PAUSE   (1 << 13)
+
+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)
+
+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)
+
+#define QEDE_INIT(eth_dev) {					\
+	struct qede_dev *qdev = eth_dev->data->dev_private;	\
+	struct ecore_dev *edev = &qdev->edev;			\
+}
+
+/************* QLogic 25G/40G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC            0x1077
+
+#define CHIP_NUM_57980E                 0x1634
+#define CHIP_NUM_57980S                 0x1629
+#define CHIP_NUM_VF                     0x1630
+#define CHIP_NUM_57980S_40              0x1634
+#define CHIP_NUM_57980S_25              0x1656
+#define CHIP_NUM_57980S_IOV             0x1664
+
+#define PCI_DEVICE_ID_NX2_57980E        CHIP_NUM_57980E
+#define PCI_DEVICE_ID_NX2_57980S        CHIP_NUM_57980S
+#define PCI_DEVICE_ID_NX2_VF            CHIP_NUM_VF
+#define PCI_DEVICE_ID_57980S_40         CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_25         CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV        CHIP_NUM_57980S_IOV
+
+extern char fw_file[];
+
+/* Port/function states */
+enum dev_state {
+	QEDE_START,
+	QEDE_STOP,
+	QEDE_CLOSE
+};
+
+struct qed_int_param {
+	uint32_t int_mode;
+	uint8_t num_vectors;
+	uint8_t min_msix_cnt;
+};
+
+struct qed_int_params {
+	struct qed_int_param in;
+	struct qed_int_param out;
+	bool fp_initialized;
+};
+
+/*
+ *  Structure to store private data for each port.
+ */
+struct qede_dev {
+	struct ecore_dev edev;
+	uint8_t protocol;
+	const struct qed_eth_ops *ops;
+	struct qed_dev_eth_info dev_info;
+	struct ecore_sb_info *sb_array;
+	struct qede_fastpath *fp_array;
+	uint16_t num_rss;
+	uint8_t num_tc;
+	uint16_t mtu;
+	bool rss_enabled;
+	struct qed_update_vport_rss_params rss_params;
+	uint32_t flags;
+	bool gro_disable;
+	struct qede_rx_queue **rx_queues;
+	struct qede_tx_queue **tx_queues;
+	enum dev_state state;
+
+	/* Vlans */
+	osal_list_t vlan_list;
+	uint16_t configured_vlans;
+	uint16_t non_configured_vlans;
+	bool accept_any_vlan;
+	uint16_t vxlan_dst_port;
+
+	struct ether_addr primary_mac;
+	bool handle_hw_err;
+	char drv_ver[QED_DRV_VER_STR_SIZE];
+};
+
+int qed_fill_eth_dev_info(struct ecore_dev *edev,
+				 struct qed_dev_eth_info *info);
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
new file mode 100644
index 0000000..935eed8
--- /dev/null
+++ b/drivers/net/qede/qede_if.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_IF_H
+#define _QEDE_IF_H
+
+#include "qede_ethdev.h"
+
+/* forward */
+struct ecore_dev;
+struct qed_sb_info;
+struct qed_pf_params;
+enum ecore_int_mode;
+
+struct qed_dev_info {
+	uint8_t num_hwfns;
+	uint8_t hw_mac[ETHER_ADDR_LEN];
+	bool is_mf_default;
+
+	/* FW version */
+	uint16_t fw_major;
+	uint16_t fw_minor;
+	uint16_t fw_rev;
+	uint16_t fw_eng;
+
+	/* MFW version */
+	uint32_t mfw_rev;
+
+	uint32_t flash_size;
+	uint8_t mf_mode;
+	bool tx_switching;
+	/* To be added... */
+};
+
+enum qed_sb_type {
+	QED_SB_TYPE_L2_QUEUE,
+	QED_SB_TYPE_STORAGE,
+	QED_SB_TYPE_CNQ,
+};
+
+enum qed_protocol {
+	QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+	bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         (1 << 0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      (1 << 1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    (1 << 2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          (1 << 3)
+	uint32_t override_flags;
+	bool autoneg;
+	uint32_t adv_speeds;
+	uint32_t forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE           (1 << 0)
+#define QED_LINK_PAUSE_RX_ENABLE                (1 << 1)
+#define QED_LINK_PAUSE_TX_ENABLE                (1 << 2)
+	uint32_t pause_config;
+};
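+
+/* Illustrative sketch, not part of the driver: to force a fixed speed
+ * with autoneg off, a caller would fill this struct roughly as
+ *
+ *	struct qed_link_params params = { 0 };
+ *
+ *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
+ *				QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ *	params.autoneg = false;
+ *	params.forced_speed = 10000;	(in Mb/s; hypothetical value)
+ *	params.link_up = true;
+ */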
+
+struct qed_link_output {
+	bool link_up;
+	uint32_t supported_caps;	/* In SUPPORTED defs */
+	uint32_t advertised_caps;	/* In ADVERTISED defs */
+	uint32_t lp_caps;	/* In ADVERTISED defs */
+	uint32_t speed;		/* In Mb/s */
+	uint8_t duplex;		/* In DUPLEX defs */
+	uint8_t port;		/* In PORT defs */
+	bool autoneg;
+	uint32_t pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 80
+struct qed_slowpath_params {
+	uint32_t int_mode;
+	uint8_t drv_major;
+	uint8_t drv_minor;
+	uint8_t drv_rev;
+	uint8_t drv_eng;
+	uint8_t name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000	/* 32KB */
+
+struct qed_common_cb_ops {
+	void (*link_update)(void *dev, struct qed_link_output *link);
+};
+
+struct qed_selftest_ops {
+/**
+ * @brief registers - Perform register tests
+ *
+ * @param edev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*registers)(struct ecore_dev *edev);
+};
+
+struct qed_common_ops {
+	int (*probe)(struct ecore_dev *edev,
+		     struct rte_pci_device *pci_dev,
+		     enum qed_protocol protocol,
+		     uint32_t dp_module, uint8_t dp_level, bool is_vf);
+	void (*set_id)(struct ecore_dev *edev,
+		char name[], const char ver_str[]);
+	enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,
+					    enum ecore_chain_use_mode
+					    intended_use,
+					    enum ecore_chain_mode mode,
+					    enum ecore_chain_cnt_type cnt_type,
+					    uint32_t num_elems,
+					    osal_size_t elem_size,
+					    struct ecore_chain *p_chain);
+
+	void (*chain_free)(struct ecore_dev *edev,
+			   struct ecore_chain *p_chain);
+
+	void (*get_link)(struct ecore_dev *edev,
+			 struct qed_link_output *if_link);
+	int (*set_link)(struct ecore_dev *edev,
+			struct qed_link_params *params);
+
+	int (*drain)(struct ecore_dev *edev);
+
+	void (*remove)(struct ecore_dev *edev);
+
+	int (*slowpath_stop)(struct ecore_dev *edev);
+
+	void (*update_pf_params)(struct ecore_dev *edev,
+				 struct ecore_pf_params *params);
+
+	int (*slowpath_start)(struct ecore_dev *edev,
+			      struct qed_slowpath_params *params);
+
+	int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);
+
+	uint32_t (*sb_init)(struct ecore_dev *edev,
+			    struct ecore_sb_info *sb_info,
+			    void *sb_virt_addr,
+			    dma_addr_t sb_phy_addr,
+			    uint16_t sb_id, enum qed_sb_type type);
+
+	bool (*can_link_change)(struct ecore_dev *edev);
+	void (*update_msglvl)(struct ecore_dev *edev,
+			      uint32_t dp_module, uint8_t dp_level);
+};
+
+#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
new file mode 100644
index 0000000..9f9bb64
--- /dev/null
+++ b/drivers/net/qede/qede_logs.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_LOGS_H_
+#define _QEDE_LOGS_H_
+
+#define DP_ERR(p_dev, fmt, ...) \
+	rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \
+		"[%s:%d(%s)]" fmt, \
+		  __func__, __LINE__, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		##__VA_ARGS__)
+
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
+	rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+		"[QEDE PMD: (%s)]%s:" fmt, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		 __func__, \
+		##__VA_ARGS__)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+
+#define DP_INFO(p_dev, fmt, ...) \
+	rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
+		"[%s:%d(%s)]" fmt, \
+		__func__, __LINE__, \
+		(p_dev)->name ? (p_dev)->name : "", \
+		##__VA_ARGS__)
+#else
+#define DP_INFO(p_dev, fmt, ...) do { } while (0)
+
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRV
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+do { \
+	if ((p_dev)->dp_module & module) \
+		rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \
+			"[%s:%d(%s)]" fmt, \
+		      __func__, __LINE__, \
+		      (p_dev)->name ? (p_dev)->name : "", \
+		      ##__VA_ARGS__); \
+} while (0)
+#else
+#define DP_VERBOSE(p_dev, module, fmt, ...) do { } while (0)
+#endif
+
+#define PMD_INIT_LOG(level, edev, fmt, args...)	\
+	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+		"[qede_pmd: %s] %s() " fmt "\n", \
+	(edev)->name, __func__, ##args)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+#define PMD_TX_LOG(level, q, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+		__func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_TX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+#define PMD_RX_LOG(level, q, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n",	\
+		__func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
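+
+/* Illustrative usage of the macros above, not part of the driver; the
+ * queue argument of PMD_RX_LOG/PMD_TX_LOG is assumed to expose port_id
+ * and queue_id fields:
+ *
+ *	DP_ERR(edev, "probe failed rc %d\n", rc);
+ *	PMD_RX_LOG(DEBUG, rxq, "received %u packets", nb_rx);
+ *	PMD_DRV_LOG(INFO, "link is %s", link_up ? "up" : "down");
+ */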
+
+#endif /* _QEDE_LOGS_H_ */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
new file mode 100644
index 0000000..b09fb85
--- /dev/null
+++ b/drivers/net/qede/qede_main.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+#include <limits.h>
+
+#include "qede_ethdev.h"
+
+static uint8_t npar_tx_switching = 1;
+
+#define CONFIG_QED_BINARY_FW
+/* Global variable to hold absolute path of fw file */
+char fw_file[PATH_MAX];
+
+const char *QEDE_DEFAULT_FIRMWARE =
+	"/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin";
+
+static void
+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
+{
+	int i;
+
+	for (i = 0; i < edev->num_hwfns; i++) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+		p_hwfn->pf_params = *params;
+	}
+}
+
+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
+{
+	edev->regview = pci_dev->mem_resource[0].addr;
+	edev->doorbells = pci_dev->mem_resource[2].addr;
+}
+
+static int
+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
+	  enum qed_protocol protocol, uint32_t dp_module,
+	  uint8_t dp_level, bool is_vf)
+{
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int rc;
+
+	ecore_init_struct(edev);
+	qdev->protocol = protocol;
+	if (is_vf) {
+		edev->b_is_vf = true;
+		edev->sriov_info.b_hw_channel = true;
+	}
+	ecore_init_dp(edev, dp_module, dp_level, NULL);
+	qed_init_pci(edev, pci_dev);
+	rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);
+	if (rc) {
+		DP_ERR(edev, "hw prepare failed\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int qed_nic_setup(struct ecore_dev *edev)
+{
+	int rc;
+
+	rc = ecore_resc_alloc(edev);
+	if (rc)
+		return rc;
+
+	DP_INFO(edev, "Allocated qed resources\n");
+	ecore_resc_setup(edev);
+
+	return rc;
+}
+
+static int qed_alloc_stream_mem(struct ecore_dev *edev)
+{
+	int i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+					     sizeof(*p_hwfn->stream));
+		if (!p_hwfn->stream)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void qed_free_stream_mem(struct ecore_dev *edev)
+{
+	int i;
+
+	for_each_hwfn(edev, i) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		if (!p_hwfn->stream)
+			return;
+
+		OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
+	}
+}
+
+static int qed_load_firmware_data(struct ecore_dev *edev)
+{
+	int fd;
+	struct stat st;
+	const char *fw = RTE_LIBRTE_QEDE_FW;
+
+	if (strcmp(fw, "") == 0)
+		strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+	else
+		strcpy(fw_file, fw);
+
+	fd = open(fw_file, O_RDONLY);
+	if (fd < 0) {
+		DP_NOTICE(edev, false, "Can't open firmware file\n");
+		return -ENOENT;
+	}
+
+	if (fstat(fd, &st) < 0) {
+		DP_NOTICE(edev, false, "Can't stat firmware file\n");
+		close(fd);
+		return -1;
+	}
+
+	edev->firmware = rte_zmalloc("qede_fw", st.st_size,
+				    RTE_CACHE_LINE_SIZE);
+	if (!edev->firmware) {
+		DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+		close(fd);
+		return -ENOMEM;
+	}
+
+	if (read(fd, edev->firmware, st.st_size) != st.st_size) {
+		DP_NOTICE(edev, false, "Can't read firmware data\n");
+		close(fd);
+		return -1;
+	}
+
+	close(fd);
+
+	edev->fw_len = st.st_size;
+	if (edev->fw_len < 104) {
+		DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+			  edev->fw_len);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qed_slowpath_start(struct ecore_dev *edev,
+			      struct qed_slowpath_params *params)
+{
+	bool allow_npar_tx_switching;
+	const uint8_t *data = NULL;
+	struct ecore_hwfn *hwfn;
+	struct ecore_mcp_drv_version drv_version;
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int rc;
+#ifdef QED_ENC_SUPPORTED
+	struct ecore_tunn_start_params tunn_info;
+#endif
+
+#ifdef CONFIG_QED_BINARY_FW
+	rc = qed_load_firmware_data(edev);
+	if (rc) {
+		DP_NOTICE(edev, true,
+			  "Failed to find fw file %s\n", fw_file);
+		goto err;
+	}
+#endif
+
+	rc = qed_nic_setup(edev);
+	if (rc)
+		goto err;
+
+	/* set int_coalescing_mode */
+	edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
+
+	/* Should go with CONFIG_QED_BINARY_FW */
+	/* Allocate stream for unzipping */
+	rc = qed_alloc_stream_mem(edev);
+	if (rc) {
+		DP_NOTICE(edev, true,
+			  "Failed to allocate stream memory\n");
+		goto err2;
+	}
+
+	/* Start the slowpath */
+#ifdef CONFIG_QED_BINARY_FW
+	data = edev->firmware;
+#endif
+	allow_npar_tx_switching = npar_tx_switching ? true : false;
+
+#ifdef QED_ENC_SUPPORTED
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+	    1 << QED_MODE_L2GRE_TUNN |
+	    1 << QED_MODE_IPGRE_TUNN |
+	    1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
+	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+	rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,
+			   allow_npar_tx_switching, data);
+#else
+	rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,
+			   allow_npar_tx_switching, data);
+#endif
+	if (rc) {
+		DP_ERR(edev, "ecore_hw_init failed\n");
+		goto err2;
+	}
+
+	DP_INFO(edev, "HW inited and function started\n");
+
+	hwfn = ECORE_LEADING_HWFN(edev);
+	drv_version.version = (params->drv_major << 24) |
+		    (params->drv_minor << 16) |
+		    (params->drv_rev << 8) | (params->drv_eng);
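+	/* e.g. driver version 8.7.9.0 packs into 0x08070900 (illustrative) */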
+	/* TBD: strlcpy() */
+	strncpy((char *)drv_version.name, (const char *)params->name,
+			MCP_DRV_VER_STR_SIZE - 4);
+	rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+						&drv_version);
+	if (rc) {
+		DP_NOTICE(edev, true,
+			  "Failed sending drv version command\n");
+		goto err3;
+	}
+
+	return 0;
+
+err3:
+	ecore_hw_stop(edev);
+err2:
+	ecore_resc_free(edev);
+err:
+#ifdef CONFIG_QED_BINARY_FW
+	if (edev->firmware)
+		rte_free(edev->firmware);
+	edev->firmware = NULL;
+#endif
+	return rc;
+}
+
+static int
+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+{
+	struct ecore_ptt *ptt = NULL;
+
+	memset(dev_info, 0, sizeof(struct qed_dev_info));
+	dev_info->num_hwfns = edev->num_hwfns;
+	dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+	rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+	       ETHER_ADDR_LEN);
+
+	dev_info->fw_major = FW_MAJOR_VERSION;
+	dev_info->fw_minor = FW_MINOR_VERSION;
+	dev_info->fw_rev = FW_REVISION_VERSION;
+	dev_info->fw_eng = FW_ENGINEERING_VERSION;
+	dev_info->mf_mode = edev->mf_mode;
+	dev_info->tx_switching = false;
+
+	ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
+	if (ptt) {
+		ecore_mcp_get_mfw_ver(edev, ptt,
+					      &dev_info->mfw_rev, NULL);
+
+		ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
+						 &dev_info->flash_size);
+
+		/* Workaround to allow PHY-read commands for
+		 * B0 bringup.
+		 */
+		if (ECORE_IS_BB_B0(edev))
+			dev_info->flash_size = 0xffffffff;
+
+		ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
+	}
+
+	return 0;
+}
+
+int
+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+{
+	struct qede_dev *qdev = (struct qede_dev *)edev;
+	int i;
+
+	memset(info, 0, sizeof(*info));
+
+	info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
+
+	info->num_queues = 0;
+	for_each_hwfn(edev, i)
+		info->num_queues +=
+		    FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
+
+	info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);
+
+	rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+			   ETHER_ADDR_LEN);
+
+	qed_fill_dev_info(edev, &info->common);
+
+	return 0;
+}
+
+static void
+qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
+	   const char ver_str[VER_SIZE])
+{
+	int i;
+
+	rte_memcpy(edev->name, name, NAME_SIZE);
+	for_each_hwfn(edev, i) {
+		snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+	}
+	rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
+	edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static uint32_t
+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
+	    void *sb_virt_addr, dma_addr_t sb_phy_addr,
+	    uint16_t sb_id, enum qed_sb_type type)
+{
+	struct ecore_hwfn *p_hwfn;
+	int hwfn_index;
+	uint16_t rel_sb_id;
+	uint8_t n_hwfns;
+	uint32_t rc;
+
+	/* In CMT mode the device has two engines and L2 queues are spread
+	 * across both of them; RoCE and storage always use engine 0 only.
+	 */
+	if (type == QED_SB_TYPE_L2_QUEUE)
+		n_hwfns = edev->num_hwfns;
+	else
+		n_hwfns = 1;
+
+	hwfn_index = sb_id % n_hwfns;
+	p_hwfn = &edev->hwfns[hwfn_index];
+	rel_sb_id = sb_id / n_hwfns;
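+	/* e.g. with two hwfns, sb_id 5 maps to hwfn_index 1, rel_sb_id 2 */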
+
+	DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+		hwfn_index, rel_sb_id, sb_id);
+
+	rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+			       sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+	return rc;
+}
+
+static void qed_fill_link(struct ecore_hwfn *hwfn,
+			  struct qed_link_output *if_link)
+{
+	struct ecore_mcp_link_params params;
+	struct ecore_mcp_link_state link;
+	struct ecore_mcp_link_capabilities link_caps;
+
+	memset(if_link, 0, sizeof(*if_link));
+
+	/* Prepare source inputs */
+	rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+		       sizeof(params));
+	rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+	rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+		       sizeof(link_caps));
+
+	/* Set the link parameters to pass to protocol driver */
+	if (link.link_up) {
+		if_link->link_up = true;
+		if_link->speed = link.speed;
+	}
+
+	if_link->duplex = QEDE_DUPLEX_FULL;
+
+	if (params.speed.autoneg)
+		if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
+
+	if (params.pause.autoneg || params.pause.forced_rx ||
+	    params.pause.forced_tx)
+		if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;
+
+	if (params.pause.autoneg)
+		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+
+	if (params.pause.forced_rx)
+		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+	if (params.pause.forced_tx)
+		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+}
+
+static void
+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
+{
+#ifdef CONFIG_QED_SRIOV
+	int i;
+#endif
+
+	qed_fill_link(&edev->hwfns[0], if_link);
+
+#ifdef CONFIG_QED_SRIOV
+	for_each_hwfn(edev, i)
+		qed_inform_vf_link_state(&edev->hwfns[i]);
+#endif
+}
+
+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
+{
+	struct ecore_hwfn *hwfn;
+	struct ecore_ptt *ptt;
+	struct ecore_mcp_link_params *link_params;
+	int rc;
+
+	/* The link should be set only once per PF */
+	hwfn = &edev->hwfns[0];
+
+	ptt = ecore_ptt_acquire(hwfn);
+	if (!ptt)
+		return -EBUSY;
+
+	link_params = ecore_mcp_get_link_params(hwfn);
+	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+		link_params->speed.autoneg = params->autoneg;
+
+	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+			link_params->pause.autoneg = true;
+		else
+			link_params->pause.autoneg = false;
+		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+			link_params->pause.forced_rx = true;
+		else
+			link_params->pause.forced_rx = false;
+		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+			link_params->pause.forced_tx = true;
+		else
+			link_params->pause.forced_tx = false;
+	}
+
+	rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
+
+	ecore_ptt_release(hwfn, ptt);
+
+	return rc;
+}
+
+static int qed_drain(struct ecore_dev *edev)
+{
+	struct ecore_hwfn *hwfn;
+	struct ecore_ptt *ptt;
+	int i, rc;
+
+	for_each_hwfn(edev, i) {
+		hwfn = &edev->hwfns[i];
+		ptt = ecore_ptt_acquire(hwfn);
+		if (!ptt) {
+			DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+			return -EBUSY;
+		}
+		rc = ecore_mcp_drain(hwfn, ptt);
+		if (rc)
+			return rc;
+		ecore_ptt_release(hwfn, ptt);
+	}
+
+	return 0;
+}
+
+static int qed_nic_stop(struct ecore_dev *edev)
+{
+	int i, rc;
+
+	rc = ecore_hw_stop(edev);
+	for (i = 0; i < edev->num_hwfns; i++) {
+		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+		if (p_hwfn->b_sp_dpc_enabled)
+			p_hwfn->b_sp_dpc_enabled = false;
+	}
+	return rc;
+}
+
+static int qed_nic_reset(struct ecore_dev *edev)
+{
+	int rc;
+
+	rc = ecore_hw_reset(edev);
+	if (rc)
+		return rc;
+
+	ecore_resc_free(edev);
+
+	return 0;
+}
+
+static int qed_slowpath_stop(struct ecore_dev *edev)
+{
+#ifdef CONFIG_QED_SRIOV
+	int i;
+#endif
+
+	if (!edev)
+		return -ENODEV;
+
+	qed_free_stream_mem(edev);
+
+	qed_nic_stop(edev);
+
+	qed_nic_reset(edev);
+
+	return 0;
+}
+
+static void qed_remove(struct ecore_dev *edev)
+{
+	if (!edev)
+		return;
+
+	ecore_hw_remove(edev);
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+	INIT_STRUCT_FIELD(probe, &qed_probe),
+	INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
+	INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
+	INIT_STRUCT_FIELD(set_id, &qed_set_id),
+	INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
+	INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
+	INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+	INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
+	INIT_STRUCT_FIELD(set_link, &qed_set_link),
+	INIT_STRUCT_FIELD(drain, &qed_drain),
+	INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
+	INIT_STRUCT_FIELD(remove, &qed_remove),
+};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
new file mode 100644
index 0000000..d9194ee
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.c
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_rxtx.h"
+
+static bool gro_disable = true;	/* mod_param */
+
+static inline struct rte_mbuf *
+qede_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check(m, 0);
+
+	return m;
+}
+
+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+	struct rte_mbuf *new_mb = NULL;
+	struct eth_rx_bd *rx_bd;
+	dma_addr_t mapping;
+	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
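+	/* nb_rx_desc is enforced to be a power of two at queue setup, so
+	 * masking with NUM_RX_BDS(rxq) == nb_rx_desc - 1 wraps the
+	 * producer index; e.g. sw_rx_prod 513 on a 512-entry ring -> 1.
+	 */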
+
+	new_mb = qede_rxmbuf_alloc(rxq->mb_pool);
+	if (unlikely(!new_mb)) {
+		PMD_RX_LOG(ERR, rxq,
+			   "Failed to allocate rx buffer "
+			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+			   rte_mempool_count(rxq->mb_pool),
+			   rte_mempool_free_count(rxq->mb_pool));
+		return -ENOMEM;
+	}
+	rxq->sw_rx_ring[idx].mbuf = new_mb;
+	rxq->sw_rx_ring[idx].page_offset = 0;
+	mapping = rte_mbuf_data_dma_addr_default(new_mb);
+	/* Advance PROD and get BD pointer */
+	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
+	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+	rxq->sw_rx_prod++;
+	return 0;
+}
+
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_rx_ring != NULL) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_rx_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+				rxq->sw_rx_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+	struct qede_rx_queue *rxq = rx_queue;
+
+	if (rxq != NULL) {
+		qede_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+	}
+}
+
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct rte_eth_dev_data *eth_data = dev->data;
+	struct qede_rx_queue *rxq;
+	uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	size_t size;
+	uint16_t data_size;
+	int rc;
+	int i;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+	if (!rte_is_power_of_2(nb_desc)) {
+		DP_ERR(edev, "Ring size %u is not power of 2\n",
+			  nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* First allocate the rx queue data structure */
+	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (!rxq) {
+		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
+			  socket_id);
+		return -ENOMEM;
+	}
+
+	rxq->qdev = qdev;
+	rxq->mb_pool = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->queue_id = queue_idx;
+	rxq->port_id = dev->data->port_id;
+
+	/* Sanity check */
+	data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
+				RTE_PKTMBUF_HEADROOM;
+
+	if (pkt_len > data_size) {
+		DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
+		       pkt_len, data_size);
+		rte_free(rxq);
+		return -EINVAL;
+	}
+
+	qdev->mtu = pkt_len;
+	rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
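+	/* e.g. with a 1518-byte max frame the buffer must hold
+	 * 1518 + QEDE_ETH_OVERHEAD bytes (illustrative figure).
+	 */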
+
+	DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
+		qdev->mtu, rxq->rx_buf_size);
+
+	if (pkt_len > ETHER_MAX_LEN) {
+		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		DP_NOTICE(edev, false, "jumbo frame enabled\n");
+	} else {
+		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+	}
+
+	/* Allocate the parallel driver ring for Rx buffers */
+	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
+	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
+					     RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->sw_rx_ring) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
+			  socket_id);
+		rte_free(rxq);
+		rxq = NULL;
+		return -ENOMEM;
+	}
+
+	/* Allocate FW Rx ring  */
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    ECORE_CHAIN_MODE_NEXT_PTR,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    rxq->nb_rx_desc,
+					    sizeof(struct eth_rx_bd),
+					    &rxq->rx_bd_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for rxbd ring on socket %u\n",
+			  socket_id);
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate FW completion ring */
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME,
+					    ECORE_CHAIN_MODE_PBL,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    rxq->nb_rx_desc,
+					    sizeof(union eth_rx_cqe),
+					    &rxq->rx_comp_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for cqe ring on socket %u\n",
+			  socket_id);
+		/* TBD: Freeing RX BD ring */
+		rte_free(rxq->sw_rx_ring);
+		rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate buffers for the Rx ring */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		rc = qede_alloc_rx_buffer(rxq);
+		if (rc) {
+			DP_NOTICE(edev, false,
+				  "RX buffer allocation failed at idx=%d\n", i);
+			goto err4;
+		}
+	}
+
+	dev->data->rx_queues[queue_idx] = rxq;
+	if (!qdev->rx_queues)
+		qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;
+
+	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
+		  queue_idx, nb_desc, qdev->mtu, socket_id);
+
+	return 0;
+err4:
+	qede_rx_queue_release(rxq);
+	return -ENOMEM;
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+	unsigned int i;
+
+	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+
+	if (txq->sw_tx_ring != NULL) {
+		for (i = 0; i < txq->nb_tx_desc; i++) {
+			if (txq->sw_tx_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+				txq->sw_tx_ring[i].mbuf = NULL;
+			}
+		}
+	}
+}
+
+void qede_tx_queue_release(void *tx_queue)
+{
+	struct qede_tx_queue *txq = tx_queue;
+
+	if (txq != NULL) {
+		qede_tx_queue_release_mbufs(txq);
+		if (txq->sw_tx_ring) {
+			rte_free(txq->sw_tx_ring);
+			txq->sw_tx_ring = NULL;
+		}
+		rte_free(txq);
+	}
+}
+
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+		    uint16_t queue_idx,
+		    uint16_t nb_desc,
+		    unsigned int socket_id,
+		    const struct rte_eth_txconf *tx_conf)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_tx_queue *txq;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
+
+	if (!rte_is_power_of_2(nb_desc)) {
+		DP_ERR(edev, "Ring size %u is not power of 2\n",
+		       nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed... */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (txq == NULL) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txq on socket %u",
+		       socket_id);
+		return -ENOMEM;
+	}
+
+	txq->nb_tx_desc = nb_desc;
+	txq->qdev = qdev;
+	txq->port_id = dev->data->port_id;
+
+	rc = qdev->ops->common->chain_alloc(edev,
+					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    ECORE_CHAIN_MODE_PBL,
+					    ECORE_CHAIN_CNT_TYPE_U16,
+					    txq->nb_tx_desc,
+					    sizeof(union eth_tx_bd_types),
+					    &txq->tx_pbl);
+	if (rc != ECORE_SUCCESS) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txbd ring on socket %u",
+		       socket_id);
+		qede_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	/* Allocate software ring */
+	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
+					     (sizeof(struct qede_tx_entry) *
+					      txq->nb_tx_desc),
+					     RTE_CACHE_LINE_SIZE, socket_id);
+
+	if (!txq->sw_tx_ring) {
+		DP_ERR(edev,
+		       "Unable to allocate memory for txbd ring on socket %u",
+		       socket_id);
+		qede_tx_queue_release(txq);
+		return -ENOMEM;
+	}
+
+	txq->queue_id = queue_idx;
+
+	txq->nb_tx_avail = txq->nb_tx_desc;
+
+	txq->tx_free_thresh =
+	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
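+	/* e.g. assuming QEDE_DEFAULT_TX_FREE_THRESH were 32, a 512-entry
+	 * ring with tx_free_thresh 0 would default to 512 - 32 = 480
+	 * (illustrative values only).
+	 */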
+
+	dev->data->tx_queues[queue_idx] = txq;
+	if (!qdev->tx_queues)
+		qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;
+
+	txq->txq_counter = 0;
+
+	DP_INFO(edev,
+		  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
+		  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+
+	return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
+static void qede_init_fp(struct qede_dev *qdev)
+{
+	struct qede_fastpath *fp;
+	int rss_id, txq_index, tc;
+
+	memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *
+					   sizeof(*qdev->fp_array)));
+	memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *
+					   sizeof(*qdev->sb_array)));
+	for_each_rss(rss_id) {
+		fp = &qdev->fp_array[rss_id];
+
+		fp->qdev = qdev;
+		fp->rss_id = rss_id;
+
+		/* Point rxq to the generic rte queues that were created
+		 * as part of queue creation.
+		 */
+		fp->rxq = qdev->rx_queues[rss_id];
+		fp->sb_info = &qdev->sb_array[rss_id];
+
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;
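+			/* e.g. num_rss = 4: tc 1, rss_id 2 -> txq_index 6 */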
+			fp->txqs[tc] = qdev->tx_queues[txq_index];
+			fp->txqs[tc]->queue_id = txq_index;
+			/* Updating it to main structure */
+			snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+				 "qdev", rss_id);
+		}
+	}
+
+	qdev->gro_disable = gro_disable;
+}
+
+void qede_free_fp_arrays(struct qede_dev *qdev)
+{
+	/* It assumes qede_free_mem_load() is called before */
+	if (qdev->fp_array != NULL) {
+		rte_free(qdev->fp_array);
+		qdev->fp_array = NULL;
+	}
+
+	if (qdev->sb_array != NULL) {
+		rte_free(qdev->sb_array);
+		qdev->sb_array = NULL;
+	}
+}
+
+int qede_alloc_fp_array(struct qede_dev *qdev)
+{
+	struct qede_fastpath *fp;
+	struct ecore_dev *edev = &qdev->edev;
+	int i;
+
+	qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev),
+				    sizeof(*qdev->fp_array),
+				    RTE_CACHE_LINE_SIZE);
+
+	if (!qdev->fp_array) {
+		DP_ERR(edev, "fp array allocation failed\n");
+		return -ENOMEM;
+	}
+
+	qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev),
+				    sizeof(*qdev->sb_array),
+				    RTE_CACHE_LINE_SIZE);
+
+	if (!qdev->sb_array) {
+		DP_ERR(edev, "sb array allocation failed\n");
+		rte_free(qdev->fp_array);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* This function allocates fast-path status block memory */
+static int
+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
+		  uint16_t sb_id)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct status_block *sb_virt;
+	dma_addr_t sb_phys;
+	int rc;
+
+	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
+
+	if (!sb_virt) {
+		DP_ERR(edev, "Status block allocation failed\n");
+		return -ENOMEM;
+	}
+
+	rc = qdev->ops->common->sb_init(edev, sb_info,
+					sb_virt, sb_phys, sb_id,
+					QED_SB_TYPE_L2_QUEUE);
+	if (rc) {
+		DP_ERR(edev, "Status block initialization failed\n");
+		/* TBD: No dma_free_coherent possible */
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+	return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);
+}
+
+static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)
+{
+	/* @@@TBD - this should also re-set the qed interrupts */
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *qdev)
+{
+	int rc = 0, rss_id;
+	struct ecore_dev *edev = &qdev->edev;
+
+	for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+
+		rc = qede_alloc_mem_fp(qdev, fp);
+		if (rc)
+			break;
+	}
+
+	if (rss_id != QEDE_RSS_CNT(qdev)) {
+		/* Failed allocating memory for all the queues */
+		if (!rss_id) {
+			DP_ERR(edev,
+			       "Failed to alloc memory for leading queue\n");
+			return -ENOMEM;
+		}
+
+		DP_NOTICE(edev, false,
+			  "Failed to allocate memory for all of "
+			  "RSS queues\n"
+			  "Desired: %d queues, allocated: %d queues\n",
+			  QEDE_RSS_CNT(qdev), rss_id);
+		qede_shrink_txq(qdev, rss_id);
+		qdev->num_rss = rss_id;
+	}
+
+	return 0;
+}
+
+static inline void
+qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
+	struct eth_rx_prod_data rx_prods = { 0 };
+
+	/* Update producers */
+	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
+	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
+
+	/* Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 */
+	rte_wmb();
+
+	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+			(uint32_t *)&rx_prods);
+
+	/* This second barrier synchronizes doorbell writes issued from
+	 * more than one processor: it guarantees the producer update
+	 * reaches the device before a later doorbell from another lcore
+	 * can bypass it.
+	 */
+	rte_wmb();
+
+	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u\n", bd_prod, cqe_prod);
+}
+
+static inline uint32_t
+qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
+{
+	return index % n_rx_rings;
+}
+
+#ifdef ENC_SUPPORTED
+static bool qede_tunn_exist(uint16_t flag)
+{
+	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+	uint8_t tcsum = 0;
+	uint16_t csum_flag = 0;
+
+	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+#else
+static inline uint8_t qede_tunn_exist(uint16_t flag)
+{
+	return 0;
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+	return 0;
+}
+#endif
+
+static inline uint8_t qede_check_notunn_csum(uint16_t flag)
+{
+	uint8_t csum = 0;
+	uint16_t csum_flag = 0;
+
+	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		csum = QEDE_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return csum;
+}
+
+static inline uint8_t qede_check_csum(uint16_t flag)
+{
+	if (likely(!qede_tunn_exist(flag)))
+		return qede_check_notunn_csum(flag);
+	else
+		return qede_check_tunn_csum(flag);
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	ecore_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
+static inline void
+qede_reuse_page(struct qede_dev *qdev,
+		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+{
+	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
+	uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+	struct qede_rx_entry *curr_prod;
+	dma_addr_t new_mapping;
+
+	curr_prod = &rxq->sw_rx_ring[idx];
+	*curr_prod = *curr_cons;
+
+	new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+		      curr_prod->page_offset;
+
+	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
+	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
+
+	rxq->sw_rx_prod++;
+}
+
+static inline void
+qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+			struct qede_dev *qdev, uint8_t count)
+{
+	struct qede_rx_entry *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+		qede_reuse_page(qdev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+	uint32_t p_type = RTE_PTYPE_UNKNOWN;
+	/* TBD - L4 indications needed ? */
+	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
+
+	/* protocol = 3 means LLC/SNAP over Ethernet */
+	if (unlikely(protocol == 0 || protocol == 3))
+		p_type = RTE_PTYPE_UNKNOWN;
+	else if (protocol == 1)
+		p_type = RTE_PTYPE_L3_IPV4;
+	else if (protocol == 2)
+		p_type = RTE_PTYPE_L3_IPV6;
+
+	return RTE_PTYPE_L2_ETHER | p_type;
+}
+
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct qede_rx_queue *rxq = p_rxq;
+	struct qede_dev *qdev = rxq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
+	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+	uint16_t rx_pkt = 0;
+	union eth_rx_cqe *cqe;
+	struct eth_fast_path_rx_reg_cqe *fp_cqe;
+	register struct rte_mbuf *rx_mb = NULL;
+	enum eth_rx_cqe_type cqe_type;
+	uint16_t len, pad;
+	uint16_t preload_idx;
+	uint8_t csum_flag;
+	uint16_t parse_flag;
+	enum rss_hash_type htype;
+
+	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	rte_rmb();
+
+	if (hw_comp_cons == sw_comp_cons)
+		return 0;
+
+	while (sw_comp_cons != hw_comp_cons) {
+		/* Get the CQE from the completion ring */
+		cqe =
+		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+		cqe_type = cqe->fast_path_regular.type;
+
+		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+			PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
+
+			qdev->ops->eth_cqe_completion(edev, fp->rss_id,
+				(struct eth_slow_path_rx_cqe *)cqe);
+			goto next_cqe;
+		}
+
+		/* Get the data from the SW ring */
+		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+		assert(rx_mb != NULL);
+
+		/* non GRO */
+		fp_cqe = &cqe->fast_path_regular;
+
+		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+		pad = fp_cqe->placement_offset;
+		assert((len + pad) <= rx_mb->buf_len);
+
+		PMD_RX_LOG(DEBUG, rxq,
+			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
+			   " len = %u, parsing_flags = %d\n",
+			   cqe_type, fp_cqe->bitfields,
+			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
+			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
+
+		/* If this is an error packet then drop it */
+		parse_flag =
+		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
+		csum_flag = qede_check_csum(parse_flag);
+		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+			PMD_RX_LOG(ERR, rxq,
+				   "CQE in CONS = %u has error, flags = 0x%x "
+				   "dropping incoming packet\n",
+				   sw_comp_cons, parse_flag);
+			rxq->rx_hw_errors++;
+			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+			goto next_cqe;
+		}
+
+		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+			PMD_RX_LOG(ERR, rxq,
+				   "New buffer allocation failed, "
+				   "dropping incoming packet\n");
+			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+			rte_eth_devices[rxq->port_id].
+			    data->rx_mbuf_alloc_failed++;
+			rxq->rx_alloc_errors++;
+			break;
+		}
+
+		qede_rx_bd_ring_consume(rxq);
+
+		/* Prefetch next mbuf while processing current one. */
+		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+		if (fp_cqe->bd_num != 1)
+			PMD_RX_LOG(DEBUG, rxq,
+				   "Jumbo-over-BD packet not supported\n");
+
+		/* Update MBUF fields */
+		rx_mb->ol_flags = 0;
+		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
+		rx_mb->nb_segs = 1;
+		rx_mb->data_len = len;
+		rx_mb->pkt_len = len;
+		rx_mb->port = rxq->port_id;
+		rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
+
+		htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
+				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+		if (qdev->rss_enabled && htype) {
+			rx_mb->ol_flags |= PKT_RX_RSS_HASH;
+			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
+			PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
+				   rx_mb->hash.rss);
+		}
+
+		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+
+		if (CQE_HAS_VLAN(parse_flag)) {
+			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+		}
+
+		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+			/* FW does not provide indication of Outer VLAN tag,
+			 * which is always stripped, so vlan_tci_outer is set
+			 * to 0. Here vlan_tag represents inner VLAN tag.
+			 */
+			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+			rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
+		}
+
+		rx_pkts[rx_pkt] = rx_mb;
+		rx_pkt++;
+next_cqe:
+		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+		if (rx_pkt == nb_pkts) {
+			PMD_RX_LOG(DEBUG, rxq,
+				   "Budget reached nb_pkts=%u received=%u\n",
+				   rx_pkt, nb_pkts);
+			break;
+		}
+	}
+
+	qede_update_rx_prod(qdev, rxq);
+
+	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+
+	return rx_pkt;
+}
+
+static inline int
+qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+	uint16_t idx = TX_CONS(txq);
+	struct eth_tx_bd *tx_data_bd;
+	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
+
+	if (unlikely(!mbuf)) {
+		PMD_TX_LOG(ERR, txq,
+			   "null mbuf nb_tx_desc %u nb_tx_avail %u "
+			   "sw_tx_cons %u sw_tx_prod %u\n",
+			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
+			   TX_PROD(txq));
+		return -1;
+	}
+
+	/* Free now */
+	rte_pktmbuf_free_seg(mbuf);
+	txq->sw_tx_ring[idx].mbuf = NULL;
+	ecore_chain_consume(&txq->tx_pbl);
+	txq->nb_tx_avail++;
+
+	return 0;
+}
+
+static inline uint16_t
+qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+	uint16_t tx_compl = 0;
+	uint16_t hw_bd_cons;
+	int rc;
+
+	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+	rte_compiler_barrier();
+
+	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
+		rc = qede_free_tx_pkt(edev, txq);
+		if (rc) {
+			DP_NOTICE(edev, false,
+				  "hw_bd_cons = %d, chain_cons=%d\n",
+				  hw_bd_cons,
+				  ecore_chain_get_cons_idx(&txq->tx_pbl));
+			break;
+		}
+		txq->sw_tx_cons++;	/* Making TXD available */
+		tx_compl++;
+	}
+
+	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
+		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
+	return tx_compl;
+}
+
+uint16_t
+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct qede_tx_queue *txq = p_txq;
+	struct qede_dev *qdev = txq->qdev;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qede_fastpath *fp = &qdev->fp_array[txq->queue_id];
+	struct eth_tx_1st_bd *first_bd;
+	uint16_t nb_tx_pkts;
+	uint16_t nb_pkt_sent = 0;
+	uint16_t bd_prod;
+	uint16_t idx;
+	uint16_t tx_count;
+
+	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+		(void)qede_process_tx_compl(edev, txq);
+	}
+
+	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
+	if (unlikely(nb_tx_pkts == 0)) {
+		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
+			   nb_pkts, txq->nb_tx_avail);
+		return 0;
+	}
+
+	tx_count = nb_tx_pkts;
+	while (nb_tx_pkts--) {
+		/* Fill the entry in the SW ring and the BDs in the FW ring */
+		idx = TX_PROD(txq);
+		struct rte_mbuf *mbuf = *tx_pkts++;
+		txq->sw_tx_ring[idx].mbuf = mbuf;
+		first_bd = (struct eth_tx_1st_bd *)
+		    ecore_chain_produce(&txq->tx_pbl);
+		first_bd->data.bd_flags.bitfields =
+		    1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+		/* Map MBUF linear data for DMA and set in the first BD */
+		QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf),
+				     mbuf->data_len);
+
+		/* Descriptor based VLAN insertion */
+		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+			first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+		}
+
+		/* Offload the IP checksum in the hardware */
+		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		}
+
+		/* L4 checksum offload (tcp or udp) */
+		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+			first_bd->data.bd_flags.bitfields |=
+			    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+			/* IPv6 + extn. -> later */
+		}
+		first_bd->data.nbds = MAX_NUM_TX_BDS;
+		txq->sw_tx_prod++;
+		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
+		txq->nb_tx_avail--;
+		bd_prod =
+		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+		nb_pkt_sent++;
+	}
+
+	/* Write value of prod idx into bd_prod */
+	txq->tx_db.data.bd_prod = bd_prod;
+	rte_wmb();
+	rte_compiler_barrier();
+	DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
+	rte_wmb();
+
+	/* Check again for Tx completions */
+	(void)qede_process_tx_compl(edev, txq);
+
+	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
+		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+
+	return nb_pkt_sent;
+}
+
+int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_link_output link_output;
+	int rc = 0;
+
+	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+
+	if (qdev->state == QEDE_START) {
+		DP_INFO(edev, "device already started\n");
+		return 0;
+	}
+
+	if (qdev->state == QEDE_CLOSE) {
+		rc = qede_alloc_fp_array(qdev);
+		if (rc)
+			return rc;
+		qede_init_fp(qdev);
+		rc = qede_alloc_mem_load(qdev);
+		DP_INFO(edev, "Allocated %d RSS queues on %d TCs\n",
+			QEDE_RSS_CNT(qdev), qdev->num_tc);
+	} else if (qdev->state == QEDE_STOP) {
+		DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id);
+	} else {
+		DP_INFO(edev, "unknown state port %u\n",
+			eth_dev->data->port_id);
+		return -EINVAL;
+	}
+
+	if (rc) {
+		DP_ERR(edev, "Failed to allocate queue memory\n");
+		/* TBD: free */
+		return rc;
+	}
+
+	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+	qede_dev_set_link_state(eth_dev, true);
+
+	/* Query whether link is already-up */
+	memset(&link_output, 0, sizeof(link_output));
+	qdev->ops->common->get_link(edev, &link_output);
+	DP_NOTICE(edev, false, "link status: %s\n",
+		  link_output.link_up ? "up" : "down");
+
+	qdev->state = QEDE_START;
+
+	qede_config_rx_mode(eth_dev);
+
+	DP_INFO(edev, "dev_state is QEDE_START\n");
+
+	return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+			  struct qede_tx_queue *txq, bool allow_drain)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	int rc, cnt = 1000;
+
+	while (txq->sw_tx_cons != txq->sw_tx_prod) {
+		qede_process_tx_compl(edev, txq);
+		if (!cnt) {
+			if (allow_drain) {
+				DP_NOTICE(edev, false,
+					  "Tx queue[%u] is stuck, "
+					  "requesting MCP to drain\n",
+					  txq->queue_id);
+				rc = qdev->ops->common->drain(edev);
+				if (rc)
+					return rc;
+				return qede_drain_txq(qdev, txq, false);
+			}
+
+			DP_NOTICE(edev, false,
+				  "Timeout waiting for tx queue[%d]: "
+				  "PROD=%d, CONS=%d\n",
+				  txq->queue_id, txq->sw_tx_prod,
+				  txq->sw_tx_cons);
+			return -ENODEV;
+		}
+		cnt--;
+		DELAY(1000);
+		rte_compiler_barrier();
+	}
+
+	/* FW finished processing, wait for HW to transmit all tx packets */
+	DELAY(2000);
+
+	return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *qdev)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc, tc, i;
+
+	/* Disable the vport */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = 0;
+	vport_update_params.update_vport_active_flg = 1;
+	vport_update_params.vport_active_flg = 0;
+	vport_update_params.update_rss_flg = 0;
+
+	DP_INFO(edev, "vport_update\n");
+
+	rc = qdev->ops->vport_update(edev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Failed to update vport\n");
+		return rc;
+	}
+
+	DP_INFO(edev, "Flushing tx queues\n");
+
+	/* Flush Tx queues. If needed, request drain from MCP */
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &qdev->fp_array[i];
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qede_tx_queue *txq = fp->txqs[tc];
+			rc = qede_drain_txq(qdev, txq, true);
+			if (rc)
+				return rc;
+		}
+	}
+
+	/* Stop all Queues in reverse order */
+	for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {
+		struct qed_stop_rxq_params rx_params;
+
+		/* Stop the Tx Queue(s) */
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qed_stop_txq_params tx_params;
+
+			memset(&tx_params, 0, sizeof(tx_params));
+			tx_params.rss_id = i;
+			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;
+
+			DP_INFO(edev, "Stopping tx queues\n");
+			rc = qdev->ops->q_tx_stop(edev, &tx_params);
+			if (rc) {
+				DP_ERR(edev, "Failed to stop TXQ #%d\n",
+				       tx_params.tx_queue_id);
+				return rc;
+			}
+		}
+
+		/* Stop the Rx Queue */
+		memset(&rx_params, 0, sizeof(rx_params));
+		rx_params.rss_id = i;
+		rx_params.rx_queue_id = i;
+		rx_params.eq_completion_only = 1;
+
+		DP_INFO(edev, "Stopping rx queues\n");
+
+		rc = qdev->ops->q_rx_stop(edev, &rx_params);
+		if (rc) {
+			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+			return rc;
+		}
+	}
+
+	DP_INFO(edev, "Stopping vports\n");
+
+	/* Stop the vport */
+	rc = qdev->ops->vport_stop(edev, 0);
+	if (rc)
+		DP_ERR(edev, "Failed to stop VPORT\n");
+
+	return rc;
+}
+
+void qede_reset_fp_rings(struct qede_dev *qdev)
+{
+	uint16_t rss_id;
+	uint8_t tc;
+
+	for_each_rss(rss_id) {
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+
+		DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id);
+		ecore_chain_reset(&fp->rxq->rx_bd_ring);
+		ecore_chain_reset(&fp->rxq->rx_comp_ring);
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			struct qede_tx_queue *txq = fp->txqs[tc];
+			ecore_chain_reset(&txq->tx_pbl);
+		}
+	}
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+	uint8_t tc;
+
+	qede_rx_queue_release(fp->rxq);
+	for (tc = 0; tc < qdev->num_tc; tc++)
+		qede_tx_queue_release(fp->txqs[tc]);
+}
+
+void qede_free_mem_load(struct qede_dev *qdev)
+{
+	uint8_t rss_id;
+
+	for_each_rss(rss_id) {
+		struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+		qede_free_mem_fp(qdev, fp);
+	}
+	/* qdev->num_rss = 0; */
+}
+
+/*
+ * Stop an Ethernet device. The device can be restarted with a call to
+ * rte_eth_dev_start().
+ * Do not change link state and do not release sw structures.
+ */
+void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	int rc;
+
+	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+
+	if (qdev->state != QEDE_START) {
+		DP_INFO(edev, "device not yet started\n");
+		return;
+	}
+
+	rc = qede_stop_queues(qdev);
+	if (rc)
+		DP_ERR(edev, "Failed to stop queues\n");
+	else
+		DP_INFO(edev, "Stopped queues\n");
+
+	qdev->ops->fastpath_stop(edev);
+
+	qede_reset_fp_rings(qdev);
+
+	qdev->state = QEDE_STOP;
+
+	DP_INFO(edev, "dev_state is QEDE_STOP\n");
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
new file mode 100644
index 0000000..34eaf8b
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_RXTX_H_
+#define _QEDE_RXTX_H_
+
+#include "qede_ethdev.h"
+
+/* Ring Descriptors */
+#define RX_RING_SIZE_POW        16	/* 64K */
+#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN          128
+#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
+#define NUM_RX_BDS(q)           (q->nb_rx_desc - 1)
+
+#define TX_RING_SIZE_POW        16	/* 64K */
+#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN          128
+#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
+#define NUM_TX_BDS(q)           (q->nb_tx_desc - 1)
+
+#define TX_CONS(txq)            (txq->sw_tx_cons & NUM_TX_BDS(txq))
+#define TX_PROD(txq)            (txq->sw_tx_prod & NUM_TX_BDS(txq))
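
The TX_PROD()/TX_CONS() macros rely on nb_tx_desc being a power of two, so
masking with NUM_TX_BDS() acts as a cheap modulo on the free-running 16-bit
counters. A minimal sketch of the pattern, assuming a populated txq and an
mbuf taken from the Tx burst:

    /* Sketch: sw_tx_prod grows without bound; the mask in TX_PROD()
     * wraps it into a valid ring slot (requires nb_tx_desc == 2^n).
     */
    struct qede_tx_entry *ent = &txq->sw_tx_ring[TX_PROD(txq)];

    ent->mbuf = mbuf;      /* mbuf: packet being queued (assumed) */
    txq->sw_tx_prod++;     /* wrap is handled by the mask, not here */
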
+
+/* Number of Tx BDs currently used per packet */
+#define MAX_NUM_TX_BDS			1
+
+#define QEDE_DEFAULT_TX_FREE_THRESH	32
+
+#define QEDE_CSUM_ERROR			(1 << 0)
+#define QEDE_CSUM_UNNECESSARY		(1 << 1)
+#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)
+
+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
+	do { \
+		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
+		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
+		(bd)->nbytes = rte_cpu_to_le_16(len); \
+	} while (0)
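
QEDE_BD_SET_ADDR_LEN() splits a 64-bit bus address into the little-endian
hi/lo words the FW expects and records the fragment length. A sketch of
programming one Tx BD, where bd1 and mbuf are assumed to come from the
surrounding Tx path:

    /* Sketch: rte_pktmbuf_mtophys() yields the DMA address of the
     * mbuf data; the macro byte-swaps and stores address and length.
     */
    phys_addr_t mapping = rte_pktmbuf_mtophys(mbuf);

    QEDE_BD_SET_ADDR_LEN(bd1, mapping, mbuf->data_len);
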
+
+#define CQE_HAS_VLAN(flags) \
+	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
+		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
+
+#define CQE_HAS_OUTER_VLAN(flags) \
+	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
+		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
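
On the Rx path these tests on the CQE parsing flags typically feed the mbuf
VLAN offload fields. A sketch, where parse_flag is read from the completion
entry and the vlan_tag CQE field name is an assumption:

    /* Sketch: translate FW parsing flags into mbuf VLAN metadata */
    if (CQE_HAS_VLAN(parse_flag)) {
            rx_mb->vlan_tci = rte_le_to_cpu_16(cqe->vlan_tag);
            rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
    }
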
+
+/* Max supported alignment is 256 (shift of 8);
+ * the minimal alignment shift of 6 is optimal for 57xxx HW performance.
+ */
+#define QEDE_L1_CACHE_SHIFT	6
+#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
+#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
+
+#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
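
Because QEDE_FW_RX_ALIGN_END is a power of two by construction, Rx buffer
sizing can round up with the stock DPDK helper. A sketch, with data_size
standing in for the usable mbuf data room (an assumed variable):

    /* Sketch: RTE_ALIGN_CEIL() requires a power-of-two alignment,
     * which 1UL << QEDE_RX_ALIGN_SHIFT guarantees.
     */
    uint16_t rx_buf_size = RTE_ALIGN_CEIL(data_size, QEDE_FW_RX_ALIGN_END);
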
+
+/* TBD: Excluding IPV6 */
+#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
+				 ETH_RSS_NONFRAG_IPV4_UDP)
+
+#define QEDE_TXQ_FLAGS		((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
+
+#define MAX_NUM_TC		8
+
+#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)
+
+/*
+ * RX BD descriptor ring
+ */
+struct qede_rx_entry {
+	struct rte_mbuf *mbuf;
+	uint32_t page_offset;
+	/* allows expansion .. */
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct qede_rx_queue {
+	struct rte_mempool *mb_pool;
+	struct ecore_chain rx_bd_ring;
+	struct ecore_chain rx_comp_ring;
+	uint16_t *hw_cons_ptr;
+	void OSAL_IOMEM *hw_rxq_prod_addr;
+	struct qede_rx_entry *sw_rx_ring;
+	uint16_t sw_rx_cons;
+	uint16_t sw_rx_prod;
+	uint16_t nb_rx_desc;
+	uint16_t queue_id;
+	uint16_t port_id;
+	uint16_t rx_buf_size;
+	uint64_t rx_hw_errors;
+	uint64_t rx_alloc_errors;
+	struct qede_dev *qdev;
+};
+
+/*
+ * TX BD descriptor ring
+ */
+struct qede_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint8_t flags;
+};
+
+union db_prod {
+	struct eth_db_data data;
+	uint32_t raw;
+};
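
The union lets the driver compose the doorbell record field by field and
then issue it to the BAR as a single 32-bit store, keeping the update atomic
from the HW side. A sketch of a Tx producer update; the bd_prod field of
eth_db_data is an assumption here:

    /* Sketch: order BD writes before the doorbell, then publish the
     * whole record with one raw store.
     */
    txq->tx_db.data.bd_prod = rte_cpu_to_le_16(bd_prod);
    rte_wmb();
    *(volatile uint32_t *)txq->doorbell_addr = txq->tx_db.raw;
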
+
+struct qede_tx_queue {
+	struct ecore_chain tx_pbl;
+	struct qede_tx_entry *sw_tx_ring;
+	uint16_t nb_tx_desc;
+	uint16_t nb_tx_avail;
+	uint16_t tx_free_thresh;
+	uint16_t queue_id;
+	uint16_t *hw_cons_ptr;
+	uint16_t sw_tx_cons;
+	uint16_t sw_tx_prod;
+	void OSAL_IOMEM *doorbell_addr;
+	volatile union db_prod tx_db;
+	uint16_t port_id;
+	uint64_t txq_counter;
+	struct qede_dev *qdev;
+};
+
+struct qede_fastpath {
+	struct qede_dev *qdev;
+	uint8_t rss_id;
+	struct ecore_sb_info *sb_info;
+	struct qede_rx_queue *rxq;
+	struct qede_tx_queue *txqs[MAX_NUM_TC];
+	char name[80];
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
+
+int qede_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_desc,
+			unsigned int socket_id,
+			const struct rte_eth_txconf *tx_conf);
+
+void qede_rx_queue_release(void *rx_queue);
+
+void qede_tx_queue_release(void *tx_queue);
+
+int qede_dev_start(struct rte_eth_dev *eth_dev);
+
+void qede_dev_stop(struct rte_eth_dev *eth_dev);
+
+void qede_reset_fp_rings(struct qede_dev *qdev);
+
+void qede_free_fp_arrays(struct qede_dev *qdev);
+
+void qede_free_mem_load(struct qede_dev *qdev);
+
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts);
+
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+			uint16_t nb_pkts);
+
+#endif /* _QEDE_RXTX_H_ */
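
Applications never call these entry points directly; they are reached
through the generic ethdev API once the ops table is registered. A minimal
sketch of the flow that exercises them, assuming port 0 and a pre-created
mempool mb_pool:

    /* Sketch: generic ethdev calls that land in the qede routines above */
    struct rte_eth_conf port_conf;
    struct rte_mbuf *pkts[32];
    uint16_t nb;

    memset(&port_conf, 0, sizeof(port_conf));
    rte_eth_dev_configure(0, 1, 1, &port_conf);
    rte_eth_rx_queue_setup(0, 0, 1024, rte_socket_id(), NULL, mb_pool);
    rte_eth_tx_queue_setup(0, 0, 1024, rte_socket_id(), NULL);
    rte_eth_dev_start(0);                    /* -> qede_dev_start() */

    nb = rte_eth_rx_burst(0, 0, pkts, 32);   /* -> qede_recv_pkts() */
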
diff --git a/drivers/net/qede/rte_pmd_qede_version.map b/drivers/net/qede/rte_pmd_qede_version.map
new file mode 100644
index 0000000..349c6e1
--- /dev/null
+++ b/drivers/net/qede/rte_pmd_qede_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+	local: *;
+};
-- 
1.7.10.3


