[dpdk-dev] [PATCH] crypto/ccp: Add support for AMD CCP Crypto poll mode driver

Ravi Kumar ravi1.kumar at amd.com
Fri Aug 25 17:45:33 CEST 2017


The CCP poll mode driver library (librte_pmd_ccp) implements support for AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto poll mode driver which schedules crypto operations to one or more available CCP hardware engines on the platform. The CCP PMD provides poll mode crypto driver support for the following hardware accelerator devices:
-	AMD Cryptographic Co-processor (0x1456)
-	AMD Cryptographic Co-processor (0x1468)
The patch set adds support for the following algorithms:
-	Cipher Algorithm support
o	RTE_CRYPTO_CIPHER_AES_ECB
o	RTE_CRYPTO_CIPHER_AES_CBC
o	RTE_CRYPTO_CIPHER_AES_CTR
o	RTE_CRYPTO_CIPHER_3DES_CBC
-	Authentication Algorithm support
o	RTE_CRYPTO_AUTH_SHA1
o	RTE_CRYPTO_AUTH_SHA1_HMAC
o	RTE_CRYPTO_AUTH_SHA224
o	RTE_CRYPTO_AUTH_SHA224_HMAC
o	RTE_CRYPTO_AUTH_SHA256
o	RTE_CRYPTO_AUTH_SHA256_HMAC
o	RTE_CRYPTO_AUTH_SHA384
o	RTE_CRYPTO_AUTH_SHA384_HMAC
o	RTE_CRYPTO_AUTH_SHA512
o	RTE_CRYPTO_AUTH_SHA512_HMAC
o	RTE_CRYPTO_AUTH_MD5_HMAC
o	RTE_CRYPTO_AUTH_AES_CMAC
Additions to the crypto library to support the SHA3 authentication algorithms:
o	RTE_CRYPTO_AUTH_SHA3_224
o	RTE_CRYPTO_AUTH_SHA3_224_HMAC
o	RTE_CRYPTO_AUTH_SHA3_256
o	RTE_CRYPTO_AUTH_SHA3_256_HMAC
o	RTE_CRYPTO_AUTH_SHA3_384
o	RTE_CRYPTO_AUTH_SHA3_384_HMAC
o	RTE_CRYPTO_AUTH_SHA3_512
o	RTE_CRYPTO_AUTH_SHA3_512_HMAC
-	AEAD Algorithm Support
o	RTE_CRYPTO_AEAD_AES_GCM

The CCP PMD is not enabled by default. The following option must be enabled in the DPDK configuration to build it:
CONFIG_RTE_LIBRTE_PMD_CCP (default n)
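
For example, one way to enable and build the PMD (a sketch assuming the
x86_64-native-linuxapp-gcc target; adjust for your environment):

    make config T=x86_64-native-linuxapp-gcc
    sed -i 's/CONFIG_RTE_LIBRTE_PMD_CCP=n/CONFIG_RTE_LIBRTE_PMD_CCP=y/' build/.config
    make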

Signed-off-by: Ravi Kumar <ravi1.kumar at amd.com>
---
 MAINTAINERS                                |    5 +
 config/common_base                         |    6 +
 doc/guides/cryptodevs/ccp.rst              |  126 ++
 doc/guides/cryptodevs/features/ccp.ini     |   57 +
 doc/guides/cryptodevs/features/default.ini |   12 +
 doc/guides/cryptodevs/index.rst            |    1 +
 drivers/crypto/Makefile                    |    2 +
 drivers/crypto/ccp/Makefile                |   71 +
 drivers/crypto/ccp/ccp_crypto.c            | 3008 ++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h            |  411 ++++
 drivers/crypto/ccp/ccp_dev.c               |  847 ++++++++
 drivers/crypto/ccp/ccp_dev.h               |  533 +++++
 drivers/crypto/ccp/ccp_pci.c               |  331 +++
 drivers/crypto/ccp/ccp_pci.h               |   58 +
 drivers/crypto/ccp/ccp_pmd_ops.c           |  860 ++++++++
 drivers/crypto/ccp/ccp_pmd_private.h       |  135 ++
 drivers/crypto/ccp/rte_ccp_pmd.c           |  283 +++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |    3 +
 lib/librte_cryptodev/rte_crypto_sym.h      |   18 +
 mk/rte.app.mk                              |    1 +
 20 files changed, 6768 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a0cd75e..cce6bce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -586,6 +586,11 @@ M: Fan Zhang <roy.fan.zhang at intel.com>
 F: drivers/crypto/scheduler/
 F: doc/guides/cryptodevs/scheduler.rst
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <Ravi1.Kumar at amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
 
 Eventdev Drivers
 ----------------
diff --git a/config/common_base b/config/common_base
index 5e97a08..3353e19 100644
--- a/config/common_base
+++ b/config/common_base
@@ -514,6 +514,12 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
+
+#
 # Compile generic event device library
 #
 CONFIG_RTE_LIBRTE_EVENTDEV=y
diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..83343fe
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,126 @@
+..  BSD LICENSE
+    Copyright(c) 2017 Advanced Micro Devices, Inc.
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions
+    are met:
+
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+    * Neither the name of Advanced Micro Devices, Inc nor the names of
+    its contributors may be used to endorse or promote products derived
+    from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the CCP poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP PMD has support for:
+
+Supported cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Supported authentication algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+Supported AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, enable it in the config/common_base file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication on the CPU while the
+cipher is offloaded to the CCP. To enable this feature, set the following
+option in the configuration:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+This code was verified on Ubuntu 16.04.
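+
+A minimal native build might then look like the following (a sketch assuming
+the ``x86_64-native-linuxapp-gcc`` target; adjust for your environment):
+
+.. code-block:: console
+
+	make config T=x86_64-native-linuxapp-gcc
+	make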
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD.
+For example, for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is with the ``dpdk-devbind.py`` script.
+The following command assumes a ``BDF`` of ``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
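+The bound devices can be confirmed with::
+
+	./usertools/dpdk-devbind.py --status
+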
+To verify real traffic, the l2fwd-crypto example application can be used with
+the following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+	--auth_op GENERATE --auth_algo SHA1_HMAC
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* MD5_HMAC is supported only when ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is
+  enabled in the configuration.
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..6184a67
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 0926887..d09943f 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -26,6 +26,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -60,6 +63,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224      =
+SHA3_224 HMAC =
+SHA3_256      =
+SHA3_256 HMAC =
+SHA3_384      =
+SHA3_384 HMAC =
+SHA3_512      =
+SHA3_512 HMAC =
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 361b82d..f31c978 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -39,6 +39,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     kasumi
     openssl
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7a719b9..3505649 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -55,5 +55,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DEPDIRS-null = $(core-libs)
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DEPDIRS-dpaa2_sec = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+DEPDIRS-ccp = $(core-libs)
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..8b6a15a
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,71 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Advanced Micro Devices, Inc nor the names
+#       of its contributors may be used to endorse or promote products
+#       derived from this software without specific prior written
+#       permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library dependencies
+LDLIBS += -lcrypto
+LDLIBS += -lpthread
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..63da1cd
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,3008 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include <ccp_dev.h>
+#include <ccp_crypto.h>
+#include <ccp_pci.h>
+#include <ccp_pmd_private.h>
+
+#include <openssl/sha.h> /* partial hash APIs */
+#include <openssl/cmac.h> /* subkey APIs */
+#include <openssl/evp.h> /* subkey APIs */
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+#endif
+
+extern uint8_t cryptodev_driver_id;
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**
+	 * The portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/* Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/* total context size: 200 bytes */
+	};
+	unsigned int byteIndex;
+	/**
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * the double size of the hash output in
+	 * words (e.g. 16 for Keccak 512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
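+/*
+ * Map an rte_crypto_sym_xform chain onto a CCP command order; e.g. a
+ * CIPHER xform chained to an AUTH xform maps to CCP_CMD_CIPHER_HASH.
+ */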
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
+
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+/* partial hash using OpenSSL */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+#endif
+
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+/* Absorb message bytes into the Keccak sponge: partial words are
+ * buffered in ctx->saved, whole words are XORed into the state, and
+ * keccakf() runs each time a full rate block has been absorbed.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
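+/*
+ * Precompute the HMAC inner and outer partial hashes, H(key ^ ipad) and
+ * H(key ^ opad); they are stored back to back in sess->auth.pre_compute
+ * so the CCP can resume hashing from these intermediate states.
+ */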
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
+	int i, count;
+	uint8_t *hash_value_sha3;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* key is zero-padded to the algorithm block size at session setup */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid session algo");
+		return -1;
+	}
+}
+
+/* prepare temporary keys K1 and K2 */
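+/* (Per NIST SP 800-38B each subkey doubles the previous block in
+ * GF(2^128); a carry out of the MSB is folded back with 0x87 for
+ * 16-byte blocks, or 0x1b for 8-byte blocks.)
+ */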
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/**subkeys K1 and K2 generation for CMAC*/
+static int
+generate_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC type length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC Init failed");
+	return -1;
+}
+#endif
+
+/**configure session*/
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i, j, x, ctr_key_len;
+
+	cipher_xform = &xform->cipher;
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		ctr_key_len = sess->cipher.key_length;
+		for (i = 0; i < ctr_key_len; i++)
+			sess->cipher.key_ccp[ctr_key_len - i - 1] =
+				sess->cipher.key[i];
+		goto finish;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.aes_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
+	default:
+		/* should never reach here */
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -1;
+	}
+finish:
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	size_t i;
+#endif
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/* padding and hash result */
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_subkeys(sess))
+			return -1;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -1;
+	}
+#else
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -1;
+	}
+#endif
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i, ctr_key_len;
+
+	aead_xform = &xform->aead;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		ctr_key_len = sess->cipher.key_length;
+		for (i = 0; i < ctr_key_len; i++)
+			sess->cipher.key_ccp[ctr_key_len - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->auth.aad_length = aead_xform->aad_length;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
+	default:
+		return -ENOTSUP;
+	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv*/
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**<only op*/
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv*/
+		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv*/
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported ALGO %d", session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
+		count = 3;
+		/**< op + lsb passthrough cpy to/from*/
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = 6;
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad); to LSB
+		 * 2. generate IHash = H(hash on message with PHash1
+		 * as init values);
+		 * 3. Retrieve IHash 2 slots for 384/512
+		 * 4. Load Phash2 = H(k ^ opad); to LSB
+		 * 5. generate FHash = H(hash on Ihash with Phash2
+		 * as init value);
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< only op ctx and dst in host memory*/
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to Perform Ihash
+		 * 2. Retrieve result from LSB to host memory
+		 * 3. Perform final hash
+		 */
+		break;
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * op
+		 * extra descriptor in padding case
+		 * (k1/k2(255:128) with iv(127:0))
+		 * Retrieve result
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported ALGO %d", session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_combined_mode_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_GCM:
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined cipher ALGO %d",
+			    session->cipher.algo);
+	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		count = ccp_auth_slot(session);
+#endif
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		count += ccp_auth_slot(session);
+#endif
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_combined_mode_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+
+	}
+
+	return count;
+}
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static int
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	size_t dstlen;
+	unsigned char temp_dst[64];
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	rte_memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int
+cpu_auth_verify(struct rte_crypto_op *op)
+{
+	int offset;
+	uint8_t *addr;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	addr = (uint8_t *)((char *)op->sym->m_src->buf_addr +
+			   op->sym->m_src->data_off +
+			   op->sym->m_src->data_len -
+			   session->auth.digest_length);
+	offset = session->auth.offset;
+
+	return memcmp(addr + offset,
+		      op->sym->auth.digest.data,
+		      session->auth.digest_length);
+}
+
+static int
+cpu_crypto_auth(struct rte_crypto_op *op, struct ccp_session *sess,
+		EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	if (algo_select(sess->auth.algo, &algo) < 0) {
+		CCP_LOG_ERR("Invalid/unsupported auth algo");
+		return -EINVAL;
+	}
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+						    sess->auth.digest_length);
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						     op->sym->auth.data.offset +
+						     sess->auth.digest_length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	EVP_PKEY_free(pkey);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		else
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		rte_pktmbuf_trim(mbuf_src,
+				 sess->auth.digest_length);
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+
+	return 0;
+}
+#endif
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
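+	/* dir = 1: copy from system memory into the LSB scratch area;
+	 * dir = 0: copy from the LSB back out to system memory.
+	 */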
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/* Load PHash1 to LSB */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* SHA engine command descriptor for intermediate hash */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
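+	/* The ipad block already folded into PHash1 counts towards the
+	 * total message length, hence the extra block_size bytes.
+	 */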
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size) * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/* Load PHash2 to LSB */
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* SHA engine command descriptor for final hash */
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
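+	/* The digest is staged in an area appended to the source mbuf;
+	 * ccp_auth_dq_prepare() copies it out and trims the mbuf at dequeue.
+	 */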
+
+	/* Passthru SHA context */
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* Prepare SHA command descriptor */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
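+	/* The intermediate hash is staged in the upper half of the appended
+	 * ctx area and fed back as source for the final hash below.
+	 */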
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* desc1 for SHA3-IHash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/* SHA engine command descriptor for final hash */
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
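+	/* Block-aligned data is CMACed in a single pass with the
+	 * precomputed K1 block loaded through the LSB; otherwise the final
+	 * partial block is padded with 0x80 and processed in a second
+	 * command using the K2 block.
+	 */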
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/*Command 1*/
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/*Command 2*/
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
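+	/* For CTR, the nonce and IV are combined into a counter block in
+	 * the session nonce buffer; for CBC, the IV is staged in the batch
+	 * lsb_buf. ECB needs no IV passthrough at all.
+	 */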
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE +
+				   CTR_NONCE_SIZE, iv, CTR_IV_SIZE);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (likely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -1;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode != CCP_DES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf, *append_ptr, *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len, i;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						   session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed");
+		return -1;
+	}
+	digest_dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	append_ptr += AES_BLOCK_SIZE;
+	temp = (uint64_t *)append_ptr;
+	*temp++ = rte_bswap64(session->auth.aad_length << 3);
+	*temp = rte_bswap64(op->sym->cipher.data.length << 3);
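+	/* The final GHASH block holds the big-endian bit lengths
+	 * (AAD_len || PT_len); it is staged right after the digest area
+	 * appended above and consumed by the GHASH-Final command.
+	 */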
+
+	non_align_len = op->sym->cipher.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->cipher.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = rte_mem_virt2phy((void *)op->sym->aead.aad.data);
+
+	/* CMD1 IV Passthru */
+	for (i = 0;  i < CTR_IV_SIZE; i++)
+		session->cipher.nonce[CTR_NONCE_SIZE + CTR_IV_SIZE - 1 - i] =
+			iv[i];
+	lsb_buf = session->cipher.nonce;
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len) */
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)(digest_dest_addr + AES_BLOCK_SIZE));
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
+{
+	int result;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported Cipher algo");
+		result = -1;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported Cipher algo");
+		result = -1;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_combined(struct rte_crypto_op *op,
+		    struct ccp_queue *cmd_q,
+		    struct ccp_batch_info *b_info)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined algo");
+		return -1;
+	}
+	return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_only = 1;
+#endif
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
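+	/* head_offset records the queue position before submission so the
+	 * dequeue path can tell whether the hardware head pointer has
+	 * passed this batch.
+	 */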
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+			result = cpu_crypto_auth(op[i], session, auth_ctx);
+#endif
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+			b_info->auth_only = 0;
+#endif
+			break;
+		case CCP_CMD_HASH_CIPHER:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+			result = cpu_crypto_auth(op[i], session, auth_ctx);
+			b_info->auth_only = 0;
+#endif
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_combined(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
+	return i;
+}
+
+static inline void
+ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_addr, *addr;
+	struct rte_mbuf *m_last;
+	int offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
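+	/* The digest was staged by the perform routines in the ctx area
+	 * appended to the last mbuf segment; it is copied out or verified
+	 * here and the area trimmed below.
+	 */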
+
+	rte_mb();
+	offset = session->auth.offset;
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset,
+			   op->sym->auth.digest.data,
+			   session->auth.digest_length)
+		    != 0)
+			op->status =
+				RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		digest_addr = op->sym->auth.digest.data;
+		if (unlikely(digest_addr == NULL))
+			digest_addr = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst,
+					uint8_t *,
+					(op->sym->auth.data.offset +
+					 op->sym->auth.data.length));
+		rte_memcpy(digest_addr, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						 cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
+		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(op_d[i], session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
+		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			if (!cpu_auth_verify(op_d[i]))
+				op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			else
+				op_d[i]->status =
+					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_only == 1)
+		goto success;
+#endif
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
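+	/* The descriptor ring is circular: the batch is complete only once
+	 * the hardware head pointer has moved outside the
+	 * [head_offset, tail_offset) window, accounting for wrap-around.
+	 */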
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..b74cbe0
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,411 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include <ccp_dev.h>
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
+/** HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
+/** SHA */
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE	104
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
+
+/** SHA LSB initialization values */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+		/**< CCP key format */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		/**< AES-CTR: nonce(4) + iv(8) + counter */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilities) */
+		uint8_t key[144];
+		/**< max be key size of AES-CMAC is 32*/
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+		/**<Buffer to store  Software generated precomute values*/
+		/**< For HMAC H(ipad ^ key) and H(opad ^ key) */
+		/**< For CMAC K1 IV and K2 IV*/
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**<SHA3 initial ctx all zeros*/
+		uint8_t sha3_ctx[200];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
+
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
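+/*
+ * Usage sketch (illustrative only): the PMD session-configure path
+ * populates the private session from the application's xform chain;
+ * a non-zero return marks the chain as unsupported:
+ *
+ *	if (ccp_set_session_parameters(sess, xform) != 0)
+ *		return -EINVAL;
+ */
+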
+/**
+ * Compute the count of queue slots required by a session
+ *
+ * @param session CCP private session
+ * @return count of slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops no. of ops to be submitted
+ * @param slots_req no. of queue slots required for the ops
+ * @return 0 on success otherwise -1
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * Process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return 0 on success otherwise -1
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
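+/*
+ * Typical burst flow (a sketch; 'cdev', 'qp', 'ops', 'nb_ops' and
+ * 'slots' are hypothetical, and ccp_allot_queue() is declared in
+ * ccp_dev.h):
+ *
+ *	cmd_q = ccp_allot_queue(cdev, slots);
+ *	if (cmd_q != NULL)
+ *		process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots);
+ *	...
+ *	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+ */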
+
+/**
+ * APIs for SHA3 partial hash generation
+ *
+ * @param data_in pointer to the buffer over which the partial hash is
+ * computed
+ * @param data_out buffer where the partial hash result is written in
+ * CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..2bf7e22
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,847 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include <ccp_dev.h>
+#include <ccp_pci.h>
+#include <ccp_pmd_private.h>
+
+#include <openssl/sha.h>  /* partial hash APIs */
+#include <openssl/cmac.h> /* subkey APIs */
+#include <openssl/evp.h>  /* subkey APIs */
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/*---bitmap support apis---*/
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
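+
+/*
+ * Worked example: with map = 0x6 (bits 1 and 2 set), size = 16,
+ * start = 0 and nr = 2, the first pass yields index = 0, but bit 1 is
+ * set inside [0, 2), so the search restarts at bit 2; the next zero
+ * bit is 3 and [3, 5) is clear, so 3 is returned.
+ */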
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions of mask %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		if (q_mz == NULL)
+			return -ENOMEM;
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot kept unused as a barrier between head & tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* only the least significant 24 bits are valid:
+	 * class, subclass, program interface
+	 */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for the uio device to appear;
+		 * wait here and try again.
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR);
+	if (uio_fd < 0)
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		close(res_fd); /* the mapping remains valid after close */
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add it to the list */
+	if (ccp_add_device(ccp_dev, ccp_type))
+		/* ccp_add_device() only inserts in ccp_list on success */
+		goto fail;
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd >= 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..536ab94
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,533 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/** CCP specific */
+#define MAX_HW_QUEUES                   5
+#define TRNG_RETRIES                    10
+#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+/****** Register Mappings ******/
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* ----------- CCP Version 5 Specifics ------------ */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR              0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN                      0x1
+
+#define CMD_Q_SIZE                     0x1F
+#define CMD_Q_SHIFT                    3
+#define COMMANDS_PER_QUEUE              2048
+
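+/* QUEUE_SIZE_VAL below evaluates to log2(COMMANDS_PER_QUEUE) - 1, i.e.
+ * (ffs(2048) - 2) & CMD_Q_SIZE = 10; this is the encoding written into
+ * the queue-size field of the control register in ccp_add_device().
+ */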
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* ------------------------ General CCP Defines ------------------------ */
+
+#define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing statistics.
+ */
+struct stats {
+	uint64_t enq_cnt;
+	uint64_t deq_cnt;
+	uint64_t err_cnt;
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+	struct stats stat;
+
+	rte_atomic64_t free_slots;
+	/**< available free slots, updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue  address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg*/
+
+	int lsb;
+	/**< LSB region assigned to queue */
+	unsigned long lsbmask;
+	/**< LSB regions the queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all LSB resources the queue is using */
+	uint32_t sb_key;
+	/**< LSB slot assigned for key */
+	uint32_t sb_iv;
+	/**< LSB slot assigned for iv */
+	uint32_t sb_sha;
+	/**< LSB slot assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< LSB slot assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< CCP dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< CCP command queues */
+	int cmd_q_count;
+	/**< no. of CCP queues */
+	struct rte_pci_device pci;
+	struct stats stat;
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared LSB mask of CCP */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared LSB region allocation */
+	int qidx;
+	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+
+/** CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+
+/***** Passthru engine *****/
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
+
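+/*
+ * Illustrative encoding (a sketch, not code from this patch): a 128-bit
+ * AES-CBC encrypt would be composed as
+ *
+ *	union ccp_function f;
+ *
+ *	f.raw = 0;
+ *	f.aes.encrypt = CCP_CIPHER_DIR_ENCRYPT;
+ *	f.aes.mode = CCP_AES_MODE_CBC;
+ *	f.aes.type = CCP_AES_TYPE_128;
+ *
+ * with the enum values taken from ccp_crypto.h, and the result placed
+ * in the 15-bit function field of dword0.
+ */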
+
+/**
+ * Descriptor for CCP version 5 commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE__LAST,
+};
+
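+/*
+ * Illustrative descriptor setup for a pass-through copy (a sketch;
+ * 'len', 'src' and 'dst' are hypothetical):
+ *
+ *	struct ccp_desc desc;
+ *
+ *	memset(&desc, 0, Q_DESC_SIZE);
+ *	CCP_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
+ *	CCP_CMD_EOM(&desc) = 1;
+ *	CCP_CMD_LEN(&desc) = len;
+ *	CCP_CMD_SRC_LO(&desc) = low32_value(src);
+ *	CCP_CMD_SRC_HI(&desc) = high32_value(src);
+ *	CCP_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ *	CCP_CMD_DST_LO(&desc) = low32_value(dst);
+ *	CCP_CMD_DST_HI(&desc) = high32_value(dst);
+ *	CCP_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ */
+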
+/**
+ * Command id used to follow the session's chain order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/**
+ * Start CCP device
+ *
+ * @param dev rte crypto device
+ * @return 0 on success
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * Allocate a CCP command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * Read a value from the hardware RNG
+ *
+ * @param trng_value data pointer the RNG value is written to
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..efc2ebf
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,331 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include <ccp_pci.h>
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	for (i = 0; i < (int)(sizeof(uio_module_names) /
+			      sizeof(uio_module_names[0])); i++) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert the igb_uio or uio_pci_generic kernel module\n");
+	return -1; /* uio module not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
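+
+/*
+ * Example: "0000:03:00.2" parses to domain = 0x0000, bus = 0x03,
+ * devid = 0x00 and function = 2.
+ */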
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the uio entry location
+		 * differs across kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
+
+#define UIO_NEWID "/sys/bus/pci/drivers/%s/new_id"
+#define UIO_BIND  "/sys/bus/pci/drivers/%s/bind"
+
+int
+ccp_bind_uio(int idx, const char *dirname, struct rte_pci_id *id)
+{
+	FILE *fp;
+	int cnt;
+	char path[PATH_MAX];
+	char *name = NULL;
+	char buf[BUFSIZ];
+	char driver_name[PATH_MAX];
+	char filename[PATH_MAX];
+
+	snprintf(filename, sizeof(filename), "%s/driver", dirname);
+	cnt = readlink(filename, path, PATH_MAX);
+	if (cnt >= PATH_MAX)
+		return -1;
+	if (cnt >= 0) {
+		path[cnt] = '\0';
+		name = strrchr(path, '/');
+		if (name) {
+			strncpy(driver_name, name + 1, strlen(name + 1) + 1);
+			if (!strcmp(driver_name, uio_module_names[0]) ||
+			    !strcmp(driver_name, uio_module_names[1]))
+				return 0; /* already bound */
+			snprintf(filename, sizeof(filename), "%s/driver/unbind",
+				 dirname);
+			fp = fopen(filename, "w");
+			if (fp != NULL) {
+				name = strrchr(dirname, '/');
+				name += 1;
+				if (fwrite(name, strlen(name), 1, fp) == 0) {
+					fclose(fp);
+					return -1; /* Failed to unbind */
+				}
+				fclose(fp);
+			}
+		}
+	}
+	snprintf(filename, sizeof(filename), UIO_NEWID, uio_module_names[idx]);
+	snprintf(buf, sizeof(buf), "%x %x", id->vendor_id, id->device_id);
+	fp = fopen(filename, "w");
+	if (fp != NULL) {
+		if (fwrite(buf, strlen(buf), 1, fp) == 0) {
+			fclose(fp);
+			return -1; /* Failed to bind */
+		}
+		fclose(fp);
+	} else
+		return -1; /* Failed to bind */
+	snprintf(filename, sizeof(filename), UIO_BIND, uio_module_names[idx]);
+	name = strrchr(dirname, '/');
+	name += 1;
+	fp = fopen(filename, "w");
+	if (fp != NULL) {
+		if (fwrite(name, strlen(name), 1, fp) == 0) {
+			fclose(fp);
+			return -1; /* Failed to bind */
+		}
+		fclose(fp);
+		return 0; /* bind success */
+	}
+	return -1; /* Failed to bind */
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..f187141
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+int ccp_bind_uio(int idx, const char *dirname, struct rte_pci_id *id);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..4b64aaf
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,860 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include <ccp_pmd_private.h>
+#include <ccp_dev.h>
+#include <ccp_crypto.h>
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 20,
+					.max = 36,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
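+/* Device configuration: the CCP PMD needs no device-level setup. */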
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
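+/* Start the device: delegate to ccp_dev_start() to bring up the CCP queues. */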
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
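+/* Accumulate enqueue/dequeue and error counters from every queue pair. */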
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
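+/* Report driver id, feature flags, the capability table and qp/session limits. */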
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
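+/* Compose a unique "ccp_pmd_<dev>_qp_<id>" name for the queue pair. */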
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
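+/*
+ * Reuse an existing completion ring of sufficient size if one is found,
+ * otherwise create a new single-producer/single-consumer ring.
+ */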
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
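+/*
+ * Allocate a queue pair along with its completion ring and the
+ * mempool used for batch info structures.
+ */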
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return -ENOMEM;
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* Mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+static unsigned int
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
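+/*
+ * Take a session private data object from the mempool and parse the
+ * supplied crypto transforms into it.
+ */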
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
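+/* Zero the session private data and return it to its mempool. */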
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
+
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
+
+		.dev_infos_get		= ccp_pmd_info_get,
+
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
+
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..ab5e82b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,135 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/* Maximum number of queue pairs supported by the CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include <ccp_dev.h>
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< Op table populated from the application at enqueue time */
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< Number of crypto ops in the batch */
+	int desccnt;
+	/**< Number of CCP queue descriptors used by the batch */
+	uint32_t head_offset;
+	/**< CCP queue head/tail offsets at time of enqueue */
+	uint32_t tail_offset;
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buffer for passthrough operations */
+	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	int auth_only;
+	/**< Batch carries auth-only ops */
+#endif
+} __rte_cache_aligned;
+
+/* CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Mempool for batch info structures */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+/* Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+extern struct rte_cryptodev_ops *ccp_cpu_pmd_ops;
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..e7e3b99
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,283 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the names
+ *       of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written
+ *       permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+
+#include <ccp_crypto.h>
+#include <ccp_dev.h>
+#include <ccp_pmd_private.h>
+
+/**
+ * Flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t cryptodev_driver_id;
+
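+/*
+ * Return the CCP session for an op: the stored private data for
+ * session-based ops, or a session newly initialized from the queue
+ * pair's mempool for sessionless ops.
+ */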
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				cryptodev_driver_id);
+	} else {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp,
+				    (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			return NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session, cryptodev_driver_id,
+					 _sess_private_data);
+	}
+
+	return sess;
+}
+
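+/*
+ * Enqueue a burst: resolve the session for each op, compute how many CCP
+ * descriptor slots the burst needs, then pick a hardware queue with enough
+ * free slots and submit the ops to it.
+ */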
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
+{
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	int i, enq_cnt, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
+
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
+	return enq_cnt;
+}
+
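+/*
+ * Dequeue completed ops; temporary sessions of sessionless ops are
+ * released back to the mempool on the way out.
+ */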
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ccp_qp *qp = queue_pair;
+	unsigned int nb_dequeued, i;
+
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	/* Free session if a session-less crypto op. */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+			sizeof(struct ccp_private),
+			init_params->socket_id,
+			vdev);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: cryptodev_ccp_create failed",
+		    init_params->name);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+	int rc = 0;
+	const char *name;
+	struct rte_crypto_vdev_init_params init_params = {
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id(),
+		{0}
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_ccp_pmd_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..a753031
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+	local: *;
+};
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 0ceaa91..eea3c87 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -274,6 +274,24 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	/* SHA3 algorithm support */
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index c25fdd9..c65bd55 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -152,6 +152,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB)    += -L$(AESNI_MULTI_BUFFER_LIB_PATH)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)   += -lrte_pmd_aesni_gcm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM)   += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL)     += -lrte_pmd_openssl -lcrypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)         += -lrte_pmd_ccp -lcrypto
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)         += -lrte_pmd_qat -lcrypto
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)      += -lrte_pmd_snow3g
-- 
2.7.4