[dpdk-dev] [RFC PATCH v2 4/7] lib/librte_vhost: add vhost_user backend support

Fan Zhang roy.fan.zhang at intel.com
Thu Nov 2 21:10:56 CET 2017


This patch adds vhost_crypto device driver support to vhost_user.
Several new APIs are introduced for the user to create and delete
vhost_crypto devices, to fetch virtio queue descriptors and translate
them into DPDK crypto operations for enqueuing to the target cryptodev
in the backend, and, after dequeuing from the cryptodev, to finalize
the crypto operations and write the results back to the frontend.
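
A minimal usage sketch of the intended flow is given below (cryptodev_id,
sess_pool, vid, qid and BURST_SIZE are assumed to be set up by the
application; error handling and freeing of unused ops are omitted):

    struct rte_crypto_op *ops[BURST_SIZE];
    struct rte_mempool *cop_pool;
    uint16_t nb, nb_deq;

    /* control path: one crypto op pool, one vhost-crypto instance per vid */
    cop_pool = rte_vhost_crypto_create_cop_pool("vhost_crypto_cop_pool",
            RTE_CRYPTO_OP_TYPE_SYMMETRIC, 1024, 128, rte_socket_id());
    rte_vhost_crypto_create(vid, cryptodev_id, sess_pool, rte_socket_id());

    /* data path: run repeatedly on a worker lcore */
    rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops,
            BURST_SIZE);
    nb = rte_vhost_crypto_fetch_requests(vid, qid, ops, BURST_SIZE);
    nb = rte_cryptodev_enqueue_burst(cryptodev_id, 0, ops, nb);

    nb_deq = rte_cryptodev_dequeue_burst(cryptodev_id, 0, ops, BURST_SIZE);
    rte_vhost_crypto_finalize_requests(ops, nb_deq);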

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
---
 lib/librte_vhost/rte_vhost_crypto.h |  150 +++++
 lib/librte_vhost/vhost_crypto.c     | 1254 +++++++++++++++++++++++++++++++++++
 2 files changed, 1404 insertions(+)
 create mode 100644 lib/librte_vhost/rte_vhost_crypto.h
 create mode 100644 lib/librte_vhost/vhost_crypto.c

diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
new file mode 100644
index 0000000..04718a2
--- /dev/null
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -0,0 +1,150 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer	 in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VHOST_CRYPTO_H_
+#define _VHOST_CRYPTO_H_
+
+#include <linux/virtio_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_hash.h>
+#include <rte_pause.h>
+#include "rte_vhost.h"
+
+#ifndef MAX_DATA_QUEUES
+#define MAX_DATA_QUEUES	(1)
+#endif
+
+#define VIRTIO_CRYPTO_CTRL_QUEUE	(0)
+#define VIRTIO_CRYPTO_MAX_NUM_DEVS	(64)
+
+/** Feature bits */
+#define VIRTIO_CRYPTO_F_CIPHER_SESSION_MODE	(1)
+#define VIRTIO_CRYPTO_F_HASH_SESSION_MODE	(2)
+#define VIRTIO_CRYPTO_F_MAC_SESSION_MODE	(3)
+#define VIRTIO_CRYPTO_F_AEAD_SESSION_MODE	(4)
+
+/**
+ *  Create Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param cryptodev_id
+ *  The identifier of the DPDK cryptodev; the same cryptodev_id can be
+ *  assigned to multiple vhost-crypto devices.
+ * @param sess_pool
+ *  The pointer to the created cryptodev session pool, with a private data
+ *  size that matches the target DPDK cryptodev.
+ * @param socket_id
+ *  NUMA socket ID to allocate resources on.
+ * @return
+ *  0 if the Vhost Crypto Instance is created successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id);
+
+/**
+ *  Free the Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @return
+ *  0 if the Vhost Crypto Instance is freed successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_free(int vid);
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name
+ *  pool name
+ * @param type
+ *  crypto operation type; use RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ *  supports all operation types
+ * @param nb_elts
+ *  number of elements in pool
+ * @param cache_size
+ *  Number of elements to cache per lcore; see *rte_mempool_create* for further
+ *  details about cache size
+ * @param socket_id
+ *  Socket to allocate memory on
+ * @return
+ *  On success pointer to mempool
+ *  On failure NULL
+ */
+struct rte_mempool *
+rte_vhost_crypto_create_cop_pool(const char *name, enum rte_crypto_op_type type,
+		unsigned int nb_elts, unsigned int cache_size, int socket_id);
+
+/**
+ * Fetch a number of vring descriptors from the virtqueue and translate them
+ * into DPDK crypto operations. After this function is executed, the user can
+ * enqueue the processed ops to the target cryptodev.
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param qid
+ *  Virtio queue index.
+ * @param ops
+ *  The address of an array of pointers to *rte_crypto_op* structures that
+ *  must be large enough to store *nb_ops* pointers.
+ * @param nb_ops
+ *  The maximum number of operations to be fetched and translated.
+ * @return
+ *  The number of fetched and processed vhost crypto request operations.
+ */
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+/**
+ * Finalize the dequeued crypto ops. After the translated crypto ops are
+ * dequeued from the cryptodev, this function shall be called to write the
+ * processed data back to the vring descriptors (if no-copy is turned off) and
+ * obtain the bit mask of the vhost devices contained in the crypto ops.
+ *
+ * @param ops
+ *  The address of an array of *rte_crypto_op* structures that were dequeued
+ *  from the cryptodev.
+ * @param nb_ops
+ *  The number of operations contained in the array.
+ * @return
+ *  The bit-mask of the vhost devices contained in the ops.
+ */
+uint64_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+#endif /* _VHOST_CRYPTO_H_ */
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
new file mode 100644
index 0000000..a179be8
--- /dev/null
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -0,0 +1,1254 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer	 in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_malloc.h>
+#include <rte_jhash.h>
+//#define _DEBUG_
+#ifdef CONFIG_RTE_LIBRTE_VHOST_DEBUG
+#include <rte_hexdump.h>
+#endif
+#include "vhost.h"
+#include "vhost_user.h"
+#include "rte_vhost_crypto.h"
+
+#define NB_MEMPOOL_OBJS			(1024)
+#define NB_CRYPTO_DESCRIPTORS		(1024)
+#define NB_CACHE_OBJS			(128)
+
+#define SESSION_MAP_ENTRIES		(1024) /**< Max nb sessions per vdev */
+#define MAX_KEY_SIZE			(32)
+#define VHOST_CRYPTO_MAX_IV_LEN		(16)
+#define MAX_COUNT_DOWN_TIMES		(100)
+
+#define VHOST_USER_MSG_CREATE_SESS	(23)
+#define VHOST_USER_MSG_CLOSE_SESS	(24)
+
+#define INHDR_LEN 		(sizeof(struct virtio_crypto_inhdr))
+#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
+				sizeof(struct rte_crypto_sym_op))
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+#define VC_LOG_ERR(fmt, args...)				\
+	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
+		"Vhost-Crypto",	__func__, __LINE__, ## args)
+#define VC_LOG_INFO(fmt, args...)				\
+	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
+		"Vhost-Crypto",	__func__, __LINE__, ## args)
+
+#define VC_LOG_DBG(fmt, args...)				\
+	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
+		"Vhost-Crypto",	__func__, __LINE__, ## args)
+#else
+#define VC_LOG_ERR(fmt, args...)				\
+	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_INFO(fmt, args...)				\
+	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_DBG(fmt, args...)
+#endif
+
+#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
+		(1 << VIRTIO_RING_F_INDIRECT_DESC) |			\
+		(1 << VIRTIO_RING_F_EVENT_IDX) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_HASH) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_MAC) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_AEAD) |			\
+		(1 << VIRTIO_NET_F_CTRL_VQ))
+
+/* Macro to translate guest physical address to host virtual address */
+#define GPA_TO_VVA(t, m, a)						\
+		(t)(uintptr_t)rte_vhost_gpa_to_vva(m, a)
+
+/* Offset of the per-request data stored at the end of rte_crypto_op */
+#define REQ_OP_OFFSET 		(IV_OFFSET + VHOST_CRYPTO_MAX_IV_LEN)
+
+/**
+ * 1-to-1 mapping between RTE_CRYPTO_*ALGO* and VIRTIO_CRYPTO_*ALGO*. For
+ * algorithms not supported by RTE_CRYPTODEV, -VIRTIO_CRYPTO_NOTSUPP is
+ * returned.
+ */
+static int cipher_algo_transform[] = {
+		RTE_CRYPTO_CIPHER_NULL,
+		RTE_CRYPTO_CIPHER_ARC4,
+		RTE_CRYPTO_CIPHER_AES_ECB,
+		RTE_CRYPTO_CIPHER_AES_CBC,
+		RTE_CRYPTO_CIPHER_AES_CTR,
+		-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_CIPHER_DES_ECB */
+		RTE_CRYPTO_CIPHER_DES_CBC,
+		RTE_CRYPTO_CIPHER_3DES_ECB,
+		RTE_CRYPTO_CIPHER_3DES_CBC,
+		RTE_CRYPTO_CIPHER_3DES_CTR,
+		RTE_CRYPTO_CIPHER_KASUMI_F8,
+		RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+		RTE_CRYPTO_CIPHER_AES_F8,
+		RTE_CRYPTO_CIPHER_AES_XTS,
+		RTE_CRYPTO_CIPHER_ZUC_EEA3
+};
+
+/**
+ * VIRTIO_CRYPTO_AUTH_* indexes are not sequential; the gaps are filled with
+ * -VIRTIO_CRYPTO_BADMSG errors.
+ */
+static int auth_algo_transform[] = {
+		RTE_CRYPTO_AUTH_NULL,
+		RTE_CRYPTO_AUTH_MD5_HMAC,
+		RTE_CRYPTO_AUTH_SHA1_HMAC,
+		RTE_CRYPTO_AUTH_SHA224_HMAC,
+		RTE_CRYPTO_AUTH_SHA256_HMAC,
+		RTE_CRYPTO_AUTH_SHA384_HMAC,
+		RTE_CRYPTO_AUTH_SHA512_HMAC,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_CMAC_3DES */
+		RTE_CRYPTO_AUTH_AES_CMAC,
+		RTE_CRYPTO_AUTH_KASUMI_F9,
+		RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		-VIRTIO_CRYPTO_BADMSG,
+		RTE_CRYPTO_AUTH_AES_GMAC,
+		-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_GMAC_TWOFISH */
+		RTE_CRYPTO_AUTH_AES_CBC_MAC,
+		-VIRTIO_CRYPTO_NOTSUPP, /* VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 */
+		RTE_CRYPTO_AUTH_AES_XCBC_MAC
+};
+
+static int cipher_op_transform[] = {
+		-VIRTIO_CRYPTO_BADMSG, /* meaningless */
+		RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+		RTE_CRYPTO_CIPHER_OP_DECRYPT
+};
+
+static int chain_cipher_op_transform[] = {
+		-VIRTIO_CRYPTO_BADMSG, /* meaningless */
+		RTE_CRYPTO_CIPHER_OP_DECRYPT, /* HASH THEN CIPHER */
+		RTE_CRYPTO_CIPHER_OP_ENCRYPT /* CIPHER THEN HASH */
+};
+
+static int chain_auth_op_transform[] = {
+		-VIRTIO_CRYPTO_BADMSG, /* meaningless */
+		RTE_CRYPTO_AUTH_OP_VERIFY,
+		RTE_CRYPTO_AUTH_OP_GENERATE
+};
+
+static int iv_lens[] = {
+		-1, /* Invalid input */
+		0, /* RTE_CRYPTO_CIPHER_NULL */
+		8, /* RTE_CRYPTO_CIPHER_3DES_CBC */
+		8, /* RTE_CRYPTO_CIPHER_3DES_CTR */
+		8, /* RTE_CRYPTO_CIPHER_3DES_ECB */
+		16, /* RTE_CRYPTO_CIPHER_AES_CBC */
+		/* TODO: add common algos */
+};
+
+/**
+ * The vhost_crypto struct maintains the state of a vhost-crypto device: the
+ * session lookup table and the DPDK cryptodev that deals with all of its
+ * crypto workloads.
+ */
+struct vhost_crypto {
+	/** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
+	 *  session ID.
+	 */
+	struct rte_hash *session_map;
+
+	/** DPDK cryptodev ID */
+	uint8_t cid;
+	struct rte_mempool *sess_pool;
+	uint16_t nb_qps;
+
+	uint64_t last_session_id;
+
+	uint64_t cache_session_id;
+	struct rte_cryptodev_sym_session *cache_session;
+	/** socket id for the device */
+	int socket_id;
+};
+
+struct vhost_crypto_data_req {
+	struct rte_mbuf m_src;
+	struct rte_mbuf m_dst;
+
+	struct vring_desc *descs;
+	struct vring_desc *wb_desc;
+
+	struct rte_vhost_memory *mem;
+
+	uint8_t *src_data;
+	uint8_t *dst_data;
+	uint8_t *hash_result;
+
+	struct virtio_crypto_inhdr *inhdr;
+
+	int vid;
+};
+
+static int
+transform_cipher_param(struct rte_crypto_sym_xform *xform,
+		VhostUserCryptoSessionParam *param) {
+	int ret;
+
+	ret = cipher_algo_transform[param->cipher_algo];
+	if (unlikely(ret < 0))
+		return ret;
+
+	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform->cipher.algo = ret;
+	xform->cipher.key.length = param->cipher_key_len;
+	if (xform->cipher.key.length > 0)
+		xform->cipher.key.data = param->cipher_key_buf;
+	ret = cipher_op_transform[param->dir];
+	if (unlikely(ret < 0)) {
+		VC_LOG_DBG("Bad operation type");
+		return ret;
+	}
+	xform->cipher.op = ret;
+	ret = iv_lens[xform->cipher.algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform->cipher.iv.length = ret;
+	xform->cipher.iv.offset = IV_OFFSET;
+	return 0;
+}
+
+static int
+transform_chain_param(struct rte_crypto_sym_xform *xforms,
+		VhostUserCryptoSessionParam *param)
+{
+	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
+	int ret;
+
+	if (param->dir == VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER) {
+		xform_auth = xforms;
+		xform_cipher = xforms->next;
+	} else {
+		xform_cipher = xforms;
+		xform_auth = xforms->next;
+	}
+
+	/* cipher */
+	ret = cipher_algo_transform[param->cipher_algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher->cipher.algo = ret;
+	xform_cipher->cipher.key.length = param->cipher_key_len;
+	if (xform_cipher->cipher.key.length > 0)
+		xform_cipher->cipher.key.data = param->cipher_key_buf;
+	ret = chain_cipher_op_transform[param->dir];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_cipher->cipher.op = ret;
+	ret = iv_lens[xform_cipher->cipher.algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_cipher->cipher.iv.length = ret;
+	xform_cipher->cipher.iv.offset = IV_OFFSET;
+
+	/* auth */
+	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	ret = auth_algo_transform[param->hash_algo];
+	if (unlikely(ret < 0))
+		return ret;
+	xform_auth->auth.algo = ret;
+	ret = chain_auth_op_transform[param->dir];
+	if (unlikely(ret))
+		return ret;
+	xform_auth->auth.op = ret;
+	xform_auth->auth.digest_length = param->digest_len;
+	xform_auth->auth.key.length = param->auth_key_len;
+	if (xform_auth->auth.key.length)
+		xform_auth->auth.key.data = param->auth_key_buf;
+
+	return 0;
+}
+
+static void
+vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
+		VhostUserCryptoSessionParam *sess_param)
+{
+	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
+	struct rte_cryptodev_sym_session *session;
+	int ret;
+
+	switch (sess_param->op_type) {
+	case VIRTIO_CRYPTO_SYM_OP_NONE:
+	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+		ret = transform_cipher_param(&xform1, sess_param);
+		if (unlikely(ret)) {
+			VC_LOG_ERR("Error transforming session msg (%i)", ret);
+			sess_param->session_id = ret;
+			return;
+		}
+		break;
+	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+		if (unlikely(sess_param->hash_mode !=
+				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
+			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+			VC_LOG_ERR("Error transforming session message (%i)",
+					-VIRTIO_CRYPTO_NOTSUPP);
+			return;
+		}
+
+		xform1.next = &xform2;
+
+		ret = transform_chain_param(&xform1, sess_param);
+		if (unlikely(ret)) {
+			VC_LOG_ERR("Error transforming session message (%i)", ret);
+			sess_param->session_id = ret;
+			return;
+		}
+
+		break;
+	default:
+		VC_LOG_ERR("Algorithm not yet supported");
+		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+		return;
+	}
+
+	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
+	if (!session) {
+		VC_LOG_ERR("Failed to create session");
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
+			vcrypto->sess_pool) < 0) {
+		VC_LOG_ERR("Failed to initialize session");
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	/* insert hash to map */
+	if (rte_hash_add_key_data(vcrypto->session_map,
+			&vcrypto->last_session_id, session) < 0) {
+		VC_LOG_ERR("Failed to insert session to hash table");
+
+		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
+			VC_LOG_ERR("Failed to clear session");
+		else {
+			if (rte_cryptodev_sym_session_free(session) < 0)
+				VC_LOG_ERR("Failed to free session");
+		}
+		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+		return;
+	}
+
+	VC_LOG_DBG("Session (key %lu, session %p) created.",
+			vcrypto->last_session_id, session);
+
+	sess_param->session_id = vcrypto->last_session_id;
+	vcrypto->last_session_id++;
+}
+
+static int
+vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
+{
+	struct rte_cryptodev_sym_session *session;
+	uint64_t sess_id = session_id;
+	int ret;
+
+	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
+			(void **)&session);
+
+	if (unlikely(ret < 0)) {
+		VC_LOG_ERR("Failed to find session (key %lu).", session_id);
+		return -VIRTIO_CRYPTO_INVSESS;
+	}
+
+	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
+		VC_LOG_DBG("Failed to clear session");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	if (rte_cryptodev_sym_session_free(session) < 0) {
+		VC_LOG_DBG("Failed to free session");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
+		VC_LOG_DBG("Failed to delete session from hash table.");
+		return -VIRTIO_CRYPTO_ERR;
+	}
+
+	VC_LOG_DBG("Session (key %lu, session %p) deleted.", sess_id,
+			session);
+
+	return 0;
+}
+
+static int
+vhost_crypto_msg_handler(struct virtio_net *dev, struct VhostUserMsg *msg,
+		int fd)
+{
+	struct vhost_user_dev_priv *priv;
+	struct vhost_crypto *vcrypto;
+	int ret = 0;
+
+	priv = dev->private_data;
+	if (unlikely(priv == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+	vcrypto = (struct vhost_crypto *)priv->data;
+
+	if (msg->request.master == VHOST_USER_MSG_CREATE_SESS) {
+		vhost_crypto_create_sess(vcrypto, &msg->payload.crypto_session);
+
+		msg->flags &= ~VHOST_USER_VERSION_MASK;
+		msg->flags &= ~VHOST_USER_NEED_REPLY;
+		msg->flags |= VHOST_USER_VERSION;
+		msg->flags |= VHOST_USER_REPLY_MASK;
+
+		ret = send_fd_message(fd, (char *)msg, VHOST_USER_HDR_SIZE +
+				msg->size, NULL, 0);
+	} else if (msg->request.master == VHOST_USER_MSG_CLOSE_SESS)
+		ret = vhost_crypto_close_sess(vcrypto, msg->payload.u64);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static __rte_always_inline struct vring_desc *
+find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+{
+	if (desc->flags & VRING_DESC_F_WRITE)
+		return desc;
+
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		desc = &head[desc->next];
+		if (desc->flags & VRING_DESC_F_WRITE)
+			return desc;
+	}
+
+	return NULL;
+}
+
+static __rte_always_inline struct virtio_crypto_inhdr *
+reach_inhdr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc *desc, uint32_t off)
+{
+	if (!(desc->flags & VRING_DESC_F_NEXT) && desc->len - off >= INHDR_LEN)
+		return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem,
+				desc->addr + desc->len - INHDR_LEN);
+
+	while (desc->flags & VRING_DESC_F_NEXT)
+		desc = &head[desc->next];
+
+	return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem,
+				desc->addr + desc->len - INHDR_LEN);
+}
+
+static __rte_always_inline int
+move_desc(struct vring_desc *head, struct vring_desc **cur_desc, uint32_t *off,
+		uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint32_t offset = *off;
+	uint32_t left = size;
+	uint32_t to_move;
+
+	rte_prefetch0(&head[desc->next]);
+	to_move = RTE_MIN(desc->len - offset, left);
+	if (likely(to_move)) {
+		left -= to_move;
+	}
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_move = RTE_MIN(desc->len, left);
+		left -= to_move;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small");
+		return -1;
+	}
+
+	if (likely(to_move == desc->len && (desc->flags & VRING_DESC_F_NEXT))) {
+		*cur_desc = &head[desc->next];
+		*off = 0;
+	} else {
+		*cur_desc = desc;
+		*off = to_move;
+	}
+
+	return 0;
+}
+
+static int
+copy_data(void *dst_data, struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint32_t left = size;
+	uint32_t to_copy;
+	uint8_t *data = dst_data;
+	uint8_t *src;
+
+	rte_prefetch0(&head[desc->next]);
+	to_copy = RTE_MIN(desc->len - *off, left);
+	src = GPA_TO_VVA(uint8_t *, mem, desc->addr + *off);
+	rte_memcpy((uint8_t *)data, src, to_copy);
+	left -= to_copy;
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_copy = RTE_MIN(desc->len, left);
+		src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(data + size - left, src, to_copy);
+		left -= to_copy;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small, expect %uB, short %uB", size,
+				left);
+		return -1;
+	}
+
+	if (likely(to_copy == desc->len && (desc->flags & VRING_DESC_F_NEXT))) {
+		*cur_desc = &head[desc->next];
+		*off = 0;
+	} else {
+		*cur_desc = desc;
+		*off = to_copy;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void *
+get_data_ptr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint8_t *data;
+
+	data = GPA_TO_VVA(void *, mem, desc->addr + *off);
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Failed to get object");
+		return NULL;
+	}
+
+	if (unlikely(move_desc(head, cur_desc, off, size) < 0))
+		return NULL;
+
+	return data;
+}
+
+#ifdef RTE_LIBRTE_VHOST_CRYPTO_DATA_QUEUE_NO_COPY
+
+static int
+write_data(__rte_unused struct vhost_crypto_data_req *vc_req)
+{
+	return 0;
+}
+
+static __rte_always_inline void *
+get_rd_data(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	return get_data_ptr(head, mem, cur_desc, off, size);
+}
+
+static __rte_always_inline void *
+get_wb_ptr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	return get_data_ptr(head, mem, cur_desc, off, size);
+}
+
+static __rte_always_inline void *
+free_data(__rte_unused void *data)
+{
+	return NULL;
+}
+
+#else
+
+static int
+write_data(struct vhost_crypto_data_req *vc_req)
+{
+	struct vring_desc *descs = vc_req->descs;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	struct vring_desc *desc = vc_req->wb_desc;
+	uint32_t left = vc_req->m_dst.data_len;
+	uint32_t to_write;
+	uint8_t *src_data = vc_req->m_dst.buf_addr;
+	uint8_t *dst;
+
+	rte_prefetch0(&descs[desc->next]);
+	to_write = RTE_MIN(desc->len, left);
+	dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+	rte_memcpy(dst, src_data, to_write);
+	left -= to_write;
+	src_data += to_write;
+
+#ifdef CONFIG_RTE_LIBRTE_VHOST_DEBUG
+	printf("desc addr %llu len %u:", desc->addr, desc->len);
+	rte_hexdump(stdout, "", dst, to_write);
+#endif
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &descs[desc->next];
+		rte_prefetch0(&descs[desc->next]);
+		to_write = RTE_MIN(desc->len, left);
+		dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(dst, src_data, to_write);
+#ifdef CONFIG_RTE_LIBRTE_VHOST_DEBUG
+		printf("desc addr %llu len %u:", desc->addr, desc->len);
+		rte_hexdump(stdout, "", dst, to_write);
+#endif
+		left -= to_write;
+		src_data += to_write;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small, expect %uB, short %uB",
+				vc_req->m_dst.data_len, left);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void *
+get_rd_data(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	void *data = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE,
+			rte_socket_id());
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Insufficient memory");
+		return NULL;
+	}
+
+	if (unlikely(copy_data(data, head, mem, cur_desc, off, size) < 0)) {
+		rte_free(data);
+		return NULL;
+	}
+
+	return (void *)data;
+}
+
+static void *
+get_wb_ptr(struct vring_desc *head,
+		__rte_unused struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	uint8_t *data;
+
+	if (unlikely(move_desc(head, cur_desc, off, size) < 0))
+		return NULL;
+
+	data = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE,
+			rte_socket_id());
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Insufficient memory");
+		return NULL;
+	}
+
+	return data;
+}
+
+static __rte_always_inline void *
+free_data(void *data)
+{
+	rte_free(data);
+	return NULL;
+}
+
+#endif /* RTE_LIBRTE_VHOST_CRYPTO_DATA_QUEUE_NO_COPY */
+
+struct rte_mempool *
+rte_vhost_crypto_create_cop_pool(const char *name, enum rte_crypto_op_type type,
+		unsigned int nb_elts, unsigned int cache_size, int socket_id)
+{
+	return rte_crypto_op_pool_create(name, type, nb_elts, cache_size,
+			VHOST_CRYPTO_MAX_IV_LEN +
+			sizeof(struct vhost_crypto_data_req), socket_id);
+}
+
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct rte_hash_parameters params = {0};
+	struct vhost_user_dev_priv *priv;
+	struct vhost_crypto *vcrypto;
+	char name[128];
+	int ret;
+
+	if (vid >= VIRTIO_CRYPTO_MAX_NUM_DEVS || !dev) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	ret = rte_vhost_driver_set_features(dev->ifname,
+			VIRTIO_CRYPTO_FEATURES);
+	if (ret < 0) {
+		VC_LOG_ERR("Error setting features");
+		return -1;
+	}
+
+	priv = rte_zmalloc_socket(NULL, sizeof(*priv) + sizeof(*vcrypto),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (!priv) {
+		VC_LOG_ERR("Insufficient memory");
+		return -ENOMEM;
+	}
+
+	vcrypto = (struct vhost_crypto *)priv->data;
+
+	vcrypto->sess_pool = sess_pool;
+	vcrypto->cid = cryptodev_id;
+	vcrypto->cache_session_id = UINT64_MAX;
+
+	snprintf(name, sizeof(name), "HASH_VHOST_CRYPT_%u", (unsigned)vid);
+	params.name = name;
+	params.entries = SESSION_MAP_ENTRIES;
+	params.hash_func = rte_jhash;
+	params.key_len = sizeof(uint64_t);
+	params.socket_id = socket_id;
+	vcrypto->session_map = rte_hash_create(&params);
+	if (!vcrypto->session_map) {
+		VC_LOG_ERR("Failed to create session map");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	priv->vhost_user_msg_handler = vhost_crypto_msg_handler;
+	dev->private_data = (void *)priv;
+
+	return 0;
+
+error_exit:
+	if (vcrypto->session_map)
+		rte_hash_free(vcrypto->session_map);
+	rte_free(priv);
+
+	return ret;
+}
+
+int
+rte_vhost_crypto_free(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_user_dev_priv *priv;
+	struct vhost_crypto *vcrypto;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	priv = dev->private_data;
+	if (unlikely(priv == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	vcrypto = (struct vhost_crypto *)priv->data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	rte_hash_free(vcrypto->session_map);
+	rte_free(priv);
+	dev->private_data = NULL;
+
+	return 0;
+}
+
+/**
+ * Function pointer type for symmetric crypto request translation. Dispatching
+ * through a table of these avoids one branch per request.
+ */
+typedef uint32_t (*prepare_sym_op_t)(struct rte_crypto_op *,
+		struct vhost_crypto_data_req *,
+		struct virtio_crypto_sym_data_req *,
+		struct vring_desc *, uint32_t, uint8_t *);
+
+static uint32_t
+prepare_not_support_op(__rte_unused struct rte_crypto_op *op,
+		__rte_unused struct vhost_crypto_data_req *vc_req,
+		__rte_unused struct virtio_crypto_sym_data_req *sym_req,
+		__rte_unused struct vring_desc *rd_desc,
+		__rte_unused uint32_t rd_offset,
+		uint8_t *retval)
+{
+	*retval = VIRTIO_CRYPTO_NOTSUPP;
+	return INHDR_LEN;
+}
+
+static uint32_t
+prepare_sym_cipher_op(struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_sym_data_req *sym_req,
+		struct vring_desc *cur_desc, uint32_t cur_offset,
+		uint8_t *retval)
+{
+	struct virtio_crypto_cipher_data_req *cipher = &sym_req->u.cipher;
+	struct vring_desc *descs = vc_req->descs;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	uint32_t offset = cur_offset;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint8_t ret = 0;
+
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+	op->sym->cipher.data.length = cipher->para.src_data_len;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, descs, mem, &desc, &offset,
+			cipher->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+#ifdef CONFIG_RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "IV:", iv_data, cipher->para.iv_len);
+#endif
+
+	/* src */
+	vc_req->src_data = get_rd_data(descs, mem, &desc, &offset,
+			cipher->para.src_data_len);
+	if (unlikely(!vc_req->src_data)) {
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+#ifdef CONFIG_RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "SRC:", vc_req->src_data, cipher->para.src_data_len);
+#endif
+	/* dst */
+	desc = find_write_desc(descs, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->wb_desc = desc;
+	offset = 0;
+
+	vc_req->dst_data = get_wb_ptr(descs, mem, &desc, &offset,
+			cipher->para.dst_data_len);
+	if (unlikely(!vc_req->dst_data)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* record inhdr */
+	vc_req->inhdr = get_data_ptr(descs, mem, &desc, &offset, INHDR_LEN);
+
+	/* src data */
+	vc_req->m_src.buf_addr = (void *)vc_req->src_data;
+	vc_req->m_src.buf_physaddr = rte_mem_virt2phy(vc_req->src_data);
+	vc_req->m_src.data_off = 0;
+	vc_req->m_src.data_len = cipher->para.src_data_len;
+	op->sym->m_src = &vc_req->m_src;
+	op->sym->cipher.data.offset = 0;
+
+	/* dst data */
+	vc_req->m_dst.buf_addr = (void *)(vc_req->dst_data);
+	vc_req->m_dst.buf_physaddr = rte_mem_virt2phy(vc_req->m_dst.buf_addr);
+	vc_req->m_dst.data_off = 0;
+	vc_req->m_dst.data_len = cipher->para.dst_data_len;
+	op->sym->m_dst = &vc_req->m_dst;
+
+	*retval = 0;
+	return cipher->para.dst_data_len + INHDR_LEN;
+
+error_exit:
+	*retval = ret;
+	return INHDR_LEN;
+}
+
+static uint32_t
+prepare_sym_chain_op(struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_sym_data_req *sym_req,
+		struct vring_desc *cur_desc, uint32_t cur_offset,
+		uint8_t *retval)
+{
+	struct virtio_crypto_alg_chain_data_req *chain = &sym_req->u.chain;
+	struct vring_desc *descs = vc_req->descs;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	uint32_t offset = cur_offset;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint8_t ret;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, descs, mem, &desc, &offset,
+			chain->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	/* src */
+	vc_req->src_data = get_rd_data(descs, mem, &desc, &offset,
+			chain->para.src_data_len);
+	if (unlikely(!vc_req->src_data)) {
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* dst */
+	desc = find_write_desc(descs, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->wb_desc = desc;
+	offset = 0;
+
+	vc_req->dst_data = get_wb_ptr(descs, mem, &desc, &offset,
+			chain->para.dst_data_len);
+	if (unlikely(!vc_req->dst_data)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* hash result */
+	vc_req->hash_result = get_wb_ptr(descs, mem, &desc, &offset,
+			chain->para.hash_result_len);
+	if (unlikely(!vc_req->hash_result)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* record inhdr */
+	vc_req->inhdr = get_data_ptr(descs, mem, &desc, &offset, INHDR_LEN);
+
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+	vc_req->m_src.buf_addr = (void *)vc_req->src_data;
+	vc_req->m_src.buf_physaddr = rte_mem_virt2phy(vc_req->m_src.buf_addr);
+	vc_req->m_src.data_off = 0;
+	vc_req->m_src.data_len = chain->para.src_data_len;
+	op->sym->m_src = &vc_req->m_src;
+	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
+	op->sym->cipher.data.length = chain->para.src_data_len -
+			chain->para.cipher_start_src_offset;
+	/* dst data */
+	vc_req->m_dst.buf_addr = (void *)vc_req->dst_data;
+	vc_req->m_dst.buf_physaddr = rte_mem_virt2phy(vc_req->m_dst.buf_addr);
+	vc_req->m_dst.data_off = 0;
+	vc_req->m_dst.data_len = chain->para.dst_data_len +
+			chain->para.hash_result_len;
+
+	/* auth */
+	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
+	op->sym->auth.data.length = chain->para.len_to_hash;
+	op->sym->auth.digest.data = (void *)vc_req->hash_result;
+	op->sym->auth.digest.phys_addr = rte_mem_virt2phy(vc_req->hash_result);
+
+	*retval = 0;
+
+	return vc_req->m_dst.data_len + INHDR_LEN;
+
+error_exit:
+	*retval = ret;
+	return INHDR_LEN;
+}
+
+static const prepare_sym_op_t prepare_sym_ops[] = {
+		prepare_not_support_op, /* VIRTIO_CRYPTO_SYM_OP_NONE */
+		prepare_sym_cipher_op,
+		prepare_sym_chain_op,
+};
+
+static uint32_t
+vhost_crypto_process_cop(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+		struct vring_desc *head, uint16_t desc_idx,
+		struct rte_vhost_memory *mem, int vid)
+{
+	struct vhost_crypto_data_req *vc_req = rte_crypto_op_ctod_offset(op,
+			struct vhost_crypto_data_req *, REQ_OP_OFFSET);
+	struct rte_cryptodev_sym_session *session;
+	struct virtio_crypto_op_data_req *req;
+	struct virtio_crypto_inhdr *inhdr;
+	struct vring_desc *desc;
+	uint64_t session_id;
+	uint32_t offset = 0;
+	uint32_t len = INHDR_LEN;
+	int ret = 0;
+
+	if (unlikely(!vc_req))
+		return 0;
+
+	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
+		head = GPA_TO_VVA(struct vring_desc *, mem, head->addr);
+		if (unlikely(!head))
+			return 0;
+		desc_idx = 0;
+	}
+
+	vc_req->mem = mem;
+	vc_req->descs = head;
+
+	desc = &head[desc_idx];
+
+	req = get_rd_data(head, mem, &desc, &offset, sizeof(*req));
+	if (unlikely(!req)) {
+		VC_LOG_ERR("Failed to retrieve req");
+		return 0;
+	}
+
+	if (unlikely(req->header.opcode != VIRTIO_CRYPTO_CIPHER_ENCRYPT &&
+			req->header.opcode != VIRTIO_CRYPTO_CIPHER_DECRYPT))
+	{
+		VC_LOG_ERR("Req %u not yet supported", req->header.opcode);
+		ret = -1;
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	session_id = req->header.session_id;
+
+	/* one branch to avoid unnecessary table lookup */
+	if (vcrypto->cache_session_id != session_id) {
+		ret = rte_hash_lookup_data(vcrypto->session_map, &session_id,
+				(void **)&session);
+		if (unlikely(ret < 0)) {
+			VC_LOG_DBG("Failed to retrieve session id %lu",
+					session_id);
+			ret = -1;
+			inhdr = reach_inhdr(head, mem, desc, 0);
+			if (likely(inhdr != NULL))
+				inhdr->status = VIRTIO_CRYPTO_ERR;
+			else
+				len = 0;
+			goto finalize;
+		}
+
+		vcrypto->cache_session = session;
+		vcrypto->cache_session_id = session_id;
+	}
+
+	session = vcrypto->cache_session;
+
+	ret = rte_crypto_op_attach_sym_session(op, session);
+	if (unlikely(ret < 0)) {
+		VC_LOG_ERR("Failed to attach session to op");
+		ret = -1;
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	len = (*prepare_sym_ops[req->u.sym_req.op_type])(op, vc_req,
+			&req->u.sym_req, desc, offset, (uint8_t *)&ret);
+	if (unlikely(ret)) {
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	inhdr = reach_inhdr(head, mem, desc, 0);
+	if (unlikely(!inhdr)) {
+		ret = -1;
+		len = 0;
+		goto finalize;
+	}
+	vc_req->inhdr = inhdr;
+
+	vc_req->vid = vid;
+
+finalize:
+	free_data(req);
+	if (unlikely(ret)) {
+		free_data(vc_req->src_data);
+		free_data(vc_req->dst_data);
+		free_data(vc_req->hash_result);
+	}
+
+	return len;
+}
+
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_user_dev_priv *priv;
+	struct rte_vhost_memory *mem;
+	struct vhost_crypto *vcrypto;
+	struct vhost_virtqueue *vq;
+	uint16_t avail_idx;
+	uint16_t start_idx;
+	uint16_t count;
+	uint16_t i;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return 0;
+	}
+
+	priv = dev->private_data;
+	if (unlikely(priv == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return 0;
+	}
+
+	vcrypto = (struct vhost_crypto *)priv->data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return 0;
+	}
+
+	vq = dev->virtqueue[qid];
+	mem = dev->mem;
+
+	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+	start_idx = vq->last_used_idx;
+	count = avail_idx - start_idx;
+	count = RTE_MIN(count, nb_ops);
+
+	for (i = 0; i < count; i++) {
+		uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+		uint16_t desc_idx = vq->avail->ring[used_idx];
+		struct vring_desc *head = &vq->desc[desc_idx];
+		uint32_t len;
+
+		vq->used->ring[desc_idx].id = desc_idx;
+		len = vhost_crypto_process_cop(vcrypto, ops[i], head, desc_idx,
+				mem, vid);
+		if (unlikely(len == 0))
+			break;
+		vq->used->ring[desc_idx].len = len;
+	}
+
+	*(volatile uint16_t *)&vq->used->idx += i;
+	vq->last_used_idx += i;
+
+	return i;
+}
+
+static __rte_always_inline uint64_t
+vhost_crypto_finalize_one_request(struct rte_crypto_op *op)
+{
+	struct vhost_crypto_data_req *vc_req = rte_crypto_op_ctod_offset(op,
+			struct vhost_crypto_data_req *, REQ_OP_OFFSET);
+	uint64_t vid_mask = 0;
+
+	if (unlikely(!vc_req)) {
+		VC_LOG_ERR("Failed to retrieve vc_req");
+		return 0;
+	}
+
+	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		VC_LOG_ERR("Crypto Op failed to be processed");
+		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+		goto exit;
+	}
+
+	/* write back dst */
+	if (unlikely(write_data(vc_req) < 0)) {
+		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+		goto exit;
+	}
+
+	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+	vid_mask = 1ULL << vc_req->vid;
+
+exit:
+	vc_req->src_data = free_data(vc_req->src_data);
+	vc_req->dst_data = free_data(vc_req->dst_data);
+	vc_req->hash_result = free_data(vc_req->hash_result);
+
+	return vid_mask;
+}
+
+uint64_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	uint64_t vid_mask = 0;
+	uint16_t i;
+
+	for (i = 0; i < nb_ops; i++)
+		vid_mask |= vhost_crypto_finalize_one_request(ops[i]);
+
+	return vid_mask;
+}
-- 
2.9.5


