[dpdk-dev] [PATCH v5 5/8] lib/librte_vhost: add public function implementation

Fan Zhang roy.fan.zhang at intel.com
Wed Apr 4 12:08:59 CEST 2018


This patch adds the public API implementation to vhost crypto:
rte_vhost_crypto_create/free bind and unbind a vhost device to/from a
DPDK cryptodev, rte_vhost_crypto_set_zero_copy toggles the zero-copy
option, and rte_vhost_crypto_fetch_requests/finalize_requests form the
data path that translates vring descriptors into crypto operations and
writes the results back to the guest.

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
---
 lib/librte_vhost/rte_vhost_crypto.h    |  97 ++++++++++++-
 lib/librte_vhost/rte_vhost_version.map |  11 ++
 lib/librte_vhost/vhost_crypto.c        | 255 +++++++++++++++++++++++++++++++++
 3 files changed, 362 insertions(+), 1 deletion(-)

diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
index b6be9d87d..acdc7f0fe 100644
--- a/lib/librte_vhost/rte_vhost_crypto.h
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -5,10 +5,105 @@
 #ifndef _VHOST_CRYPTO_H_
 #define _VHOST_CRYPTO_H_
 
+#define VHOST_CRYPTO_MBUF_POOL_SIZE		(8192)
+#define VHOST_CRYPTO_MAX_BURST_SIZE		(64)
+#define VHOST_CRYPTO_SESSION_MAP_ENTRIES	(1024) /**< Max nb sessions */
+/** Max nb virtual queues in a burst for finalizing */
+#define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS		(64)
+
 enum rte_vhost_crypto_zero_copy {
 	RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0,
-	RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE,
+	RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE = 1,
 	RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS
 };
 
+/**
+ *  Create Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param cryptodev_id
+ *  The identifier of DPDK Cryptodev, the same cryptodev_id can be assigned to
+ *  multiple Vhost-crypto devices.
+ * @param sess_pool
+ *  Pointer to the created cryptodev session pool, whose private data size
+ *  matches the target DPDK Cryptodev.
+ * @param socket_id
+ *  NUMA Socket ID to allocate resources on.
+ * @return
+ *  0 if the Vhost Crypto Instance is created successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id);
+
+/**
+ *  Free the Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @return
+ *  0 if the Vhost Crypto Instance is freed successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_free(int vid);
+
+/**
+ *  Enable or disable zero copy feature
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param option
+ *  Flag of zero copy feature.
+ * @return
+ *  0 if completed successfully.
+ *  Negative integer otherwise.
+ */
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option);
+
+/**
+ * Fetch a number of vring descriptors from the virtqueue and translate to DPDK
+ * crypto operations. After this function is executed, the user can enqueue
+ * the processed ops to the target cryptodev.
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param qid
+ *  Virtio queue index.
+ * @param ops
+ *  The address of an array of pointers to *rte_crypto_op* structures that must
+ *  be large enough to store *nb_ops* pointers in it.
+ * @param nb_ops
+ *  The maximum number of operations to be fetched and translated.
+ * @return
+ *  The number of fetched and processed vhost crypto request operations.
+ */
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+/**
+ * Finalize the dequeued crypto ops. After the translated crypto ops are
+ * dequeued from the cryptodev, this function shall be called to write the
+ * processed data back to the vring descriptors (if zero-copy is disabled).
+ *
+ * @param ops
+ *  The address of an array of *rte_crypto_op* structures that were dequeued
+ *  from the cryptodev.
+ * @param nb_ops
+ *  The number of operations contained in the array.
+ * @param callfds
+ *  Array to store the callfd numbers involved in this burst; it shall have
+ *  no fewer than VIRTIO_CRYPTO_MAX_NUM_BURST_VQS elements.
+ * @param nb_callfds
+ *  Pointer to return the number of callfds written to *callfds*.
+ * @return
+ *  The number of ops processed.
+ */
+uint16_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
+
 #endif /**< _VHOST_CRYPTO_H_ */
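
Not part of the patch, but for reviewers' context: a minimal sketch of how the
header above is expected to be used to bind a vhost device to a cryptodev. The
cryptodev id, the new_device callback wiring and the pool sizing are
assumptions for illustration (following the 18.05-era cryptodev session API),
and error handling is trimmed.

#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>
#include <rte_vhost_crypto.h>

static struct rte_mempool *sess_pool;

/* assumed to be registered as vhost_device_ops.new_device */
static int
new_device(int vid)
{
	uint8_t cdev_id = 0;		/* assumed target cryptodev */
	int socket_id = rte_socket_id();

	if (sess_pool == NULL) {
		/* element size must cover the cryptodev's private session
		 * data, as rte_vhost_crypto_create() requires
		 */
		sess_pool = rte_mempool_create("vhost_crypto_sess_pool",
				VHOST_CRYPTO_SESSION_MAP_ENTRIES,
				rte_cryptodev_get_private_session_size(cdev_id),
				64, 0, NULL, NULL, NULL, NULL, socket_id, 0);
		if (sess_pool == NULL)
			return -1;
	}

	return rte_vhost_crypto_create(vid, cdev_id, sess_pool, socket_id);
}
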
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index df0103129..65807488a 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -59,3 +59,14 @@ DPDK_18.02 {
 	rte_vhost_vring_call;
 
 } DPDK_17.08;
+
+DPDK_18.05 {
+	global:
+
+	rte_vhost_crypto_create;
+	rte_vhost_crypto_free;
+	rte_vhost_crypto_fetch_requests;
+	rte_vhost_crypto_finalize_requests;
+	rte_vhost_crypto_set_zero_copy;
+
+} DPDK_18.02;
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 67f1189ec..aac2eb1df 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -5,6 +5,7 @@
 
 #include <rte_malloc.h>
 #include <rte_hash.h>
+#include <rte_jhash.h>
 #include <rte_mbuf.h>
 #include <rte_cryptodev.h>
 
@@ -35,6 +36,13 @@
 #define VC_LOG_DBG(fmt, args...)
 #endif
 
+#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
+		(1 << VIRTIO_RING_F_INDIRECT_DESC) |			\
+		(1 << VIRTIO_RING_F_EVENT_IDX) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |			\
+		(1 << VIRTIO_CRYPTO_SERVICE_MAC) |			\
+		(1 << VIRTIO_NET_F_CTRL_VQ))
+
 #define GPA_TO_VVA(t, m, a)	((t)(uintptr_t)rte_vhost_gpa_to_vva(m, a))
 
 static int
@@ -1058,3 +1066,250 @@ vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
 
 	return processed;
 }
+
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct rte_hash_parameters params = {0};
+	struct vhost_crypto *vcrypto;
+	char name[128];
+	int ret;
+
+	if (!dev) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	ret = rte_vhost_driver_set_features(dev->ifname,
+			VIRTIO_CRYPTO_FEATURES);
+	if (ret < 0) {
+		VC_LOG_ERR("Error setting features");
+		return -1;
+	}
+
+	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (!vcrypto) {
+		VC_LOG_ERR("Insufficient memory");
+		return -ENOMEM;
+	}
+
+	vcrypto->sess_pool = sess_pool;
+	vcrypto->cid = cryptodev_id;
+	vcrypto->cache_session_id = UINT64_MAX;
+	vcrypto->last_session_id = 1;
+	vcrypto->dev = dev;
+	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
+
+	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
+	params.name = name;
+	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
+	params.hash_func = rte_jhash;
+	params.key_len = sizeof(uint64_t);
+	params.socket_id = socket_id;
+	vcrypto->session_map = rte_hash_create(&params);
+	if (!vcrypto->session_map) {
+		VC_LOG_ERR("Failed to creath session map");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
+	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
+			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
+			sizeof(struct vhost_crypto_data_req),
+			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
+			rte_socket_id());
+	if (!vcrypto->mbuf_pool) {
+		VC_LOG_ERR("Failed to creath mbuf pool");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	dev->extern_data = vcrypto;
+	dev->extern_ops.pre_msg_handle = NULL;
+	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
+
+	return 0;
+
+error_exit:
+	if (vcrypto->session_map)
+		rte_hash_free(vcrypto->session_map);
+	if (vcrypto->mbuf_pool)
+		rte_mempool_free(vcrypto->mbuf_pool);
+
+	rte_free(vcrypto);
+
+	return ret;
+}
+
+int
+rte_vhost_crypto_free(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_crypto *vcrypto;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	vcrypto = dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	rte_hash_free(vcrypto->session_map);
+	rte_mempool_free(vcrypto->mbuf_pool);
+	rte_free(vcrypto);
+
+	dev->extern_data = NULL;
+	dev->extern_ops.pre_msg_handle = NULL;
+	dev->extern_ops.post_msg_handle = NULL;
+
+	return 0;
+}
+
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_crypto *vcrypto;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	if (unlikely(option < 0 || option >=
+			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
+		VC_LOG_ERR("Invalid option %i", option);
+		return -EINVAL;
+	}
+
+	vcrypto = (struct vhost_crypto *)dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	if (vcrypto->option == (uint8_t)option)
+		return 0;
+
+	if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
+		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
+		return -EINVAL;
+	}
+
+	vcrypto->option = (uint8_t)option;
+
+	return 0;
+}
+
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
+	struct virtio_net *dev = get_device(vid);
+	struct rte_vhost_memory *mem;
+	struct vhost_crypto *vcrypto;
+	struct vhost_virtqueue *vq;
+	uint16_t avail_idx;
+	uint16_t start_idx;
+	uint16_t required;
+	uint16_t count;
+	uint16_t i;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return 0;
+	}
+
+	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
+		VC_LOG_ERR("Invalid qid %u", qid);
+		return 0;
+	}
+
+	vcrypto = (struct vhost_crypto *)dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return 0;
+	}
+
+	vq = dev->virtqueue[qid];
+	mem = dev->mem;
+
+	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+	start_idx = vq->last_used_idx;
+	count = avail_idx - start_idx;
+	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
+	count = RTE_MIN(count, nb_ops);
+
+	if (unlikely(count == 0))
+		return 0;
+
+	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
+	 * we need only 1 mbuf as src and dst
+	 */
+	required = count * 2;
+	if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
+			required) < 0)) {
+		VC_LOG_ERR("Insufficient memory");
+		return 0;
+	}
+
+	for (i = 0; i < count; i++) {
+		uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+		uint16_t desc_idx = vq->avail->ring[used_idx];
+		struct vring_desc *head = &vq->desc[desc_idx];
+		struct rte_crypto_op *op = ops[i];
+
+		op->sym->m_src = mbufs[i * 2];
+		op->sym->m_dst = mbufs[i * 2 + 1];
+		op->sym->m_src->data_off = 0;
+		op->sym->m_dst->data_off = 0;
+
+		if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
+				desc_idx, mem) < 0))
+			break;
+	}
+
+	vq->last_used_idx += i;
+
+	return i;
+}
+
+uint16_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
+{
+	struct rte_crypto_op **tmp_ops = ops;
+	uint16_t count = 0, left = nb_ops;
+	int callfd;
+	uint16_t idx = 0;
+
+	while (left) {
+		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
+				&callfd);
+		if (unlikely(count == 0))
+			break;
+
+		tmp_ops = &tmp_ops[count];
+		left -= count;
+
+		callfds[idx++] = callfd;
+
+		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
+			VC_LOG_ERR("Too many vqs");
+			break;
+		}
+	}
+
+	*nb_callfds = idx;
+
+	return nb_ops - left;
+}
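
Likewise not part of the patch: a minimal polling-worker sketch of the
fetch -> cryptodev -> finalize flow implemented above. The cryptodev and
queue-pair ids are assumptions, ops[] is assumed to be pre-allocated (at least
VHOST_CRYPTO_MAX_BURST_SIZE entries) from a symmetric crypto op mempool, and
enqueue shortfalls and error handling are omitted for brevity.

#include <sys/eventfd.h>

#include <rte_cryptodev.h>
#include <rte_vhost_crypto.h>

static void
vhost_crypto_poll_once(int vid, uint32_t vq_id, uint8_t cdev_id,
		uint16_t qp_id, struct rte_crypto_op **ops)
{
	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
	uint16_t nb_callfds = 0, fetched, enqd, deqd = 0, i;

	/* 1. translate up to one burst of vring descriptors into crypto ops */
	fetched = rte_vhost_crypto_fetch_requests(vid, vq_id, ops,
			VHOST_CRYPTO_MAX_BURST_SIZE);
	if (fetched == 0)
		return;

	/* 2. run the translated ops through the cryptodev queue pair */
	enqd = rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops, fetched);
	while (deqd < enqd)
		deqd += rte_cryptodev_dequeue_burst(cdev_id, qp_id,
				&ops[deqd], enqd - deqd);

	/* 3. write results back to the guest (unless zero-copy is enabled)
	 *    and collect the callfds of the virtqueues involved
	 */
	rte_vhost_crypto_finalize_requests(ops, deqd, callfds, &nb_callfds);

	/* 4. kick the guest: one eventfd write per involved virtqueue */
	for (i = 0; i < nb_callfds; i++)
		eventfd_write(callfds[i], (eventfd_t)1);
}
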
-- 
2.13.6
