[dpdk-dev] [PATCH 3/8] cryptodev: add helper functions for new datapath interface

Akhil Goyal gakhil at marvell.com
Sun Aug 29 14:51:34 CEST 2021


Add helper functions and macros to help drivers
transition to the new datapath interface.

Signed-off-by: Akhil Goyal <gakhil at marvell.com>
---
 lib/cryptodev/cryptodev_pmd.h | 246 ++++++++++++++++++++++++++++++++++
 lib/cryptodev/rte_cryptodev.c |  40 +++++-
 lib/cryptodev/version.map     |   4 +
 3 files changed, 289 insertions(+), 1 deletion(-)
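
As a usage sketch for reviewers (not part of this patch): a PMD with an
existing queue-pair based burst function, here a hypothetical
foo_enqueue_burst()/foo_dequeue_burst() of type
uint16_t (*)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops),
would generate and register the new dev_id/qp_id based wrappers roughly
as follows:

	/* Generate wrappers around the existing qp-based functions. */
	_RTE_CRYPTO_ENQ_DEF(foo_enqueue_burst)
	_RTE_CRYPTO_DEQ_DEF(foo_dequeue_burst)

	/* At device start, plug the wrappers into the fast-path table. */
	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_ENQ_FUNC(foo_enqueue_burst));
	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_DEQ_FUNC(foo_dequeue_burst));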

diff --git a/lib/cryptodev/cryptodev_pmd.h b/lib/cryptodev/cryptodev_pmd.h
index eeaea13a23..d40e5cee94 100644
--- a/lib/cryptodev/cryptodev_pmd.h
+++ b/lib/cryptodev/cryptodev_pmd.h
@@ -70,6 +70,13 @@ struct cryptodev_driver {
 	const struct rte_driver *driver;
 	uint8_t id;
 };
+/**
+ * @internal
+ * The pool of *rte_cryptodev* structures. The size of the pool
+ * is set at compile time by RTE_CRYPTO_MAX_DEVS in rte_cryptodev.c.
+ */
+extern struct rte_cryptodev rte_crypto_devices[];
+
 
 /**
  * Get the rte_cryptodev structure device pointer for the device. Assumes a
@@ -529,6 +536,245 @@ __rte_internal
 void
 rte_cryptodev_api_reset(struct rte_cryptodev_api *api);
 
+/**
+ * @internal
+ * Helper routine for cryptodev_dequeue_burst.
+ * Should be called as the first thing on entry to the PMD's
+ * rte_cryptodev_dequeue_burst implementation.
+ * Does the necessary checks and returns a pointer to the device queue pair.
+ *
+ * @param dev_id
+ *  The device identifier of the crypto device.
+ * @param qp_id
+ *  The index of the queue pair from which processed crypto ops will
+ *  be dequeued.
+ *
+ * @return
+ *  Pointer to device queue pair on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_cryptodev_dequeue_prolog(uint8_t dev_id, uint8_t qp_id)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	return dev->data->queue_pairs[qp_id];
+}
+
+/**
+ * @internal
+ * Helper routine for crypto driver dequeue API.
+ * Should be called on exit from the PMD's rte_cryptodev_dequeue_burst
+ * implementation.
+ * Does the post-processing: invokes dequeue callbacks (if any), tracing, etc.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ * @param qp_id
+ *  The index of the queue pair from which the crypto ops were dequeued.
+ * @param ops
+ *   The address of an array of pointers to *rte_crypto_op* structures that
+ *   have been retrieved from the device.
+ * @param nb_ops
+ *   The number of ops that were retrieved from the device.
+ *
+ * @return
+ *  The number of crypto ops effectively supplied to the *ops* array.
+ */
+__rte_internal
+static inline uint16_t
+_rte_cryptodev_dequeue_epilog(uint16_t dev_id, uint16_t qp_id,
+	struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+#ifdef RTE_CRYPTO_CALLBACKS
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	if (unlikely(dev->deq_cbs != NULL)) {
+		struct rte_cryptodev_cb_rcu *list;
+		struct rte_cryptodev_cb *cb;
+
+		/* __ATOMIC_RELEASE memory order was used when the
+		 * callback was inserted into the list.
+		 * Since there is a clear dependency between loading
+		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+		 * not required.
+		 */
+		list = &dev->deq_cbs[qp_id];
+		rte_rcu_qsbr_thread_online(list->qsbr, 0);
+		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+		while (cb != NULL) {
+			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+					cb->arg);
+			cb = cb->next;
+		}
+
+		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+	}
+#endif
+
+	return nb_ops;
+}
+#define _RTE_CRYPTO_DEQ_FUNC(fn)	_rte_crypto_deq_##fn
+
+/**
+ * @internal
+ * Helper macro to declare a new API wrapper for a PMD dequeue function.
+ */
+#define _RTE_CRYPTO_DEQ_PROTO(fn) \
+	uint16_t _RTE_CRYPTO_DEQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
+			struct rte_crypto_op **ops, uint16_t nb_ops)
+
+/**
+ * @internal
+ * Helper macro to define a new API wrapper for a PMD dequeue function.
+ */
+#define _RTE_CRYPTO_DEQ_DEF(fn) \
+_RTE_CRYPTO_DEQ_PROTO(fn) \
+{ \
+	void *qp = _rte_cryptodev_dequeue_prolog(dev_id, qp_id); \
+	if (qp == NULL) \
+		return 0; \
+	nb_ops = fn(qp, ops, nb_ops); \
+	return _rte_cryptodev_dequeue_epilog(dev_id, qp_id, ops, nb_ops); \
+}
+
+/**
+ * @internal
+ * Helper routine for cryptodev_enqueue_burst.
+ * Should be called as the first thing on entry to the PMD's
+ * rte_cryptodev_enqueue_burst implementation.
+ * Runs the enqueue callbacks, if any, and returns the device queue pair.
+ *
+ * @param dev_id
+ *  The device identifier of the crypto device.
+ * @param qp_id
+ *  The index of the queue pair to which crypto ops will be enqueued.
+ * @param ops
+ *   The address of an array of pointers to *rte_crypto_op* structures that
+ *   will be enqueued to the device.
+ * @param nb_ops
+ *   The number of ops that will be sent to the device.
+ *
+ * @return
+ *  Pointer to device queue pair on success or NULL otherwise.
+ */
+__rte_internal
+static inline void *
+_rte_cryptodev_enqueue_prolog(uint8_t dev_id, uint8_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+#ifdef RTE_CRYPTO_CALLBACKS
+	if (unlikely(dev->enq_cbs != NULL)) {
+		struct rte_cryptodev_cb_rcu *list;
+		struct rte_cryptodev_cb *cb;
+
+		/* __ATOMIC_RELEASE memory order was used when the
+		 * callback was inserted into the list.
+		 * Since there is a clear dependency between loading
+		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+		 * not required.
+		 */
+		list = &dev->enq_cbs[qp_id];
+		rte_rcu_qsbr_thread_online(list->qsbr, 0);
+		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
+
+		while (cb != NULL) {
+			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
+					cb->arg);
+			cb = cb->next;
+		}
+
+		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
+	}
+#endif
+	return dev->data->queue_pairs[qp_id];
+}
+
+#define _RTE_CRYPTO_ENQ_FUNC(fn)	_rte_crypto_enq_##fn
+
+/**
+ * @internal
+ * Helper macro to declare a new API wrapper for a PMD enqueue function.
+ */
+#define _RTE_CRYPTO_ENQ_PROTO(fn) \
+	uint16_t _RTE_CRYPTO_ENQ_FUNC(fn)(uint8_t dev_id, uint8_t qp_id, \
+			struct rte_crypto_op **ops, uint16_t nb_ops)
+
+/**
+ * @internal
+ * Helper macro to define a new API wrapper for a PMD enqueue function.
+ */
+#define _RTE_CRYPTO_ENQ_DEF(fn) \
+_RTE_CRYPTO_ENQ_PROTO(fn) \
+{ \
+	void *qp = _rte_cryptodev_enqueue_prolog(dev_id, qp_id, ops, nb_ops); \
+	if (qp == NULL) \
+		return 0; \
+	return fn(qp, ops, nb_ops); \
+}
+
+/**
+ * @internal
+ * Helper routine to get the enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  The function on success, or NULL with rte_errno set to EINVAL.
+ */
+__rte_internal
+rte_crypto_enqueue_burst_t
+rte_crypto_get_enq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to get the dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  The function on success, or NULL with rte_errno set to EINVAL.
+ */
+__rte_internal
+rte_crypto_dequeue_burst_t
+rte_crypto_get_deq_burst_fn(uint8_t dev_id);
+
+/**
+ * @internal
+ * Helper routine to set the enqueue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn is invalid.
+ */
+__rte_internal
+int
+rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn);
+
+/**
+ * @internal
+ * Helper routine to set the dequeue burst function of a given device.
+ *
+ * @param dev_id
+ *  The device identifier of the Crypto device.
+ *
+ * @return
+ *  0		Success.
+ *  -EINVAL	Failure if dev_id or fn is invalid.
+ */
+__rte_internal
+int
+rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn);
+
+
 static inline void *
 get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
 		uint8_t driver_id) {
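
Note: the getters/setters above are intended to feed the flattened
fast-path table; a minimal sketch of the application-facing call, assuming
the rte_cryptodev_api table introduced earlier in this series, would be:

	static inline uint16_t
	rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
			struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		/* Dispatch through the per-device function table. */
		return rte_cryptodev_api[dev_id].dequeue_burst(dev_id, qp_id,
				ops, nb_ops);
	}

The exact signature is defined elsewhere in this series; this only
illustrates the dispatch.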
diff --git a/lib/cryptodev/rte_cryptodev.c b/lib/cryptodev/rte_cryptodev.c
index 26f8390668..4ab82d21d0 100644
--- a/lib/cryptodev/rte_cryptodev.c
+++ b/lib/cryptodev/rte_cryptodev.c
@@ -44,7 +44,7 @@
 
 static uint8_t nb_drivers;
 
-static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
+struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
 
 struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;
 
@@ -1270,6 +1270,44 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
 			socket_id);
 }
 
+rte_crypto_enqueue_burst_t
+rte_crypto_get_enq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_cryptodev_api[dev_id].enqueue_burst;
+}
+
+rte_crypto_dequeue_burst_t
+rte_crypto_get_deq_burst_fn(uint8_t dev_id)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	return rte_cryptodev_api[dev_id].dequeue_burst;
+}
+
+int
+rte_crypto_set_enq_burst_fn(uint8_t dev_id, rte_crypto_enqueue_burst_t fn)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_cryptodev_api[dev_id].enqueue_burst = fn;
+	return 0;
+}
+
+int
+rte_crypto_set_deq_burst_fn(uint8_t dev_id, rte_crypto_dequeue_burst_t fn)
+{
+	if (dev_id >= RTE_CRYPTO_MAX_DEVS || fn == NULL)
+		return -EINVAL;
+	rte_cryptodev_api[dev_id].dequeue_burst = fn;
+	return 0;
+}
+
 struct rte_cryptodev_cb *
 rte_cryptodev_add_enq_callback(uint8_t dev_id,
 			       uint16_t qp_id,
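
Note: per the checks above, on an out-of-range dev_id the getters return
NULL and set rte_errno to EINVAL, while the setters return -EINVAL, so a
caller is expected to check, e.g.:

	rte_crypto_enqueue_burst_t enq = rte_crypto_get_enq_burst_fn(dev_id);

	if (enq == NULL)
		return -rte_errno; /* EINVAL for an invalid dev_id */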
diff --git a/lib/cryptodev/version.map b/lib/cryptodev/version.map
index 050089ae55..b64384cc05 100644
--- a/lib/cryptodev/version.map
+++ b/lib/cryptodev/version.map
@@ -116,6 +116,10 @@ EXPERIMENTAL {
 INTERNAL {
 	global:
 
+	rte_crypto_get_deq_burst_fn;
+	rte_crypto_get_enq_burst_fn;
+	rte_crypto_set_deq_burst_fn;
+	rte_crypto_set_enq_burst_fn;
 	rte_cryptodev_allocate_driver;
 	rte_cryptodev_api_reset;
 	rte_cryptodev_pmd_allocate;
-- 
2.25.1


