[dpdk-dev] [PATCH 4/8] cryptodev: use new API for datapath functions

Akhil Goyal gakhil at marvell.com
Sun Aug 29 14:51:35 CEST 2021


The datapath inline APIs (rte_cryptodev_enqueue_burst/
rte_cryptodev_dequeue_burst) are updated to call the new
rte_cryptodev_api->enqueue_burst/rte_cryptodev_api->dequeue_burst
APIs, indexed by dev_id.
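
For reference, a minimal sketch of the per-device function table this
call pattern assumes; only the indexing by dev_id and the
(dev_id, qp_id, ops, nb_ops) signature are taken from the hunks below,
the typedef and field layout are illustrative:

    /* Illustrative sketch only, not the exact upstream definition. */
    typedef uint16_t (*crypto_burst_fn_t)(uint8_t dev_id, uint16_t qp_id,
                                          struct rte_crypto_op **ops,
                                          uint16_t nb_ops);

    struct rte_cryptodev_api {
            crypto_burst_fn_t enqueue_burst; /* PMD enqueue entry point */
            crypto_burst_fn_t dequeue_burst; /* PMD dequeue entry point */
    };

    /* Flat array indexed by dev_id, populated when the PMD is probed. */
    extern struct rte_cryptodev_api rte_cryptodev_api[];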

Signed-off-by: Akhil Goyal <gakhil at marvell.com>
---
 lib/cryptodev/rte_cryptodev.h | 62 +++--------------------------------
 1 file changed, 5 insertions(+), 57 deletions(-)

diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h
index 3d99dd1cf5..49919a43a4 100644
--- a/lib/cryptodev/rte_cryptodev.h
+++ b/lib/cryptodev/rte_cryptodev.h
@@ -1820,36 +1820,10 @@ static inline uint16_t
 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
-
 	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
-	nb_ops = (*dev->dequeue_burst)
-			(dev->data->queue_pairs[qp_id], ops, nb_ops);
-#ifdef RTE_CRYPTO_CALLBACKS
-	if (unlikely(dev->deq_cbs != NULL)) {
-		struct rte_cryptodev_cb_rcu *list;
-		struct rte_cryptodev_cb *cb;
 
-		/* __ATOMIC_RELEASE memory order was used when the
-		 * call back was inserted into the list.
-		 * Since there is a clear dependency between loading
-		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
-		 * not required.
-		 */
-		list = &dev->deq_cbs[qp_id];
-		rte_rcu_qsbr_thread_online(list->qsbr, 0);
-		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
-
-		while (cb != NULL) {
-			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
-					cb->arg);
-			cb = cb->next;
-		};
-
-		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
-	}
-#endif
-	return nb_ops;
+	return rte_cryptodev_api[dev_id].dequeue_burst(
+				dev_id, qp_id, ops, nb_ops);
 }
 
 /**
@@ -1887,36 +1861,10 @@ static inline uint16_t
 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
-
-#ifdef RTE_CRYPTO_CALLBACKS
-	if (unlikely(dev->enq_cbs != NULL)) {
-		struct rte_cryptodev_cb_rcu *list;
-		struct rte_cryptodev_cb *cb;
-
-		/* __ATOMIC_RELEASE memory order was used when the
-		 * call back was inserted into the list.
-		 * Since there is a clear dependency between loading
-		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
-		 * not required.
-		 */
-		list = &dev->enq_cbs[qp_id];
-		rte_rcu_qsbr_thread_online(list->qsbr, 0);
-		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
-
-		while (cb != NULL) {
-			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
-					cb->arg);
-			cb = cb->next;
-		};
-
-		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
-	}
-#endif
-
 	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
-	return (*dev->enqueue_burst)(
-			dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+	return rte_cryptodev_api[dev_id].enqueue_burst(
+				dev_id, qp_id, ops, nb_ops);
 }
 
 
-- 
2.25.1
