[dpdk-dev] [PATCH] crypto/aesni_gcm: do crypto op in dequeue function

Sergio Gonzalez Monroy sergio.gonzalez.monroy at intel.com
Wed Mar 29 15:42:53 CEST 2017


There is a bug when more crypto ops are enqueued than dequeued.
The return value is not checked when trying to enqueue the
processed crypto op into the internal ring, which in the case of being
full will result in crypto ops and mbufs being leaked.
The issue is more obvious with different cores doing enqueue/dequeue.

This patch moves the crypto operation to the dequeue function which
fixes the above issue without having to check for the number of free
entries in the ring.

Fixes: eec136f3c54f ("aesni_gcm: add driver for AES-GCM crypto operations")

Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy at intel.com>
---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index a2d10a5..0ca834e 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -375,55 +375,58 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
-
-	rte_ring_enqueue(qp->processed_pkts, (void *)op);
 }
 
 static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct aesni_gcm_session *sess;
 	struct aesni_gcm_qp *qp = queue_pair;
 
-	int i, retval = 0;
+	int retval = 0;
+	unsigned i, nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops);
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < nb_dequeued; i++) {
 
 		sess = aesni_gcm_get_session(qp, ops[i]->sym);
 		if (unlikely(sess == NULL)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
+			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
 		retval = process_gcm_crypto_op(ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
+			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
 		handle_completed_gcm_crypto_op(qp, ops[i]);
-
-		qp->qp_stats.enqueued_count++;
 	}
+
+	qp->qp_stats.dequeued_count += i;
+
 	return i;
 }
 
 static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct aesni_gcm_qp *qp = queue_pair;
 
-	unsigned nb_dequeued;
+	unsigned nb_enqueued;
 
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
 			(void **)ops, nb_ops);
-	qp->qp_stats.dequeued_count += nb_dequeued;
+	qp->qp_stats.enqueued_count += nb_enqueued;
 
-	return nb_dequeued;
+	return nb_enqueued;
 }
 
 static int aesni_gcm_remove(const char *name);
-- 
2.9.3



More information about the dev mailing list