[dpdk-dev] [PATCH v2] crypto/qat: fix to avoid buffer overwrite in OOP case

Fiona Trahe fiona.trahe at intel.com
Fri Dec 9 16:39:05 CET 2016


In an out-of-place operation data is DMAed from the source mbuf to the
destination mbuf. To avoid overwriting header data in the destination
mbuf, only the minimal data-set should be DMAed, i.e. the DMA start
address must not be aligned down as is done for in-place operations.

Fixes: 39e0bee48e81 ("crypto/qat: rework request builder for performance")

Signed-off-by: Fiona Trahe <fiona.trahe at intel.com>
CC: stable at dpdk.org
---
This patch depends on the following patch:
  crypto: remove unused digest-appended feature
  http://dpdk.org/dev/patchwork/patch/17079/

v2:
 - included ref for stable
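
For reviewers, the addressing change can be summarised with the sketch
below. It is illustrative only and not part of the patch:
choose_dma_start() and its parameters are hypothetical, and segmented
mbufs are ignored. The real code uses rte_pktmbuf_mtophys_offset() and
rte_pktmbuf_headroom() as shown in the diff.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper illustrating DMA start address selection.
 * src_phys/dst_phys are the physical addresses of the data start of
 * the source/destination mbuf, headroom is the source mbuf headroom,
 * min_ofs is the lowest of the cipher and auth offsets.
 */
static inline void
choose_dma_start(uint64_t src_phys, uint64_t dst_phys, uint64_t headroom,
		uint32_t min_ofs, bool out_of_place,
		uint64_t *src_start, uint64_t *dst_start)
{
	if (out_of_place) {
		/* OOP: DMA exactly the minimal data-set from min_ofs in
		 * both buffers, so nothing before min_ofs in dst (e.g.
		 * packet headers) is overwritten.
		 */
		*src_start = src_phys + min_ofs;
		*dst_start = dst_phys + min_ofs;
	} else {
		/* In-place: align the DMA start down to 64 bytes for
		 * performance ...
		 */
		*src_start = (src_phys + min_ofs) & ~0x3fULL;
		/* ... unless alignment moved it before the start of the
		 * mbuf buffer (data start minus headroom); then revert
		 * and take the performance hit.
		 */
		if (src_phys - headroom > *src_start)
			*src_start = src_phys + min_ofs;
		*dst_start = *src_start;
	}
}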

 drivers/crypto/qat/qat_crypto.c | 66 ++++++++++++++++++++---------------------
 drivers/crypto/qat/qat_crypto.h |  1 +
 2 files changed, 34 insertions(+), 33 deletions(-)

diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 6a6bd2e..afce4ac 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -955,7 +955,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	uint32_t cipher_len = 0, cipher_ofs = 0;
 	uint32_t auth_len = 0, auth_ofs = 0;
 	uint32_t min_ofs = 0;
-	uint64_t buf_start = 0;
+	uint64_t src_buf_start = 0, dst_buf_start = 0;
 
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1077,27 +1077,40 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	if (do_cipher && do_auth)
 		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
-
-	/* Start DMA at nearest aligned address below min_ofs */
-	#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
-	buf_start = rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) &
-							QAT_64_BTYE_ALIGN_MASK;
-
-	if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src)
-			- rte_pktmbuf_headroom(op->sym->m_src)) > buf_start)) {
-		/* alignment has pushed addr ahead of start of mbuf
-		 * so revert and take the performance hit
+	if (unlikely(op->sym->m_dst != NULL)) {
+		/* Out-of-place operation (OOP)
+		 * Don't align DMA start. DMA the minimum data-set
+		 * so as not to overwrite data in dest buffer
 		 */
-		buf_start = rte_pktmbuf_mtophys(op->sym->m_src);
+		src_buf_start =
+			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+		dst_buf_start =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+	} else {
+		/* In-place operation
+		 * Start DMA at nearest aligned address below min_ofs
+		 */
+		src_buf_start =
+			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+						& QAT_64_BTYE_ALIGN_MASK;
+
+		if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+					rte_pktmbuf_headroom(op->sym->m_src))
+							> src_buf_start)) {
+			/* alignment has pushed addr ahead of start of mbuf
+			 * so revert and take the performance hit
+			 */
+			src_buf_start =
+				rte_pktmbuf_mtophys_offset(op->sym->m_src,
+								min_ofs);
+		}
+		dst_buf_start = src_buf_start;
 	}
 
-	qat_req->comn_mid.dest_data_addr =
-		qat_req->comn_mid.src_data_addr = buf_start;
-
 	if (do_cipher) {
 		cipher_param->cipher_offset =
-					(uint32_t)rte_pktmbuf_mtophys_offset(
-					op->sym->m_src, cipher_ofs) - buf_start;
+				(uint32_t)rte_pktmbuf_mtophys_offset(
+				op->sym->m_src, cipher_ofs) - src_buf_start;
 		cipher_param->cipher_length = cipher_len;
 	} else {
 		cipher_param->cipher_offset = 0;
@@ -1105,7 +1118,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	}
 	if (do_auth) {
 		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
-					op->sym->m_src, auth_ofs) - buf_start;
+				op->sym->m_src, auth_ofs) - src_buf_start;
 		auth_param->auth_len = auth_len;
 	} else {
 		auth_param->auth_off = 0;
@@ -1118,21 +1131,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		(cipher_param->cipher_offset + cipher_param->cipher_length)
 		: (auth_param->auth_off + auth_param->auth_len);
 
-
-	/* out-of-place operation (OOP) */
-	if (unlikely(op->sym->m_dst != NULL)) {
-
-		if (do_auth)
-			qat_req->comn_mid.dest_data_addr =
-				rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-						auth_ofs)
-						- auth_param->auth_off;
-		else
-			qat_req->comn_mid.dest_data_addr =
-				rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-						cipher_ofs)
-						- cipher_param->cipher_offset;
-	}
+	qat_req->comn_mid.src_data_addr = src_buf_start;
+	qat_req->comn_mid.dest_data_addr = dst_buf_start;
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 0afe74e..6b84488 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -43,6 +43,7 @@
  */
 #define ALIGN_POW2_ROUNDUP(num, align) \
 	(((num) + (align) - 1) & ~((align) - 1))
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
 /**
  * Structure associated with each queue.
-- 
2.5.0