[dpdk-dev] [dpdk-dev v11 4/4] test/crypto: add unit-test for cryptodev raw API test

Akhil Goyal akhil.goyal at nxp.com
Sat Oct 10 21:55:07 CEST 2020


Hi Fan,

> +static uint32_t
> +get_raw_dp_dequeue_count(void *user_data __rte_unused)
> +{
> +	return 1;
Why is this always 1? More than one job could have been processed.
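For example, if the test tracked how many ops it had enqueued and passed that
counter as user_data, the callback could report the real number. Just a rough
sketch; in the patch user_data is the op itself, so the nb_enqueued counter
below is hypothetical:

static uint32_t
get_raw_dp_dequeue_count(void *user_data)
{
	/* user_data assumed to point at the test's enqueue counter */
	uint32_t *nb_enqueued = user_data;

	return *nb_enqueued;
}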

> +}
> +
> +static void
> +post_process_raw_dp_op(void *user_data,	uint32_t index __rte_unused,
> +		uint8_t is_op_success)
> +{
> +	struct rte_crypto_op *op = user_data;
> +	op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
> +			RTE_CRYPTO_OP_STATUS_ERROR;
> +}
> +
> +void
> +process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
> +		struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
> +		uint8_t len_in_bits, uint8_t cipher_iv_len)
> +{
> +	struct rte_crypto_sym_op *sop = op->sym;
> +	struct rte_crypto_op *ret_op = NULL;
> +	struct rte_crypto_vec data_vec[UINT8_MAX];
> +	struct rte_crypto_va_iova_ptr cipher_iv, digest, aad_auth_iv;
> +	union rte_crypto_sym_ofs ofs;
> +	struct rte_crypto_sym_vec vec;
> +	struct rte_crypto_sgl sgl;
> +	uint32_t max_len;
> +	union rte_cryptodev_session_ctx sess;
> +	uint32_t count = 0;
> +	struct rte_crypto_raw_dp_ctx *ctx;
> +	uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
> +			auth_len = 0;
> +	int32_t n;
> +	uint32_t n_success;
> +	int ctx_service_size;
> +	int32_t status = 0;
> +
> +	ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
> +	if (ctx_service_size < 0) {
> +		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +		return;
> +	}
> +
> +	ctx = malloc(ctx_service_size);
> +	if (!ctx) {
> +		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +		return;
> +	}
> +
> +	/* Both are enums, setting crypto_sess will suit any session type */
> +	sess.crypto_sess = op->sym->session;
> +
> +	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
> +			op->sess_type, sess, 0) < 0) {
> +		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +		goto exit;
> +	}
> +
> +	cipher_iv.iova = 0;
> +	cipher_iv.va = NULL;
> +	aad_auth_iv.iova = 0;
> +	aad_auth_iv.va = NULL;
> +	digest.iova = 0;
> +	digest.va = NULL;
> +	sgl.vec = data_vec;
> +	vec.num = 1;
> +	vec.sgl = &sgl;
> +	vec.iv = &cipher_iv;
> +	vec.digest = &digest;
> +	vec.aad = &aad_auth_iv;
> +	vec.status = &status;
> +
> +	ofs.raw = 0;
> +
> +	if (is_cipher && is_auth) {
> +		cipher_offset = sop->cipher.data.offset;
> +		cipher_len = sop->cipher.data.length;
> +		auth_offset = sop->auth.data.offset;
> +		auth_len = sop->auth.data.length;
> +		max_len = RTE_MAX(cipher_offset + cipher_len,
> +				auth_offset + auth_len);
> +		if (len_in_bits) {
> +			max_len = max_len >> 3;
> +			cipher_offset = cipher_offset >> 3;
> +			auth_offset = auth_offset >> 3;
> +			cipher_len = cipher_len >> 3;
> +			auth_len = auth_len >> 3;
> +		}
> +		ofs.ofs.cipher.head = cipher_offset;
> +		ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len;
> +		ofs.ofs.auth.head = auth_offset;
> +		ofs.ofs.auth.tail = max_len - auth_offset - auth_len;
> +		cipher_iv.va = rte_crypto_op_ctod_offset(op, void *,
> +				IV_OFFSET);
> +		cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
> +		aad_auth_iv.va = rte_crypto_op_ctod_offset(
> +				op, void *, IV_OFFSET + cipher_iv_len);
> +		aad_auth_iv.iova = rte_crypto_op_ctophys_offset(op,
> +				IV_OFFSET + cipher_iv_len);
> +		digest.va = (void *)sop->auth.digest.data;
> +		digest.iova = sop->auth.digest.phys_addr;
> +
> +	} else if (is_cipher) {
> +		cipher_offset = sop->cipher.data.offset;
> +		cipher_len = sop->cipher.data.length;
> +		max_len = cipher_len + cipher_offset;
> +		if (len_in_bits) {
> +			max_len = max_len >> 3;
> +			cipher_offset = cipher_offset >> 3;
> +			cipher_len = cipher_len >> 3;
> +		}
> +		ofs.ofs.cipher.head = cipher_offset;
> +		ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len;
> +		cipher_iv.va = rte_crypto_op_ctod_offset(op, void *,
> +				IV_OFFSET);
> +		cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
> +
> +	} else if (is_auth) {
> +		auth_offset = sop->auth.data.offset;
> +		auth_len = sop->auth.data.length;
> +		max_len = auth_len + auth_offset;
> +		if (len_in_bits) {
> +			max_len = max_len >> 3;
> +			auth_offset = auth_offset >> 3;
> +			auth_len = auth_len >> 3;
> +		}
> +		ofs.ofs.auth.head = auth_offset;
> +		ofs.ofs.auth.tail = max_len - auth_offset - auth_len;
> +		aad_auth_iv.va = rte_crypto_op_ctod_offset(
> +				op, void *, IV_OFFSET + cipher_iv_len);
> +		aad_auth_iv.iova = rte_crypto_op_ctophys_offset(op,
> +				IV_OFFSET + cipher_iv_len);
> +		digest.va = (void *)sop->auth.digest.data;
> +		digest.iova = sop->auth.digest.phys_addr;
> +
> +	} else { /* aead */
> +		cipher_offset = sop->aead.data.offset;
> +		cipher_len = sop->aead.data.length;
> +		max_len = cipher_len + cipher_offset;
> +		if (len_in_bits) {
> +			max_len = max_len >> 3;
> +			cipher_offset = cipher_offset >> 3;
> +			cipher_len = cipher_len >> 3;
> +		}
> +		ofs.ofs.cipher.head = cipher_offset;
> +		ofs.ofs.cipher.tail = max_len - cipher_offset - cipher_len;
> +		cipher_iv.va = rte_crypto_op_ctod_offset(op, void *,
> +				IV_OFFSET);
> +		cipher_iv.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
> +		aad_auth_iv.va = (void *)sop->aead.aad.data;
> +		aad_auth_iv.iova = sop->aead.aad.phys_addr;
> +		digest.va = (void *)sop->aead.digest.data;
> +		digest.iova = sop->aead.digest.phys_addr;
> +	}
> +
> +	n = rte_crypto_mbuf_to_vec(sop->m_src, 0, max_len,
> +			data_vec, RTE_DIM(data_vec));
> +	if (n < 0 || n > sop->m_src->nb_segs) {
> +		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +		goto exit;
> +	}
> +
> +	sgl.num = n;
> +
> +	if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
> +			&status)
> +			< 1) {
> +		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +		goto exit;
> +	}
This check will always fail in your case. I believe you have not tested it with
the recent changes that I suggested on v10.
rte_cryptodev_raw_enqueue_burst returns 0 if rte_cryptodev_raw_enqueue_done
needs to be called, otherwise it returns the number of successfully enqueued
descriptors.
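
With that behaviour, I would expect the enqueue path to look roughly like the
below instead, covering both this check and the status == 0 block that follows.
Untested sketch only:

	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, (void **)&op,
			&status);
	if (n == 0) {
		/* descriptors only cached so far, flush them explicitly */
		if (rte_cryptodev_raw_enqueue_done(ctx, 1) < 0) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			goto exit;
		}
	} else if (n != 1) {
		/* exactly one descriptor should have been enqueued */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		goto exit;
	}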


> +
> +	if (status == 0) {
> +		status = rte_cryptodev_raw_enqueue_done(ctx, 1);
> +		if (status < 0) {
> +			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +			goto exit;
> +		}
> +	}
> +
> +	n = n_success = 0;
> +	while (count++ < MAX_RAW_DEQUEUE_COUNT && n == 0) {
> +		n = rte_cryptodev_raw_dequeue_burst(ctx,
> +			get_raw_dp_dequeue_count,
> +			post_process_raw_dp_op,
> +				(void **)&ret_op, 0, &n_success, &status);
> +
> +		if (n == 0)
> +			rte_pause();
> +	}

Same comment here as well.
rte_cryptodev_raw_dequeue_burst returns 0 if rte_cryptodev_raw_dequeue_done
needs to be called. These checks do not seem to be consistent with the API
documentation.
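
i.e. on the dequeue side I would expect n == 0 to mean the dequeued result is
only cached and needs rte_cryptodev_raw_dequeue_done(), rather than "nothing
dequeued yet". Rough sketch only, based on my reading of the API:

	n = rte_cryptodev_raw_dequeue_burst(ctx, get_raw_dp_dequeue_count,
			post_process_raw_dp_op, (void **)&ret_op, 0,
			&n_success, &status);
	if (n == 0) {
		if (rte_cryptodev_raw_dequeue_done(ctx, 1) < 0) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			goto exit;
		}
	}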

> +
> +	if (n == 1 && status == 0) {
> +		if (rte_cryptodev_raw_dequeue_done(ctx, 1) < 0) {
> +			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
> +			goto exit;
> +		}
> +	}
> +
> +	op->status = (count == MAX_RAW_DEQUEUE_COUNT + 1 || ret_op != op ||
> +			n_success < 1) ? RTE_CRYPTO_OP_STATUS_ERROR :
> +					RTE_CRYPTO_OP_STATUS_SUCCESS;
> +
> +exit:
> +	free(ctx);
> +}
> +

