[dpdk-dev] [PATCH] app/crypto-perf: support IMIX

Pablo de Lara pablo.de.lara.guarch at intel.com
Wed Dec 13 14:14:08 CET 2017


Add support for IMIX performance tests, where a distribution
of various packet sizes can be submitted to a crypto
device, testing a scenario closer to real-world traffic.

A sequence of packet sizes is generated, selected randomly from the list
of packet sizes (set with the "buffer-sz" parameter), according to the list
of weights per packet size (set with the "imix" parameter).
The length of this sequence equals the pool size
(set with the "pool-sz" parameter).

This sequence is reused repeatedly for all the crypto
operations submitted to the crypto device (set with the "--total-ops" parameter).
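
For reference, the sequence generation works roughly as sketched below. This is
a minimal illustration of the weighted draw added to main.c by this patch; the
standalone helper and its name "imix_fill_sequence" are illustrative only and
not part of the patch (rte_rand() comes from <rte_random.h>):

	/*
	 * Fill "sequence" (of length pool_sz) with packet sizes drawn
	 * randomly from "sizes", according to their "weights".
	 */
	static void
	imix_fill_sequence(uint32_t *sequence, uint32_t pool_sz,
			const uint32_t *sizes, const uint32_t *weights,
			uint8_t count)
	{
		uint32_t total[count];
		uint32_t i, op;

		/* Accumulate the weights, so each size owns a contiguous range */
		total[0] = weights[0];
		for (i = 1; i < count; i++)
			total[i] = total[i - 1] + weights[i];

		/* Draw one packet size per pool entry */
		for (op = 0; op < pool_sz; op++) {
			uint32_t r = rte_rand() % total[count - 1];

			for (i = 0; i < count; i++)
				if (r < total[i])
					break;
			sequence[op] = sizes[i];
		}
	}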

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com>
---
 app/test-crypto-perf/cperf_ops.c                 | 77 ++++++++++++++------
 app/test-crypto-perf/cperf_ops.h                 |  2 +-
 app/test-crypto-perf/cperf_options.h             |  4 ++
 app/test-crypto-perf/cperf_options_parsing.c     | 61 ++++++++++++++--
 app/test-crypto-perf/cperf_test_latency.c        |  4 +-
 app/test-crypto-perf/cperf_test_pmd_cyclecount.c |  8 ++-
 app/test-crypto-perf/cperf_test_throughput.c     |  3 +-
 app/test-crypto-perf/cperf_test_verify.c         |  3 +-
 app/test-crypto-perf/main.c                      | 89 ++++++++++++++++++++----
 doc/guides/tools/cryptoperf.rst                  | 14 ++++
 10 files changed, 220 insertions(+), 45 deletions(-)

diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 23d30ca..e3c7ab4 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -41,7 +41,7 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -62,7 +62,12 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
 							dst_buf_offset);
 
 		/* cipher parameters */
-		sym_op->cipher.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
 		sym_op->cipher.data.offset = 0;
 	}
 
@@ -75,7 +80,7 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector __rte_unused,
-		uint16_t iv_offset __rte_unused)
+		uint16_t iv_offset __rte_unused, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -96,7 +101,12 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops,
 							dst_buf_offset);
 
 		/* auth parameters */
-		sym_op->auth.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
 		sym_op->auth.data.offset = 0;
 	}
 
@@ -109,7 +119,7 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -130,12 +140,17 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops,
 							dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 
 		sym_op->cipher.data.offset = 0;
 	}
@@ -160,7 +175,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -225,12 +240,17 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
 
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
@@ -255,7 +275,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 
@@ -276,12 +296,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 							dst_buf_offset);
 
 		/* cipher parameters */
+		if (options->imix_distribution_count) {
+			sym_op->cipher.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->cipher.data.length = options->test_buffer_size;
+
 		if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
 				options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
-			sym_op->cipher.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->cipher.data.length = options->test_buffer_size;
+			sym_op->cipher.data.length <<= 3;
 
 		sym_op->cipher.data.offset = 0;
 
@@ -321,12 +346,17 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
 					rte_pktmbuf_iova_offset(buf, offset);
 		}
 
+		if (options->imix_distribution_count) {
+			sym_op->auth.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->auth.data.length = options->test_buffer_size;
+
 		if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
 				options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
-			sym_op->auth.data.length = options->test_buffer_size << 3;
-		else
-			sym_op->auth.data.length = options->test_buffer_size;
+			sym_op->auth.data.length <<= 3;
 
 		sym_op->auth.data.offset = 0;
 	}
@@ -360,7 +390,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset)
+		uint16_t iv_offset, uint32_t *imix_idx)
 {
 	uint16_t i;
 	/* AAD is placed after the IV */
@@ -384,7 +414,12 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
 							dst_buf_offset);
 
 		/* AEAD parameters */
-		sym_op->aead.data.length = options->test_buffer_size;
+		if (options->imix_distribution_count) {
+			sym_op->aead.data.length =
+				options->imix_buffer_sizes[*imix_idx];
+			*imix_idx = (*imix_idx + 1) % options->pool_sz;
+		} else
+			sym_op->aead.data.length = options->test_buffer_size;
 		sym_op->aead.data.offset = 0;
 
 		sym_op->aead.aad.data = rte_crypto_op_ctod_offset(ops[i],
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
index 94951cc..be4c476 100644
--- a/app/test-crypto-perf/cperf_ops.h
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -51,7 +51,7 @@ typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
 		uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
 		const struct cperf_options *options,
 		const struct cperf_test_vector *test_vector,
-		uint16_t iv_offset);
+		uint16_t iv_offset, uint32_t *imix_idx);
 
 struct cperf_op_fns {
 	cperf_sessions_create_t sess_create;
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index da4fb47..b0c9f44 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -13,6 +13,7 @@
 #define CPERF_BUFFER_SIZE	("buffer-sz")
 #define CPERF_SEGMENT_SIZE	("segment-sz")
 #define CPERF_DESC_NB		("desc-nb")
+#define CPERF_IMIX		("imix")
 
 #define CPERF_DEVTYPE		("devtype")
 #define CPERF_OPTYPE		("optype")
@@ -73,6 +74,7 @@ struct cperf_options {
 	uint32_t total_ops;
 	uint32_t segment_sz;
 	uint32_t test_buffer_size;
+	uint32_t *imix_buffer_sizes;
 	uint32_t nb_descriptors;
 	uint16_t nb_qps;
 
@@ -122,6 +124,8 @@ struct cperf_options {
 
 	/* pmd-cyclecount specific options */
 	uint32_t pmdcc_delay;
+	uint32_t imix_distribution_list[MAX_LIST];
+	uint8_t imix_distribution_count;
 };
 
 void
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index ad43e84..ad5fa76 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -57,6 +57,7 @@ usage(char *progname)
 		" --total-ops N: set the number of total operations performed\n"
 		" --burst-sz N: set the number of packets per burst\n"
 		" --buffer-sz N: set the size of a single packet\n"
+		" --imix N: set the distribution of packet sizes\n"
 		" --segment-sz N: set the size of the segment to use\n"
 		" --desc-nb N: set number of descriptors for each crypto device\n"
 		" --devtype TYPE: set crypto device type to use\n"
@@ -243,6 +244,8 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
 	char *token;
 	uint32_t number;
 	uint8_t count = 0;
+	uint32_t temp_min;
+	uint32_t temp_max;
 
 	char *copy_arg = strdup(arg);
 
@@ -261,8 +264,8 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
 			goto err_list;
 
 		list[count++] = number;
-		*min = number;
-		*max = number;
+		temp_min = number;
+		temp_max = number;
 	} else
 		goto err_list;
 
@@ -283,14 +286,19 @@ parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
 
 		list[count++] = number;
 
-		if (number < *min)
-			*min = number;
-		if (number > *max)
-			*max = number;
+		if (number < temp_min)
+			temp_min = number;
+		if (number > temp_max)
+			temp_max = number;
 
 		token = strtok(NULL, ",");
 	}
 
+	if (min)
+		*min = temp_min;
+	if (max)
+		*max = temp_max;
+
 	free(copy_arg);
 	return count;
 
@@ -387,6 +395,29 @@ parse_segment_sz(struct cperf_options *opts, const char *arg)
 }
 
 static int
+parse_imix(struct cperf_options *opts, const char *arg)
+{
+	int ret;
+
+	ret = parse_list(arg, opts->imix_distribution_list,
+				NULL, NULL);
+	if (ret < 0) {
+		RTE_LOG(ERR, USER1, "failed to parse imix distribution\n");
+		return -1;
+	}
+
+	opts->imix_distribution_count = ret;
+
+	if (opts->imix_distribution_count <= 1) {
+		RTE_LOG(ERR, USER1, "imix distribution should have "
+				"at least two entries\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
 parse_desc_nb(struct cperf_options *opts, const char *arg)
 {
 	int ret = parse_uint32_t(&opts->nb_descriptors, arg);
@@ -722,6 +753,7 @@ static struct option lgopts[] = {
 	{ CPERF_SEGMENT_SIZE, required_argument, 0, 0 },
 	{ CPERF_DESC_NB, required_argument, 0, 0 },
 
+	{ CPERF_IMIX, required_argument, 0, 0 },
 	{ CPERF_DEVTYPE, required_argument, 0, 0 },
 	{ CPERF_OPTYPE, required_argument, 0, 0 },
 
@@ -786,6 +818,7 @@ cperf_options_default(struct cperf_options *opts)
 	 */
 	opts->segment_sz = 0;
 
+	opts->imix_distribution_count = 0;
 	strncpy(opts->device_type, "crypto_aesni_mb",
 			sizeof(opts->device_type));
 	opts->nb_qps = 1;
@@ -835,6 +868,7 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
 		{ CPERF_OPTYPE,		parse_op_type },
 		{ CPERF_SESSIONLESS,	parse_sessionless },
 		{ CPERF_OUT_OF_PLACE,	parse_out_of_place },
+		{ CPERF_IMIX,		parse_imix },
 		{ CPERF_TEST_FILE,	parse_test_file },
 		{ CPERF_TEST_NAME,	parse_test_name },
 		{ CPERF_CIPHER_ALGO,	parse_cipher_algo },
@@ -973,6 +1007,14 @@ cperf_options_check(struct cperf_options *options)
 		return -EINVAL;
 	}
 
+	if ((options->imix_distribution_count != 0) &&
+			(options->imix_distribution_count !=
+				options->buffer_size_count)) {
+		RTE_LOG(ERR, USER1, "IMIX distribution must have the same "
+				"number of entries as the buffer size list\n");
+		return -EINVAL;
+	}
+
 	if (options->test == CPERF_TEST_TYPE_VERIFY &&
 			options->test_file == NULL) {
 		RTE_LOG(ERR, USER1, "Define path to the file with test"
@@ -1025,6 +1067,13 @@ cperf_options_check(struct cperf_options *options)
 		return -EINVAL;
 	}
 
+	if (options->test == CPERF_TEST_TYPE_VERIFY &&
+			options->imix_distribution_count > 0) {
+		RTE_LOG(ERR, USER1, "IMIX is not allowed when "
+				"using the verify test.\n");
+		return -EINVAL;
+	}
+
 	if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
 		if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
 				options->auth_op !=
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index ca2a4ba..7c42b59 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -154,6 +154,7 @@ cperf_latency_test_runner(void *arg)
 	struct cperf_latency_ctx *ctx = arg;
 	uint16_t test_burst_size;
 	uint8_t burst_size_idx = 0;
+	uint32_t imix_idx = 0;
 
 	static int only_once;
 
@@ -228,7 +229,8 @@ cperf_latency_test_runner(void *arg)
 			(ctx->populate_ops)(ops, ctx->src_buf_offset,
 					ctx->dst_buf_offset,
 					burst_size, ctx->sess, ctx->options,
-					ctx->test_vector, iv_offset);
+					ctx->test_vector, iv_offset,
+					&imix_idx);
 
 			tsc_start = rte_rdtsc_precise();
 
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 9b41724..a91f68b 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -169,6 +169,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
 	uint32_t iter_ops_needed =
 			RTE_MIN(state->opts->nb_descriptors, iter_ops_left);
 	uint32_t cur_iter_op;
+	uint32_t imix_idx = 0;
 
 	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
 			cur_iter_op += test_burst_size) {
@@ -193,7 +194,8 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
 				state->ctx->dst_buf_offset,
 				burst_size,
 				state->ctx->sess, state->opts,
-				state->ctx->test_vector, iv_offset);
+				state->ctx->test_vector, iv_offset,
+				&imix_idx);
 
 #ifdef CPERF_LINEARIZATION_ENABLE
 		/* Check if source mbufs require coalescing */
@@ -218,6 +220,7 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
 		uint32_t iter_ops_needed, uint16_t test_burst_size)
 {
 	uint32_t cur_iter_op;
+	uint32_t imix_idx = 0;
 
 	for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
 			cur_iter_op += test_burst_size) {
@@ -242,7 +245,8 @@ pmd_cyclecount_build_ops(struct pmd_cyclecount_state *state,
 				state->ctx->dst_buf_offset,
 				burst_size,
 				state->ctx->sess, state->opts,
-				state->ctx->test_vector, iv_offset);
+				state->ctx->test_vector, iv_offset,
+				&imix_idx);
 	}
 	return 0;
 }
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index b84dc63..905cc36 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -120,6 +120,7 @@ cperf_throughput_test_runner(void *test_ctx)
 	struct cperf_throughput_ctx *ctx = test_ctx;
 	uint16_t test_burst_size;
 	uint8_t burst_size_idx = 0;
+	uint32_t imix_idx = 0;
 
 	static int only_once;
 
@@ -193,7 +194,7 @@ cperf_throughput_test_runner(void *test_ctx)
 					ctx->dst_buf_offset,
 					ops_needed, ctx->sess,
 					ctx->options, ctx->test_vector,
-					iv_offset);
+					iv_offset, &imix_idx);
 
 			/**
 			 * When ops_needed is smaller than ops_enqd, the
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 6945c8b..46fed15 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -264,6 +264,7 @@ cperf_verify_test_runner(void *test_ctx)
 
 	uint64_t i;
 	uint16_t ops_unused = 0;
+	uint32_t imix_idx = 0;
 
 	struct rte_crypto_op *ops[ctx->options->max_burst_size];
 	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -317,7 +318,7 @@ cperf_verify_test_runner(void *test_ctx)
 		(ctx->populate_ops)(ops, ctx->src_buf_offset,
 				ctx->dst_buf_offset,
 				ops_needed, ctx->sess, ctx->options,
-				ctx->test_vector, iv_offset);
+				ctx->test_vector, iv_offset, &imix_idx);
 
 
 		/* Populate the mbuf with the test vector, for verification */
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 29373f5..fc96886 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -33,6 +33,8 @@
 #include <stdio.h>
 #include <unistd.h>
 
+#include <rte_malloc.h>
+#include <rte_random.h>
 #include <rte_eal.h>
 #include <rte_cryptodev.h>
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
@@ -536,13 +538,45 @@ main(int argc, char **argv)
 		i++;
 	}
 
-	/* Get first size from range or list */
-	if (opts.inc_buffer_size != 0)
-		opts.test_buffer_size = opts.min_buffer_size;
-	else
-		opts.test_buffer_size = opts.buffer_size_list[0];
+	if (opts.imix_distribution_count != 0) {
+		uint8_t buffer_size_count = opts.buffer_size_count;
+		uint16_t distribution_total[buffer_size_count];
+		uint32_t op_idx;
+		uint32_t test_average_size = 0;
+		const uint32_t *buffer_size_list = opts.buffer_size_list;
+		const uint32_t *imix_distribution_list = opts.imix_distribution_list;
+
+		opts.imix_buffer_sizes = rte_malloc(NULL,
+					sizeof(uint32_t) * opts.pool_sz,
+					0);
+		/*
+		 * Calculate accumulated distribution of
+		 * probabilities per packet size
+		 */
+		distribution_total[0] = imix_distribution_list[0];
+		for (i = 1; i < buffer_size_count; i++)
+			distribution_total[i] = imix_distribution_list[i] +
+				distribution_total[i-1];
+
+		/* Calculate a random sequence of packet sizes, based on distribution */
+		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
+			uint16_t random_number = rte_rand() %
+				distribution_total[buffer_size_count - 1];
+			for (i = 0; i < buffer_size_count; i++)
+				if (random_number < distribution_total[i])
+					break;
+
+			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
+		}
+
+		/* Calculate average buffer size for the IMIX distribution */
+		for (i = 0; i < buffer_size_count; i++)
+			test_average_size += buffer_size_list[i] *
+				imix_distribution_list[i];
+
+		opts.test_buffer_size = test_average_size /
+				distribution_total[buffer_size_count - 1];
 
-	while (opts.test_buffer_size <= opts.max_buffer_size) {
 		i = 0;
 		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
 
@@ -561,14 +595,45 @@ main(int argc, char **argv)
 			rte_eal_wait_lcore(lcore_id);
 			i++;
 		}
+	} else {
 
 		/* Get next size from range or list */
 		if (opts.inc_buffer_size != 0)
-			opts.test_buffer_size += opts.inc_buffer_size;
-		else {
-			if (++buffer_size_idx == opts.buffer_size_count)
-				break;
-			opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
+			opts.test_buffer_size = opts.min_buffer_size;
+		else
+			opts.test_buffer_size = opts.buffer_size_list[0];
+
+		while (opts.test_buffer_size <= opts.max_buffer_size) {
+			i = 0;
+			RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+				if (i == nb_cryptodevs)
+					break;
+
+				cdev_id = enabled_cdevs[i];
+
+				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+					ctx[cdev_id], lcore_id);
+				i++;
+			}
+			i = 0;
+			RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+				if (i == nb_cryptodevs)
+					break;
+				rte_eal_wait_lcore(lcore_id);
+				i++;
+			}
+
+			/* Get next size from range or list */
+			if (opts.inc_buffer_size != 0)
+				opts.test_buffer_size += opts.inc_buffer_size;
+			else {
+				if (++buffer_size_idx == opts.buffer_size_count)
+					break;
+				opts.test_buffer_size =
+					opts.buffer_size_list[buffer_size_idx];
+			}
 		}
 	}
 
@@ -585,7 +650,7 @@ main(int argc, char **argv)
 	for (i = 0; i < nb_cryptodevs &&
 			i < RTE_CRYPTO_MAX_DEVS; i++)
 		rte_cryptodev_stop(enabled_cdevs[i]);
-
+	rte_free(opts.imix_buffer_sizes);
 	free_test_vector(t_vec, &opts);
 
 	printf("\n");
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 7e12677..75ce46d 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -171,6 +171,20 @@ The following are the appication command-line options:
             is the maximum size (i.e. ``--buffer-sz 16:2:32``)
           * List of values, up to 32 values, separated in commas (i.e. ``--buffer-sz 32,64,128``)
 
+* ``--imix <n>``
+
+        Set the distribution of packet sizes.
+
+        A list of weights must be passed, containing the same number of items as buffer-sz,
+        so each item in this list is the weight of the packet size in the same position
+        of the buffer-sz parameter (a list must be passed in that parameter).
+
+        Example:
+
+        To test a distribution of 20% packets of 64 bytes, 40% packets of 100 bytes and 40% packets
+        of 256 bytes, the command line would be: ``--buffer-sz 64,100,256 --imix 20,40,40``.
+        Note that the weights do not have to be percentages, so using ``--imix 1,2,2`` would result
+        in the same distribution.
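+
+        When IMIX is enabled, the weighted average of the packet sizes is used as the single
+        nominal buffer size of the test run. For the example above, this works out to
+        (64 * 20 + 100 * 40 + 256 * 40) / (20 + 40 + 40) = 155 bytes (integer division).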
 
 * ``--segment-sz <n>``
 
-- 
2.9.4


