[PATCH v1 07/13] test/bbdev: add support for BLER for 4G

Vargas, Hernan hernan.vargas at intel.com
Mon Feb 13 21:59:53 CET 2023



> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin at redhat.com>
> Sent: Tuesday, January 31, 2023 4:20 AM
> To: Vargas, Hernan <hernan.vargas at intel.com>; dev at dpdk.org;
> gakhil at marvell.com; Rix, Tom <trix at redhat.com>
> Cc: Chautru, Nicolas <nicolas.chautru at intel.com>; Zhang, Qi Z
> <qi.z.zhang at intel.com>
> Subject: Re: [PATCH v1 07/13] test/bbdev: add support for BLER for 4G
> 
> 
> 
> On 1/17/23 17:50, Hernan Vargas wrote:
> > New feature to add BLER support for 4G in bbdev-test.
> >
> > Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
> > ---
> >   app/test-bbdev/test_bbdev_perf.c | 184 ++++++++++++++++++++++++++++++-
> >   1 file changed, 183 insertions(+), 1 deletion(-)
> >
> > diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
> > index ffb2de0604..69b86cdeb1 100644
> > --- a/app/test-bbdev/test_bbdev_perf.c
> > +++ b/app/test-bbdev/test_bbdev_perf.c
> > @@ -1756,6 +1756,30 @@ gen_qm2_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
> >   	llrs[j] = (int8_t) b;
> >   }
> >
> > +/* Simple LLR generation assuming AWGN and QPSK */
> > +static void
> > +gen_turbo_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
> > +{
> > +	double b, b1, n;
> > +	double coeff = 2.0 * sqrt(N0);
> > +
> > +	/* Ignore null LLRs from the vector so they do not get saturated */
> > +	if (llrs[j] == 0)
> > +		return;
> > +
> > +	/* Note: do not change the sign here */
> > +	n = randn(j % 2);
> > +	b1 = ((llrs[j] > 0 ? 2.0 : -2.0)
> > +			+ coeff * n) / N0;
> > +	b = b1 * (1 << 4);
> > +	b = round(b);
> > +	if (b > llr_max)
> > +		b = llr_max;
> > +	if (b < -llr_max)
> > +		b = -llr_max;
> > +	llrs[j] = (int8_t) b;
> > +}
> > +
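
For reference on the constants above: with sigma^2 = N0, b1 computes the
standard AWGN LLR for a BPSK/QPSK bit, LLR = 2*r/sigma^2 with received
symbol r = +/-1 + sigma*n, which is then scaled by 2^4 into the fixed-point
LLR format and saturated to +/-llr_max.
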
> >   /* Generate LLR for a given SNR */
> >   static void
> >   generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
> > @@ -1791,6 +1815,27 @@ generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
> >   	}
> >   }
> >
> > +/* Generate LLR for turbo decoder for a given SNR */
> > +static void
> > +generate_turbo_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
> > +		struct rte_bbdev_dec_op *ref_op)
> > +{
> > +	struct rte_mbuf *m;
> > +	uint32_t i, j, range;
> > +	double N0, llr_max;
> > +
> > +	llr_max = 127;
> > +	range = ref_op->turbo_dec.input.length;
> > +	N0 = 1.0 / pow(10.0, get_snr() / 10.0);
> > +
> > +	for (i = 0; i < n; ++i) {
> > +		m = inputs[i].data;
> > +		int8_t *llrs = rte_pktmbuf_mtod_offset(m, int8_t *, 0);
> > +		for (j = 0; j < range; ++j)
> > +			gen_turbo_llr(llrs, j, N0, llr_max);
> > +	}
> > +}
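
(For context: get_snr() returns the target SNR in dB, so N0 = 10^(-SNR/10)
is the noise variance assuming unit signal power.)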
> > +
> >   static void
> >   copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
> >   		unsigned int start_idx,
> > @@ -2301,6 +2346,31 @@ validate_ldpc_bler(struct rte_bbdev_dec_op **ops, const uint16_t n)
> >   	return errors;
> >   }
> >
> > +/* Check the number of code block errors */
> > +static int
> > +validate_turbo_bler(struct rte_bbdev_dec_op **ops, const uint16_t n)
> > +{
> > +	unsigned int i;
> > +	struct op_data_entries *hard_data_orig =
> > +			&test_vector.entries[DATA_HARD_OUTPUT];
> > +	struct rte_bbdev_op_turbo_dec *ops_td;
> > +	struct rte_bbdev_op_data *hard_output;
> > +	int errors = 0;
> > +	struct rte_mbuf *m;
> > +
> > +	for (i = 0; i < n; ++i) {
> > +		ops_td = &ops[i]->turbo_dec;
> > +		hard_output = &ops_td->hard_output;
> > +		m = hard_output->data;
> > +		if (memcmp(rte_pktmbuf_mtod_offset(m, uint32_t *, 0),
> > +				hard_data_orig->segments[0].addr,
> > +				hard_data_orig->segments[0].length))
> 
> No need to check that the mbuf is at least as large as segment 0's length?
>
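For illustration, a minimal guard before the memcmp could look like this
(untested sketch; it covers only the first mbuf segment, which is all the
memcmp reads):

	if (rte_pktmbuf_data_len(m) < hard_data_orig->segments[0].length) {
		/* Hard output shorter than reference: count as a block error. */
		errors++;
		continue;
	}
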
> > +			errors++;
> > +	}
> > +	return errors;
> > +}
> > +
> > +
> >   static int
> >   validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
> >   		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
> > @@ -3736,6 +3806,114 @@ bler_pmd_lcore_ldpc_dec(void *arg)
> >   	return TEST_SUCCESS;
> >   }
> >
> > +
> > +static int
> > +bler_pmd_lcore_turbo_dec(void *arg)
> > +{
> > +	struct thread_params *tp = arg;
> > +	uint16_t enq, deq;
> > +	uint64_t total_time = 0, start_time;
> > +	const uint16_t queue_id = tp->queue_id;
> > +	const uint16_t burst_sz = tp->op_params->burst_sz;
> > +	const uint16_t num_ops = tp->op_params->num_to_process;
> > +	struct rte_bbdev_dec_op *ops_enq[num_ops];
> > +	struct rte_bbdev_dec_op *ops_deq[num_ops];
> > +	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
> > +	struct test_buffers *bufs = NULL;
> > +	int i, j, ret;
> > +	struct rte_bbdev_info info;
> > +	uint16_t num_to_enq;
> > +
> > +	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
> > +			"BURST_SIZE should be <= %u", MAX_BURST);
> > +
> > +	rte_bbdev_info_get(tp->dev_id, &info);
> > +
> > +	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
> > +			"NUM_OPS cannot exceed %u for this device",
> > +			info.drv.queue_size_lim);
> > +
> > +	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
> > +
> > +	rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
> > +
> > +	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
> > +	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
> > +
> > +	/* For BLER tests we need to enable early termination */
> > +	if (!check_bit(ref_op->turbo_dec.op_flags,
> > +			RTE_BBDEV_TURBO_EARLY_TERMINATION))
> > +		ref_op->turbo_dec.op_flags +=
> > +				RTE_BBDEV_TURBO_EARLY_TERMINATION;
> > +	ref_op->turbo_dec.iter_max = get_iter_max();
> > +	ref_op->turbo_dec.iter_count = ref_op->turbo_dec.iter_max;
> > +
> > +	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
> > +		copy_reference_dec_op(ops_enq, num_ops, 0, bufs->inputs,
> > +				bufs->hard_outputs, bufs->soft_outputs,
> > +				ref_op);
> > +	generate_turbo_llr_input(num_ops, bufs->inputs, ref_op);
> > +
> > +	/* Set counter to validate the ordering */
> > +	for (j = 0; j < num_ops; ++j)
> > +		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
> > +
> > +	for (i = 0; i < 1; ++i) { /* Could add more iterations */
> > +		for (j = 0; j < num_ops; ++j)
> > +			mbuf_reset(ops_enq[j]->turbo_dec.hard_output.data);
> > +
> > +		start_time = rte_rdtsc_precise();
> > +
> > +		for (enq = 0, deq = 0; enq < num_ops;) {
> > +			num_to_enq = burst_sz;
> > +
> > +			if (unlikely(num_ops - enq < num_to_enq))
> > +				num_to_enq = num_ops - enq;
> > +
> > +			enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
> > +					queue_id, &ops_enq[enq], num_to_enq);
> 
> If for some reason the operation fails (it looks like it can fail at least on
> ACC200, which I checked), this ends up in an endless loop as enq won't get
> incremented.
The Python wrapper script used to invoke test-bbdev has a configurable timeout (600 seconds by default) that stops the test in case it ends up in an infinite loop.
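
That said, the loop could also be bounded locally. A rough sketch, where
MAX_ENQ_RETRIES is a hypothetical constant (not part of this patch):

	/* Guard: abort if the device accepts no ops for too long. */
	uint32_t stalls = 0;
	for (enq = 0, deq = 0; enq < num_ops;) {
		num_to_enq = RTE_MIN(burst_sz, (uint16_t)(num_ops - enq));
		uint16_t done = rte_bbdev_enqueue_dec_ops(tp->dev_id,
				queue_id, &ops_enq[enq], num_to_enq);
		enq += done;
		/* Count consecutive iterations with zero progress. */
		stalls = (done == 0) ? stalls + 1 : 0;
		if (stalls > MAX_ENQ_RETRIES)
			return TEST_FAILED;
		deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
				queue_id, &ops_deq[deq], enq - deq);
	}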

> > +
> > +			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
> > +					queue_id, &ops_deq[deq], enq - deq);
> > +		}
> > +
> > +		/* dequeue the remaining */
> > +		while (deq < enq) {
> > +			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
> > +					queue_id, &ops_deq[deq], enq - deq);
> > +		}
> > +
> > +		total_time += rte_rdtsc_precise() - start_time;
> > +	}
> > +
> > +	tp->iter_count = 0;
> > +	tp->iter_average = 0;
> > +	/* get the max of iter_count for all dequeued ops */
> > +	for (i = 0; i < num_ops; ++i) {
> > +		tp->iter_count = RTE_MAX(ops_enq[i]->turbo_dec.iter_count,
> > +				tp->iter_count);
> > +		tp->iter_average += (double) ops_enq[i]->turbo_dec.iter_count;
> > +	}
> > +
> > +	tp->iter_average /= num_ops;
> > +	tp->bler = (double) validate_turbo_bler(ops_deq, num_ops) / num_ops;
> > +
> > +	rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
> > +
> > +	double tb_len_bits = calc_dec_TB_size(ref_op);
> > +	tp->ops_per_sec = ((double)num_ops * 1) /
> > +			((double)total_time / (double)rte_get_tsc_hz());
> > +	tp->mbps = (((double)(num_ops * 1 * tb_len_bits)) /
> > +			1000000.0) / ((double)total_time /
> > +			(double)rte_get_tsc_hz());
> > +	printf("TBS %.0f Time %.0f\n", tb_len_bits, 1000000.0 *
> > +			((double)total_time / (double)rte_get_tsc_hz()));
> > +
> > +	return TEST_SUCCESS;
> > +}
> > +
> >   static int
> >   throughput_pmd_lcore_ldpc_dec(void *arg)
> >   {
> > @@ -4193,7 +4371,7 @@ print_dec_bler(struct thread_params *t_params, unsigned int used_cores)
> >   	total_bler /= used_cores;
> >   	total_iter /= used_cores;
> >
> > -	printf("SNR %.2f BLER %.1f %% - Iterations %.1f %d - Tp %.1f Mbps %s\n",
> > +	printf("SNR %.2f BLER %.1f %% - Iterations %.1f %d - Tp %.3f Mbps %s\n",
> >   			snr, total_bler * 100, total_iter, get_iter_max(),
> >   			total_mbps, get_vector_filename());
> >   }
> > @@ -4245,6 +4423,10 @@ bler_test(struct active_device *ad,
> >   			&& !check_bit(test_vector.ldpc_dec.op_flags,
> >   			RTE_BBDEV_LDPC_LLR_COMPRESSION))
> >   		bler_function = bler_pmd_lcore_ldpc_dec;
> > +	else if ((test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) &&
> > +			!check_bit(test_vector.turbo_dec.op_flags,
> > +			RTE_BBDEV_TURBO_SOFT_OUTPUT))
> > +		bler_function = bler_pmd_lcore_turbo_dec;
> >   	else
> >   		return TEST_SKIPPED;
> >


