[dpdk-dev] [PATCH] app/test: add unit test cases for mbuf library APIs

Olivier Matz olivier.matz at 6wind.com
Mon Jun 3 10:39:36 CEST 2019


Hi Lavanya,

Please find some comments inline.

On Mon, Apr 15, 2019 at 01:40:15PM +0100, Lavanya Govindarajan wrote:
> added new unit test cases for
> rte_validate_tx_offload,
> rte_pktmbuf_alloc_bulk,
> rte_pktmbuf_read,
> rte_pktmbuf_ext_shinfo_init_helper,
> rte_pktmbuf_attach_extbuf,
> rte_mbuf_ext_refcnt_read,
> rte_mbuf_ext_refcnt_update,
> rte_mbuf_ext_refcnt_set,
> rte_pktmbuf_detach_extbuf
> 
> Signed-off-by: Lavanya Govindarajan <lavanyax.govindarajan at intel.com>
> ---
>  app/test/test_mbuf.c | 820 ++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 817 insertions(+), 3 deletions(-)
> 
> diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
> index 030385ec5..74259b313 100644
> --- a/app/test/test_mbuf.c
> +++ b/app/test/test_mbuf.c

[...]

> +/* Test flags for tx_offload capacity */
> +enum test_mbuf_tx_ol_flag {
> +	MBUF_TEST_INVALID_FLAG = 0,
> +	MBUF_TEST_IP_CKSUM_IPV6_SET,
> +	MBUF_TEST_IP_TYPE_NOT_SET,
> +	MBUF_TEST_IP_TYPE_SET,
> +	MBUF_TEST_NULL_TSO_SEGSZ,
> +	MBUF_TEST_TSO_IP_CKSUM_NOT_SET,
> +	MBUF_TEST_TSO_IPV6_SET,
> +	MBUF_TEST_TSO_IP_CKSUM_SET,
> +	MBUF_TEST_OUTER_IPV4_NOT_SET,
> +	MBUF_TEST_OUTER_IPV4_SET,
> +	MBUF_TEST_OL_MASK_NOT_SET
> +};

[...]

> +/*
> + * Test to validate tx offload flags in a packet
> + *  - Allocate a mbuf and append header and data.
> + *  - Set mbuf ol_flag (offload flag) to validate fragmented headers.
> + *  - Validate if IP checksum is counted only for IPV4 packet.
> + *  - Validate if IP type is set when PKT_TX_L4_MASK is set.
> + *  - Test to confirm IP type is set when required.
> + *  - Validate if TSO segment size is non zero when TCP_SEG is set.
> + *  - Validate if IP checksum is set for TSO capability.
> + *  - Test to confirm all the TSO packet requirements are met.
> + *  - Validate if outer IP checksum set for non outer IPv4 packet.
> + *  - Test to confirm outer IP checksum is set for outer IPV4 packet.
> + *  - Confirm if packets with no PKT_TX_OFFLOAD_MASK are skipped.
> + */
> +static int
> +test_mbuf_validate_tx_offload(struct rte_mempool *pktmbuf_pool,
> +		uint32_t test_ol_flag)
> +{
> +	struct rte_mbuf *m = NULL;
> +	int ret = 0;
> +
> +	/* alloc a mbuf and do sanity check */
> +	m = rte_pktmbuf_alloc(pktmbuf_pool);
> +	if (m == NULL)
> +		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
> +	if (rte_pktmbuf_pkt_len(m) != 0)
> +		GOTO_FAIL("%s: Bad packet length\n", __func__);
> +	rte_mbuf_sanity_check(m, 0);
> +	m->ol_flags = 0;
> +
> +	switch (test_ol_flag) {

I'm not sure we need an enum here. It results in something like this:

	enum {
		...
	}

	test(num)
	{
		switch (num) {
		case 0: ...
		case 1: ...
		case 2: ...
		}
	}

	test(0);
	test(1);
	test(2);

Instead, what about having a generic function like below:

	test_mbuf_validate_tx_offload(const char *test_name,
		struct rte_mempool *pktmbuf_pool,
		uint64_t ol_flags,
		uint16_t segsize,
		int expected_retval)
	{
		...

		m->ol_flags = ol_flags;
		m->tso_segsz = segsize;
		ret = rte_validate_tx_offload(m);
		if (ret != expected_retval)
			GOTO_FAIL("%s(%s): expected ret val: %d; recvd: %d\n",
					__func__, test_name, expected_retval, ret);
		...
	}
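
The callers would then just pass the ol_flags combination, the TSO segment
size and the expected return value. A rough idea of what the call sites
could look like (the flag combinations and expected values below are only
illustrative, assuming rte_validate_tx_offload() returns -EINVAL on these
error cases):

	/* IP checksum requested on an IPv6 packet: must be rejected */
	if (test_mbuf_validate_tx_offload("ip_cksum_on_ipv6", pktmbuf_pool,
			PKT_TX_IP_CKSUM | PKT_TX_IPV6, 0, -EINVAL) < 0) {
		printf("%d: validate_tx_offload() test failed\n", __LINE__);
		goto err;
	}
	/* TSO with a null segment size: must be rejected */
	if (test_mbuf_validate_tx_offload("tso_null_segsz", pktmbuf_pool,
			PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_IPV4,
			0, -EINVAL) < 0) {
		printf("%d: validate_tx_offload() test failed\n", __LINE__);
		goto err;
	}
	/* well-formed TSO request: must be accepted */
	if (test_mbuf_validate_tx_offload("tso_ok", pktmbuf_pool,
			PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_IPV4,
			512, 0) < 0) {
		printf("%d: validate_tx_offload() test failed\n", __LINE__);
		goto err;
	}

That keeps the relationship between the flags and the expected result
visible at the call site.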


> +/*
> + * Test for allocating a bulk of mbufs
> + * define an array with positive sizes for mbufs allocations.
> + */
> +static int
> +test_rte_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
> +{
> +	int ret = 0;
> +	unsigned int idx, loop;
> +	unsigned int alloc_counts[] = {
> +		0,
> +		MEMPOOL_CACHE_SIZE - 1,
> +		MEMPOOL_CACHE_SIZE,
> +		MEMPOOL_CACHE_SIZE + 1,
> +		MEMPOOL_CACHE_SIZE * 1.5,
> +		MEMPOOL_CACHE_SIZE * 2,
> +		MEMPOOL_CACHE_SIZE * 2 - 1,
> +		MEMPOOL_CACHE_SIZE * 2 + 1,
> +		89,                         /* random number */
> +		MEMPOOL_CACHE_SIZE          /* repeat cache size */
> +	};

Instead of testing these particular values, why not test every value
between 0 and NB_MBUF?
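
Something like this (only a sketch: the upper bound keeps a margin for the
mbufs that may be held in the per-lcore mempool cache, otherwise the
largest counts could fail even though nothing is leaked):

	unsigned int count, i;
	struct rte_mbuf *mbufs[NB_MBUF];
	int ret;

	for (count = 0; count <= NB_MBUF - 2 * MEMPOOL_CACHE_SIZE; count++) {
		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs, count);
		if (ret != 0) {
			printf("%s: bulk alloc of %u mbufs failed\n",
					__func__, count);
			return -1;
		}
		for (i = 0; i < count; i++)
			rte_pktmbuf_free(mbufs[i]);
	}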

> +	/* allocate a large array of mbuf pointers */
> +	struct rte_mbuf *mbufs[NB_MBUF];
> +	for (loop = 0; loop < NB_MBUF; loop++)
> +		mbufs[loop] = (void *)NULL;

You can use memset(), or an initializer: struct rte_mbuf *mbufs[NB_MBUF] = { 0 };

> +
> +	for (idx = 0; idx < RTE_DIM(alloc_counts); idx++) {
> +		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
> +				alloc_counts[idx]);
> +		if (ret == 0) {
> +			for (loop = 0; loop < alloc_counts[idx] &&
> +					mbufs[loop] != NULL; loop++)
> +				rte_pktmbuf_free(mbufs[loop]);
> +		} else if (ret != 0) {
> +			printf("%s: Bulk alloc failed count(%u); ret val(%d)\n",
> +					__func__, alloc_counts[idx], ret);
> +			return -1;
> +		}
> +	}
> +	return 0;
> +}
> +
> +/*
> + * Negative testing for allocating a bulk of mbufs
> + * define an array including negative sizes for mbufs allocations.
> + */
> +static int
> +test_neg_rte_pktmbuf_alloc_bulk(struct rte_mempool *pktmbuf_pool)
> +{
> +	int loop, ret = 0;
> +	unsigned int idx;
> +	int neg_alloc_counts[] = {
> +		MEMPOOL_CACHE_SIZE - NB_MBUF,
> +		-1,
> +		NB_MBUF - 2,
> +		NB_MBUF,
> +		NB_MBUF + 1,
> +		NB_MBUF * 8,
> +	};

The count parameter of rte_pktmbuf_alloc_bulk() is unsigned, so there is no
point in testing negative values. Instead, you could test UINT_MAX.

Also, I feel that the (NB_MBUF - 2) and (NB_MBUF) cases are invalid: whether
they fail depends on how many mbufs are held in the mempool cache. If the
cache is empty, these allocations succeed and the test fails.
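
For the "too large" cases, something like this would be deterministic
whatever the cache state (UINT_MAX comes from <limits.h>; I'm assuming the
bulk dequeue is all-or-nothing, so nothing is written into the array on
failure):

	/* counts larger than the pool size must always fail */
	if (rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs, UINT_MAX) == 0) {
		printf("%s: bulk alloc of UINT_MAX mbufs must fail\n",
				__func__);
		return -1;
	}
	if (rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs, NB_MBUF * 8) == 0) {
		printf("%s: bulk alloc of NB_MBUF * 8 mbufs must fail\n",
				__func__);
		return -1;
	}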

> +	struct rte_mbuf *mbufs[NB_MBUF * 8];
> +	for (loop = 0; loop < NB_MBUF * 8; loop++)
> +		mbufs[loop] = (void *)NULL;
> +
> +	for (idx = 0; idx < RTE_DIM(neg_alloc_counts); idx++) {
> +		ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool, mbufs,
> +				neg_alloc_counts[idx]);
> +		if (ret == 0) {
> +			printf("%s: Bulk alloc must fail! count(%u); ret(%d)\n",
> +					__func__, neg_alloc_counts[idx], ret);
> +			for (loop = 0; loop < neg_alloc_counts[idx] &&
> +					mbufs[loop] != NULL; loop++)
> +				rte_pktmbuf_free(mbufs[loop]);
> +			return -1;
> +		}
> +	}
> +	return 0;
> +}
> +

[...]

> +/*
> + * Test to read mbuf packet data from offset
> + */
> +static int
> +test_rte_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool)
> +{
> +	struct rte_mbuf *m = NULL;
> +	struct ether_hdr *hdr = NULL;
> +	char *data = NULL;
> +	const char *data_copy = NULL;
> +	unsigned int off;
> +	unsigned int hdr_len = sizeof(struct ether_hdr);
> +
> +	/* alloc a mbuf */
> +	m = rte_pktmbuf_alloc(pktmbuf_pool);
> +	if (m == NULL)
> +		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
> +
> +	if (rte_pktmbuf_pkt_len(m) != 0)
> +		GOTO_FAIL("%s: Bad packet length\n", __func__);
> +	rte_mbuf_sanity_check(m, 0);
> +
> +	/* prepend an ethernet header */
> +	hdr = (struct ether_hdr *)rte_pktmbuf_prepend(m, hdr_len);
> +	if (hdr == NULL)
> +		GOTO_FAIL("%s: Cannot prepend header\n", __func__);
> +	if (rte_pktmbuf_pkt_len(m) != hdr_len)
> +		GOTO_FAIL("%s: Bad pkt length", __func__);
> +	if (rte_pktmbuf_data_len(m) != hdr_len)
> +		GOTO_FAIL("%s: Bad data length", __func__);
> +	memset(hdr, 0xde, hdr_len);
> +
> +	/* read mbuf header info from 0 offset */
> +	data_copy = rte_pktmbuf_read(m, 0, hdr_len, NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading header!\n", __func__);
> +	for (off = 0; off < hdr_len; off++) {
> +		if (data_copy[off] != (char)0xde)
> +			GOTO_FAIL("Header info corrupted at offset %u", off);
> +	}
> +
> +	/* append sample data after ethernet header */
> +	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
> +	if (data == NULL)
> +		GOTO_FAIL("%s: Cannot append data\n", __func__);
> +	if (rte_pktmbuf_pkt_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
> +		GOTO_FAIL("%s: Bad packet length\n", __func__);
> +	if (rte_pktmbuf_data_len(m) != hdr_len + MBUF_TEST_DATA_LEN2)
> +		GOTO_FAIL("%s: Bad data length\n", __func__);
> +	memset(data, 0xcc, MBUF_TEST_DATA_LEN2);
> +
> +	/* read mbuf data after header info */
> +	data_copy = rte_pktmbuf_read(m, hdr_len, MBUF_TEST_DATA_LEN2, NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading header data!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
> +		if (data_copy[off] != (char)0xcc)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* partial reading of mbuf data */
> +	data_copy = rte_pktmbuf_read(m, hdr_len + 5, MBUF_TEST_DATA_LEN2 - 5,
> +			NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
> +		GOTO_FAIL("%s: Incorrect data length!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
> +		if (data_copy[off] != (char)0xcc)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read length greater than mbuf data_len */
> +	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_data_len(m) + 1,
> +				NULL) != NULL)
> +		GOTO_FAIL("%s: Requested len is larger than mbuf data len!\n",
> +				__func__);
> +
> +	/* read length greater than mbuf pkt_len */
> +	if (rte_pktmbuf_read(m, hdr_len, rte_pktmbuf_pkt_len(m) + 1,
> +				NULL) != NULL)
> +		GOTO_FAIL("%s: Requested len is larger than mbuf pkt len!\n",
> +				__func__);
> +
> +	/* read data of zero len from valid offset */
> +	data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
> +		GOTO_FAIL("%s: Corrupted data content!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
> +		if (data_copy[off] != (char)0xcc)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data of zero length from zero offset */
> +	data_copy = rte_pktmbuf_read(m, 0, 0, NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	/* check if the received address is the beginning of header info */
> +	if (hdr != (const struct ether_hdr *)data_copy)
> +		GOTO_FAIL("%s: Corrupted data address!\n", __func__);
> +
> +	/* read data of negative size length from valid offset */
> +	data_copy = rte_pktmbuf_read(m, hdr_len, -1, NULL);
> +	if (data_copy == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	/* check if the received address is the beginning of data segment */
> +	if (data_copy != data)
> +		GOTO_FAIL("%s: Corrupted data address!\n", __func__);
> +
> +	/* try to read from mbuf with negative size offset */
> +	data_copy = rte_pktmbuf_read(m, -1, 0, NULL);
> +	if (data_copy != NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +
> +	/* try to read from mbuf with negative size offset and len */
> +	data_copy = rte_pktmbuf_read(m, -1, -1, NULL);
> +	if (data_copy != NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);

I'm not sure it makes sense to test negative values, since the arguments
are unsigned. If we really want to check that case, I'd prefer to test
UINT32_MAX (which is equivalent).
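
i.e. something like:

	/* out of range offset: rte_pktmbuf_read() must return NULL */
	if (rte_pktmbuf_read(m, UINT32_MAX, 0, NULL) != NULL)
		GOTO_FAIL("%s: read at offset UINT32_MAX must return NULL\n",
				__func__);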

> +
> +	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
> +
> +	rte_pktmbuf_free(m);
> +	m = NULL;
> +
> +	return 0;
> +fail:
> +	if (m) {
> +		rte_pktmbuf_free(m);
> +		m = NULL;
> +	}
> +	return -1;
> +}
> +
> +/*
> + * Test to read data from chain of mbufs with data segments.
> + */
> +static int
> +test_rte_pktmbuf_read_from_chain(struct rte_mempool *pktmbuf_pool)
> +{
> +	struct rte_mbuf *pkt = NULL;
> +	struct rte_mbuf *pkt_seg = NULL;
> +	struct rte_mbuf *pkt_burst[MBUF_TEST_BURST];
> +	char *data = NULL;
> +	const char *read_data = NULL;
> +	char data_buf[EXT_BUF_TEST_DATA_LEN];
> +	uint32_t seg, i, total_pkt_len;
> +	uint16_t nb_pkt, off;
> +	uint32_t nb_segs = MBUF_TEST_DATA_LEN3 / MBUF_TEST_SEG_SIZE;
> +	memset(data_buf, 0, EXT_BUF_TEST_DATA_LEN);
> +
> +	/* allocate pkts in a burst */
> +	for (nb_pkt = 0; nb_pkt < MBUF_TEST_BURST; nb_pkt++) {
> +		/* alloc a mbuf */
> +		pkt = rte_pktmbuf_alloc(pktmbuf_pool);
> +		if (pkt == NULL)
> +			GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
> +		if (rte_pktmbuf_pkt_len(pkt) != 0)
> +			GOTO_FAIL("%s: Bad packet length\n", __func__);
> +		rte_mbuf_sanity_check(pkt, 0);
> +
> +		data = rte_pktmbuf_append(pkt, MBUF_TEST_DATA_LEN3);
> +		if (data == NULL)
> +			GOTO_FAIL("%s: Cannot append data\n", __func__);
> +		if (rte_pktmbuf_pkt_len(pkt) != MBUF_TEST_DATA_LEN3)
> +			GOTO_FAIL("%s: Bad packet length\n", __func__);
> +		if (rte_pktmbuf_data_len(pkt) != MBUF_TEST_DATA_LEN3)
> +			GOTO_FAIL("%s: Bad data length\n", __func__);
> +		memset(data, 0xfe, MBUF_TEST_DATA_LEN3);
> +		total_pkt_len = pkt->data_len;
> +		/* allocate mbuf segments for each pkt */
> +		for (seg = 0; seg < nb_segs; seg++) {
> +			pkt_seg = rte_pktmbuf_alloc(pktmbuf_pool);
> +			if (pkt_seg == NULL)
> +				GOTO_FAIL("%s: mbuf allocation failed!\n",
> +						__func__);
> +			if (rte_pktmbuf_pkt_len(pkt_seg) != 0)
> +				GOTO_FAIL("%s: Bad packet length\n", __func__);
> +			rte_mbuf_sanity_check(pkt_seg, 0);
> +			data = rte_pktmbuf_append(pkt_seg,
> +					MBUF_TEST_SEG_SIZE);
> +			if (data == NULL)
> +				GOTO_FAIL("%s: Cannot append data\n", __func__);

If a failure happens in this area, the mbufs already allocated are leaked.
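
One way to avoid it is to make every allocated mbuf reachable from the
fail: label, for instance by initializing pkt_burst[] to NULL and storing
pkt into it right after allocation (just a sketch):

	struct rte_mbuf *pkt_burst[MBUF_TEST_BURST] = { NULL };
	...
	pkt = rte_pktmbuf_alloc(pktmbuf_pool);
	if (pkt == NULL)
		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
	pkt_burst[nb_pkt] = pkt;	/* reachable from the fail path */
	...
fail:
	rte_pktmbuf_free(pkt_seg);	/* not chained yet; NULL is a no-op */
	for (nb_pkt = 0; nb_pkt < MBUF_TEST_BURST; nb_pkt++)
		rte_pktmbuf_free(pkt_burst[nb_pkt]);	/* NULL-safe too */
	return -1;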

> +			if (rte_pktmbuf_pkt_len(pkt_seg) !=
> +					MBUF_TEST_SEG_SIZE)
> +				GOTO_FAIL("%s: Bad packet length\n", __func__);
> +			if (rte_pktmbuf_data_len(pkt_seg) !=
> +					MBUF_TEST_SEG_SIZE)
> +				GOTO_FAIL("%s: Bad data length\n", __func__);
> +			memset(data, (seg + nb_pkt + 0xaa) % 0x0ff,
> +					MBUF_TEST_SEG_SIZE);
> +
> +			/* create chained mbufs */
> +			rte_pktmbuf_chain(pkt, pkt_seg);
> +			total_pkt_len += rte_pktmbuf_data_len(pkt_seg);
> +			pkt_seg = pkt_seg->next;
> +		}
> +		pkt_burst[nb_pkt] = pkt;
> +		if (rte_pktmbuf_pkt_len(pkt) != total_pkt_len)
> +			GOTO_FAIL("%s: Incorrect mbuf packet length!\n",
> +					__func__);
> +	}
> +	/* point to the first chained mbufs burst */
> +	pkt = pkt_burst[0];
> +	if (rte_pktmbuf_is_contiguous(pkt))
> +		GOTO_FAIL("Buffer should be non contiguous!");
> +	/* read data of first mbuf segment */
> +	read_data = rte_pktmbuf_read(pkt, 0, MBUF_TEST_DATA_LEN3, NULL);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_DATA_LEN3; off++) {
> +		if (read_data[off] != (char)0xfe)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data after first mbuf segment */
> +	read_data = rte_pktmbuf_read(pkt, pkt->data_len, MBUF_TEST_SEG_SIZE,
> +			&data_buf);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_SEG_SIZE; off++) {
> +		if (read_data[off] != (char)0xaa)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data with offset into second mbuf segment */
> +	read_data = rte_pktmbuf_read(pkt, pkt->data_len + 5,
> +			MBUF_TEST_SEG_SIZE - 5, NULL);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_SEG_SIZE - 5; off++) {
> +		if (read_data[off] != (char)0xaa)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data with offset into fourth segment of 5th mbuf packet burst */
> +	pkt = pkt_burst[4];
> +	read_data = rte_pktmbuf_read(pkt,
> +			pkt->data_len + (MBUF_TEST_SEG_SIZE * 2) + 3,
> +			MBUF_TEST_SEG_SIZE - 3, NULL);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_SEG_SIZE - 3; off++) {
> +		/* mbuf array index = 4, seg index = 2; index starts from 0 */
> +		if (read_data[off] != (char)(4 + 2 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data beyond one mbuf segment length */
> +	read_data = rte_pktmbuf_read(pkt, pkt->data_len + MBUF_TEST_SEG_SIZE,
> +			MBUF_TEST_SEG_SIZE + 2, &data_buf);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	if (read_data != data_buf)
> +		GOTO_FAIL("%s: Invalid data address!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_SEG_SIZE + 2; off++) {
> +		if (off < MBUF_TEST_SEG_SIZE &&
> +				data_buf[off] != (char)(4 + 1 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +		if (off >= MBUF_TEST_SEG_SIZE &&
> +				data_buf[off] != (char)(4 + 2 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}
> +
> +	/* read data of all mbuf segments after the initial data offset */
> +	read_data = rte_pktmbuf_read(pkt, pkt->data_len,
> +			MBUF_TEST_SEG_SIZE * nb_segs, &data_buf);
> +	if (read_data == NULL)
> +		GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
> +	if (read_data != data_buf)
> +		GOTO_FAIL("%s: Invalid data address!\n", __func__);
> +	for (off = 0; off < MBUF_TEST_SEG_SIZE * nb_segs; off++) {
> +		if (off < MBUF_TEST_SEG_SIZE &&
> +				data_buf[off] != (char)(4 + 0 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +		if ((off >= MBUF_TEST_SEG_SIZE && off < MBUF_TEST_SEG_SIZE * 2)
> +				&& data_buf[off] !=
> +				(char)(4 + 1 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +		if ((off >= MBUF_TEST_SEG_SIZE * 2 &&
> +					off < MBUF_TEST_SEG_SIZE * 3) &&
> +				data_buf[off] != (char)(4 + 2 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +		if (off >= MBUF_TEST_SEG_SIZE * 4 &&
> +				data_buf[off] != (char)(4 + 3 + 0xaa) % 0x0ff)
> +			GOTO_FAIL("Data corrupted at offset %u", off);
> +	}

I feel it's a bit unfortunate to have a long function that mixes the
creation of an mbuf chain (which could be factorized for other tests) with
the test itself. Also, I don't see the reason for having a burst of packets.
Finally, it would be better to fill the data with [0x00 0x01 0x02 ...]
instead of a fixed value: it would help to detect shifts. This last comment
applies to every test.

For instance, something like this:

static struct rte_mbuf *
create_packet(uint16_t *seg_lengths, unsigned int flags)
{
	/* create a mbuf with several segments, filled with data
	 * [0x00 0x01 0x02 ...]
	 * The flags can provide different behaviors. */
}

#define MAX_SEG 16
struct test_case {
	uint16_t seg_lengths[MAX_SEG];
	unsigned int flags;
	uint32_t read_off;
	uint32_t read_len;
};

int my_test(void)
{
	struct rte_mbuf *m;
	struct test_case test_cases[] = {
		{ .seg_lengths = { 1000, 100 }, .flags = 0, .read_off = 0, .read_len = 1000 },
		{ .seg_lengths = { 1000, 100 }, .flags = 0, .read_off = 0, .read_len = 1001 },
		{ .seg_lengths = { 1000, 100 }, .flags = 0, .read_off = 0, .read_len = 1100 },
		{ .seg_lengths = { 1000, 100 }, .flags = 0, .read_off = 100, .read_len = 1000 },
		{ .seg_lengths = { 1000, 100 }, .flags = NO_HEADER, .read_off = 0, .read_len = 1000 },
		{ .seg_lengths = { 100, 100, 100 }, .flags = 0, .read_off = 0, .read_len = 300 },
		{ .seg_lengths = { 100, 100, 100 }, .flags = 0, .read_off = 99, .read_len = 201 },
		{ .seg_lengths = { 1000, 1, 1000 }, .flags = 0, .read_off = 1000, .read_len = 2 },
		/* ... */
	};
	unsigned int i;

	for (i = 0; i < RTE_DIM(test_cases); i++) {
		m = create_packet(test_cases[i].seg_lengths, test_cases[i].flags);
		if (m == NULL) {
			/* ... */
		}

		/* call rte_pktmbuf_read() in buffer
		 * check that buffer[0] == read_off % 256
		 * check that buffer[1] == (read_off + 1) % 256
		 * check that buffer[2] == (read_off + 2) % 256
		 * ...
		 * check that buffer[read_len] == (read_off + read_len) % 256
		 */
	}

	return 0;
}

That's just an example of course, but I feel it provides better coverage of
corner cases and is more easily extensible. Note that what you did is still
better than no test.
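
For reference, a minimal create_packet() could look like the code below. I
simplified the prototype compared to the one above: the pool is passed
explicitly, the number of segments is given instead of flags, and the data
pattern simply continues across segment boundaries.

static struct rte_mbuf *
create_packet(struct rte_mempool *pool, const uint16_t *seg_lengths,
		unsigned int nb_segs)
{
	struct rte_mbuf *m = NULL;
	struct rte_mbuf *seg;
	unsigned int i, j;
	uint8_t byte = 0;
	char *data;

	for (i = 0; i < nb_segs; i++) {
		seg = rte_pktmbuf_alloc(pool);
		if (seg == NULL)
			goto fail;
		data = rte_pktmbuf_append(seg, seg_lengths[i]);
		if (data == NULL) {
			rte_pktmbuf_free(seg);
			goto fail;
		}
		/* fill with 0x00 0x01 0x02 ... so that any shift or
		 * missing byte is detected by the reader */
		for (j = 0; j < seg_lengths[i]; j++)
			data[j] = byte++;
		if (m == NULL)
			m = seg;
		else if (rte_pktmbuf_chain(m, seg) != 0) {
			rte_pktmbuf_free(seg);
			goto fail;
		}
	}
	return m;

fail:
	rte_pktmbuf_free(m);	/* frees the whole chain, NULL-safe */
	return NULL;
}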


> +/* Define a free call back function to be used for external buffer */
> +static void
> +ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
> +{
> +	void *ext_buf_addr = opaque;
> +	if (ext_buf_addr == NULL) {
> +		printf("External buffer address is invalid\n");
> +		return;
> +	}
> +	rte_free(ext_buf_addr);
> +	ext_buf_addr = NULL;
> +	printf("External buffer freed via callback\n");
> +}
> +
> +/*
> + * Test to initialize shared data in external buffer before attaching to mbuf
> + *  - Allocate mbuf with no data.
> + *  - Allocate external buffer with size should be large enough to accommodate
> + *     rte_mbuf_ext_shared_info.
> + *  - Invoke pktmbuf_ext_shinfo_init_helper to initialize shared data.
> + *  - Invoke rte_pktmbuf_attach_extbuf to attach external buffer to the mbuf.
> + *  - Clone another mbuf and attach the same external buffer to it.
> + *  - Invoke rte_pktmbuf_detach_extbuf to detach the external buffer from mbuf.
> + */
> +static int
> +test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
> +{
> +	struct rte_mbuf *m = NULL;
> +	struct rte_mbuf *clone = NULL;
> +	struct rte_mbuf_ext_shared_info *ret_shinfo = NULL;
> +	rte_iova_t buf_iova;
> +	void *ext_buf_addr = NULL;
> +	uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
> +				sizeof(struct rte_mbuf_ext_shared_info);
> +
> +	/* alloc a mbuf */
> +	m = rte_pktmbuf_alloc(pktmbuf_pool);
> +	if (m == NULL)
> +		GOTO_FAIL("%s: mbuf allocation failed!\n", __func__);
> +	if (rte_pktmbuf_pkt_len(m) != 0)
> +		GOTO_FAIL("%s: Bad packet length\n", __func__);
> +	rte_mbuf_sanity_check(m, 0);
> +
> +	ext_buf_addr = rte_malloc("External buffer", buf_len,
> +			RTE_CACHE_LINE_SIZE);
> +	if (ext_buf_addr == NULL)
> +		GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
> +
> +	ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
> +		ext_buf_free_callback_fn, ext_buf_addr);
> +	if (ret_shinfo == NULL)
> +		GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
> +
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
> +		GOTO_FAIL("%s: External refcount is not 1\n", __func__);
> +
> +	if (rte_mbuf_refcnt_read(m) != 1)
> +		GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
> +
> +	buf_iova = rte_mempool_virt2iova(ext_buf_addr);
> +	rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
> +		ret_shinfo);
> +	if (m->ol_flags != EXT_ATTACHED_MBUF)
> +		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
> +				__func__);
> +
> +	/* allocate one more mbuf */
> +	clone = rte_pktmbuf_clone(m, pktmbuf_pool);
> +	if (clone == NULL)
> +		GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
> +	if (rte_pktmbuf_pkt_len(clone) != 0)
> +		GOTO_FAIL("%s: Bad packet length\n", __func__);
> +
> +	/* attach the same external buffer to the cloned mbuf */
> +	rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
> +			ret_shinfo);

Instead of:

  clone = rte_pktmbuf_clone(m, pktmbuf_pool);
  rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
                 ret_shinfo);     /*  << useless */

I'd prefer:
  m2 = rte_pktmbuf_alloc(pktmbuf_pool);
  rte_pktmbuf_attach_extbuf(m2, ext_buf_addr, buf_iova, buf_len,
                 ret_shinfo);

Or just:
  clone = rte_pktmbuf_clone(m, pktmbuf_pool);



> +	if (clone->ol_flags != EXT_ATTACHED_MBUF)
> +		GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
> +				__func__);
> +
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
> +		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
> +
> +	/* test to manually update ext_buf_ref_cnt from 2 to 3*/
> +	rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
> +		GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
> +
> +	/* reset the ext_refcnt before freeing the external buffer */
> +	rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
> +		GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
> +
> +	/* detach the external buffer from mbufs */
> +	rte_pktmbuf_detach_extbuf(m);
> +	/* check if ref cnt is decremented */
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
> +		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
> +
> +	rte_pktmbuf_detach_extbuf(clone);
> +	if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
> +		GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
> +
> +	rte_pktmbuf_free(m);
> +	m = NULL;
> +	rte_pktmbuf_free(clone);
> +	clone = NULL;
> +
> +	return 0;
> +
> +fail:
> +	if (m) {
> +		rte_pktmbuf_free(m);
> +		m = NULL;
> +	}
> +	if (clone) {
> +		rte_pktmbuf_free(clone);
> +		clone = NULL;
> +	}
> +	if (ext_buf_addr != NULL) {
> +		rte_free(ext_buf_addr);
> +		ext_buf_addr = NULL;
> +	}
> +	return -1;
> +}
> +
>  static int
>  test_mbuf(void)
>  {
> @@ -1134,7 +1856,8 @@ test_mbuf(void)
>  
>  	/* create pktmbuf pool if it does not exist */
>  	pktmbuf_pool = rte_pktmbuf_pool_create("test_pktmbuf_pool",
> -			NB_MBUF, 32, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
> +			NB_MBUF, MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
> +			SOCKET_ID_ANY);
>  
>  	if (pktmbuf_pool == NULL) {
>  		printf("cannot allocate mbuf pool\n");
> @@ -1144,7 +1867,8 @@ test_mbuf(void)
>  	/* create a specific pktmbuf pool with a priv_size != 0 and no data
>  	 * room size */
>  	pktmbuf_pool2 = rte_pktmbuf_pool_create("test_pktmbuf_pool2",
> -			NB_MBUF, 32, MBUF2_PRIV_SIZE, 0, SOCKET_ID_ANY);
> +			NB_MBUF, MEMPOOL_CACHE_SIZE, MBUF2_PRIV_SIZE, 0,
> +			SOCKET_ID_ANY);
>  
>  	if (pktmbuf_pool2 == NULL) {
>  		printf("cannot allocate mbuf pool\n");
> @@ -1226,7 +1950,96 @@ test_mbuf(void)
>  		goto err;
>  	}
>  
> +	/* test to validate tx offload flags */
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_IP_CKSUM_IPV6_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_IP_TYPE_NOT_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_IP_TYPE_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_NULL_TSO_SEGSZ) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_TSO_IP_CKSUM_NOT_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_TSO_IPV6_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_TSO_IP_CKSUM_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_OUTER_IPV4_NOT_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_OUTER_IPV4_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +	if (test_mbuf_validate_tx_offload(pktmbuf_pool,
> +				MBUF_TEST_OL_MASK_NOT_SET) < 0) {
> +		printf("%d:test_mbuf_validate_tx_offload() failed\n", __LINE__);
> +		goto err;
> +	}
> +
> +	/* test for allocating a bulk of mbufs with various sizes */
> +	if (test_rte_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
> +		printf("test_rte_pktmbuf_alloc_bulk() failed\n");
> +		goto err;
> +	}
> +
> +	/* test for allocating a bulk of mbufs with various sizes */
> +	if (test_neg_rte_pktmbuf_alloc_bulk(pktmbuf_pool) < 0) {
> +		printf("test_neg_rte_pktmbuf_alloc_bulk() failed\n");
> +		goto err;
> +	}
> +
> +	/* test to read mbuf packet */
> +	if (test_rte_pktmbuf_read(pktmbuf_pool) < 0) {
> +		printf("test_rte_pktmbuf_read() failed\n");
> +		goto err;
> +	}
> +
> +	/* test to read mbuf packet from offset */
> +	if (test_rte_pktmbuf_read_from_offset(pktmbuf_pool) < 0) {
> +		printf("test_rte_pktmbuf_read_from_offset() failed\n");
> +		goto err;
> +	}
> +
> +	/* test to read data from chain of mbufs with data segments */
> +	if (test_rte_pktmbuf_read_from_chain(pktmbuf_pool) < 0) {
> +		printf("test_rte_pktmbuf_read_from_chain() failed\n");
> +		goto err;
> +	}
> +
> +	/* test to initialize shared info. at the end of external buffer */
> +	if (test_pktmbuf_ext_shinfo_init_helper(pktmbuf_pool) < 0) {
> +		printf("test_pktmbuf_ext_shinfo_init_helper() failed\n");
> +		goto err;
> +	}
> +
>  	ret = 0;
> +
>  err:
>  	rte_mempool_free(pktmbuf_pool);
>  	rte_mempool_free(pktmbuf_pool2);
> @@ -1234,3 +2047,4 @@ test_mbuf(void)
>  }
>  
>  REGISTER_TEST_COMMAND(mbuf_autotest, test_mbuf);
> +#undef GOTO_FAIL
> -- 
> 2.17.2
> 

Thanks for this work!

Olivier

