[dpdk-dev] [PATCH v4 3/8] test/ring: add functional tests for zero copy APIs

Honnappa Nagarahalli honnappa.nagarahalli at arm.com
Sat Oct 24 18:11:07 CEST 2020


Add functional tests for the zero copy APIs. Test enqueue/dequeue
wrapper functions are built on top of the zero copy APIs so that they
fit into the existing testing framework.

Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
Reviewed-by: Dharmik Thakkar <dharmik.thakkar at arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
---
 app/test/test_ring.c | 196 +++++++++++++++++++++++++++++++++++++++++++
 app/test/test_ring.h |  42 ++++++++++
 2 files changed, 238 insertions(+)

diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 329d538a9..99fe4b46f 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
  */
 
 #include <string.h>
@@ -68,6 +69,149 @@
 
 static const int esize[] = {-1, 4, 8, 16, 20};
 
+/* Wrappers around the zero-copy APIs. The wrappers match
+ * the normal enqueue/dequeue API declarations.
+ */
+/*
+ * Bulk enqueue of n pointer objects via the zero-copy API:
+ * reserve ring space, copy obj_table into it, then commit.
+ * Signature matches rte_ring_enqueue_bulk() for the test table.
+ */
+static unsigned int
+test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
+	unsigned int n, unsigned int *free_space)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	/* Reserve space; zcd describes the reserved ring area.
+	 * NOTE(review): bulk start presumably returns n or 0 -- confirm
+	 * against the rte_ring zero-copy API docs.
+	 */
+	ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
+	if (ret > 0) {
+		/* Copy the data to the ring */
+		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
+		/* Commit: make the copied objects visible to consumers. */
+		rte_ring_enqueue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Bulk enqueue of n custom-sized elements (esize bytes each) via the
+ * zero-copy API. obj_table is an untyped flat element array.
+ */
+static unsigned int
+test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	/* Reserve space for n elements of esize bytes each. */
+	ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
+				&zcd, free_space);
+	if (ret > 0) {
+		/* Copy the data to the ring */
+		test_ring_copy_to(&zcd, obj_table, esize, ret);
+		/* Commit the reserved area. */
+		rte_ring_enqueue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Burst enqueue of up to n pointer objects via the zero-copy API.
+ * NOTE(review): unlike bulk, burst start may reserve fewer than n
+ * slots -- confirm against the rte_ring zero-copy API docs.
+ */
+static unsigned int
+test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
+	unsigned int n, unsigned int *free_space)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
+	if (ret > 0) {
+		/* Copy the data to the ring */
+		test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
+		/* Commit only the slots actually reserved (ret). */
+		rte_ring_enqueue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Burst enqueue of up to n custom-sized elements (esize bytes each)
+ * via the zero-copy API.
+ */
+static unsigned int
+test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *free_space)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	/* May reserve fewer than n elements; ret is the actual count. */
+	ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
+				&zcd, free_space);
+	if (ret > 0) {
+		/* Copy the data to the ring */
+		test_ring_copy_to(&zcd, obj_table, esize, ret);
+		/* Commit the reserved area. */
+		rte_ring_enqueue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Bulk dequeue of n pointer objects via the zero-copy API:
+ * reserve the filled ring area, copy it out to obj_table, commit.
+ * Signature matches rte_ring_dequeue_bulk() for the test table.
+ */
+static unsigned int
+test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
+	unsigned int n, unsigned int *available)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
+	if (ret > 0) {
+		/* Copy the data from the ring */
+		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
+		/* Release the consumed slots back to producers. */
+		rte_ring_dequeue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Bulk dequeue of n custom-sized elements (esize bytes each) via the
+ * zero-copy API. obj_table is an untyped flat destination buffer.
+ */
+static unsigned int
+test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
+				&zcd, available);
+	if (ret > 0) {
+		/* Copy the data from the ring */
+		test_ring_copy_from(&zcd, obj_table, esize, ret);
+		/* Release the consumed slots. */
+		rte_ring_dequeue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Burst dequeue of up to n pointer objects via the zero-copy API.
+ * NOTE(review): burst start may return fewer than n -- confirm
+ * against the rte_ring zero-copy API docs.
+ */
+static unsigned int
+test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
+	unsigned int n, unsigned int *available)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
+	if (ret > 0) {
+		/* Copy the data from the ring */
+		test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
+		/* Release only the slots actually consumed (ret). */
+		rte_ring_dequeue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Burst dequeue of up to n custom-sized elements (esize bytes each)
+ * via the zero-copy API.
+ */
+static unsigned int
+test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
+	unsigned int esize, unsigned int n, unsigned int *available)
+{
+	unsigned int ret;
+	struct rte_ring_zc_data zcd;
+
+	/* May consume fewer than n elements; ret is the actual count. */
+	ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
+				&zcd, available);
+	if (ret > 0) {
+		/* Copy the data from the ring */
+		test_ring_copy_from(&zcd, obj_table, esize, ret);
+		/* Release the consumed slots. */
+		rte_ring_dequeue_zc_finish(r, ret);
+	}
+
+	return ret;
+}
+
 static const struct {
 	const char *desc;
 	uint32_t api_type;
@@ -219,6 +363,58 @@ static const struct {
 			.felem = rte_ring_dequeue_burst_elem,
 		},
 	},
+	/*
+	 * Zero-copy variants. NOTE(review): the bulk and burst entries
+	 * below reuse identical .desc strings ("SP/SC sync mode (ZC)"
+	 * and "MP_HTS/MC_HTS sync mode (ZC)") and are told apart only
+	 * by api_type -- consider making the descriptions unique so
+	 * test output is unambiguous.
+	 */
+	{
+		.desc = "SP/SC sync mode (ZC)",
+		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
+		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
+		.enq = {
+			.flegacy = test_ring_enqueue_zc_bulk,
+			.felem = test_ring_enqueue_zc_bulk_elem,
+		},
+		.deq = {
+			.flegacy = test_ring_dequeue_zc_bulk,
+			.felem = test_ring_dequeue_zc_bulk_elem,
+		},
+	},
+	{
+		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
+		.api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
+		.enq = {
+			.flegacy = test_ring_enqueue_zc_bulk,
+			.felem = test_ring_enqueue_zc_bulk_elem,
+		},
+		.deq = {
+			.flegacy = test_ring_dequeue_zc_bulk,
+			.felem = test_ring_dequeue_zc_bulk_elem,
+		},
+	},
+	{
+		.desc = "SP/SC sync mode (ZC)",
+		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
+		.create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
+		.enq = {
+			.flegacy = test_ring_enqueue_zc_burst,
+			.felem = test_ring_enqueue_zc_burst_elem,
+		},
+		.deq = {
+			.flegacy = test_ring_dequeue_zc_burst,
+			.felem = test_ring_dequeue_zc_burst_elem,
+		},
+	},
+	{
+		.desc = "MP_HTS/MC_HTS sync mode (ZC)",
+		.api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
+		.create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
+		.enq = {
+			.flegacy = test_ring_enqueue_zc_burst,
+			.felem = test_ring_enqueue_zc_burst_elem,
+		},
+		.deq = {
+			.flegacy = test_ring_dequeue_zc_burst,
+			.felem = test_ring_dequeue_zc_burst_elem,
+		},
+	}
 };
 
 static unsigned int
diff --git a/app/test/test_ring.h b/app/test/test_ring.h
index b44711398..b525abb79 100644
--- a/app/test/test_ring.h
+++ b/app/test/test_ring.h
@@ -55,6 +55,48 @@ test_ring_inc_ptr(void *obj, int esize, unsigned int n)
 	return (void *)((uint32_t *)obj + (n * sz / sizeof(uint32_t)));
 }
 
+/*
+ * Copy num elements between a flat test buffer and ring storage.
+ * esize == -1 denotes the legacy pointer-object ring, where each
+ * element is sizeof(void *) bytes; otherwise esize is the element
+ * size in bytes. dst and src must not overlap (memcpy).
+ */
+static inline void
+test_ring_mem_copy(void *dst, void * const *src, int esize, unsigned int num)
+{
+	size_t sz;
+
+	sz = num * sizeof(void *);
+	if (esize != -1)
+		sz = esize * num;
+
+	memcpy(dst, src, sz);
+}
+
+/* Copy to the ring memory.
+ * The zero-copy reservation may wrap around the end of the ring, in
+ * which case it is described by two segments: n1 elements at ptr1 and
+ * the remaining (num - n1) elements at ptr2.
+ */
+static inline void
+test_ring_copy_to(struct rte_ring_zc_data *zcd, void * const *src, int esize,
+	unsigned int num)
+{
+	test_ring_mem_copy(zcd->ptr1, src, esize, zcd->n1);
+	if (zcd->n1 != num) {
+		/* Wrapped: advance src past the n1 elements already copied. */
+		if (esize == -1)
+			src = src + zcd->n1;
+		else
+			/* Advance in uint32_t units; assumes esize is a
+			 * multiple of 4 (true for the esize[] test table).
+			 */
+			src = (void * const *)((const uint32_t *)src +
+					(zcd->n1 * esize / sizeof(uint32_t)));
+		test_ring_mem_copy(zcd->ptr2, src,
+					esize, num - zcd->n1);
+	}
+}
+
+/* Copy from the ring memory.
+ * Mirror of test_ring_copy_to(): the reserved area may be split into
+ * two segments (ptr1/n1 and ptr2) when the reservation wraps around
+ * the end of the ring.
+ */
+static inline void
+test_ring_copy_from(struct rte_ring_zc_data *zcd, void *dst, int esize,
+	unsigned int num)
+{
+	test_ring_mem_copy(dst, zcd->ptr1, esize, zcd->n1);
+
+	if (zcd->n1 != num) {
+		/* Wrapped: advance dst and copy the remainder from ptr2. */
+		dst = test_ring_inc_ptr(dst, esize, zcd->n1);
+		test_ring_mem_copy(dst, zcd->ptr2, esize, num - zcd->n1);
+	}
+}
+
 static __rte_always_inline unsigned int
 test_ring_enqueue(struct rte_ring *r, void **obj, int esize, unsigned int n,
 			unsigned int api_type)
-- 
2.17.1



More information about the dev mailing list