[dpdk-dev] [PATCH v3 10/12] app/eventdev: add pipeline atq test

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Wed Jan 10 15:51:42 CET 2018


This is a pipeline test case that aims at testing the following with the
``all types queue`` eventdev scheme:
1. Measure the end-to-end performance of an event dev with an ethernet dev.
2. Maintain packet ordering from Rx to Tx.

The atq test functions the same as the ``pipeline_queue`` test. The
difference is that it uses an ``all types queue`` scheme instead of
separate queues for each stage, which reduces the number of queues
required to realize the use case.
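
For illustration, a minimal worker sketch (not part of this patch; the
atq_worker_sketch helper and its two-stage ordered->atomic flow are
hypothetical, and it assumes rte_eventdev.h and rte_ethdev.h are included)
showing how a stage on an all types queue advances an event by rewriting
its schedule type in place instead of enqueuing it to a next-stage queue:

    static void
    atq_worker_sketch(uint8_t dev_id, uint8_t port)
    {
        struct rte_event ev;

        /* Sketch only: stop when no event is immediately available. */
        while (rte_event_dequeue_burst(dev_id, port, &ev, 1, 0)) {
            if (ev.sched_type == RTE_SCHED_TYPE_ORDERED) {
                /* Next stage: same queue, only the schedule type changes. */
                ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
                ev.op = RTE_EVENT_OP_FORWARD;
                rte_event_enqueue_burst(dev_id, port, &ev, 1);
            } else {
                /* Final stage: transmit the packet on its Rx port. */
                rte_eth_tx_burst(ev.mbuf->port, 0, &ev.mbuf, 1);
            }
        }
    }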

Note: The --prod_type_ethdev option is mandatory for running the application.

Example command to run the pipeline atq test:
sudo build/app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \
--test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=ao

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
---

 v3 Changes:
 - redo queue creation by changing the queue count to nb_ethdev
 - add SPDX licence tags

 app/test-eventdev/Makefile            |   1 +
 app/test-eventdev/test_pipeline_atq.c | 158 ++++++++++++++++++++++++++++++++++
 2 files changed, 159 insertions(+)
 create mode 100644 app/test-eventdev/test_pipeline_atq.c

diff --git a/app/test-eventdev/Makefile b/app/test-eventdev/Makefile
index 30bebfb2f..6e3e36fb8 100644
--- a/app/test-eventdev/Makefile
+++ b/app/test-eventdev/Makefile
@@ -53,5 +53,6 @@ SRCS-y += test_perf_atq.c

 SRCS-y += test_pipeline_common.c
 SRCS-y += test_pipeline_queue.c
+SRCS-y += test_pipeline_atq.c

 include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
new file mode 100644
index 000000000..642ef0375
--- /dev/null
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -0,0 +1,158 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "test_pipeline_common.h"
+
+/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+
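+/*
+ * With all types queues a single event queue per ethernet device is
+ * enough: every pipeline stage runs on the same queue by changing the
+ * event's schedule type instead of moving it to a per-stage queue.
+ */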
+static __rte_always_inline int
+pipeline_atq_nb_event_queues(struct evt_options *opt)
+{
+	RTE_SET_USED(opt);
+
+	return rte_eth_dev_count();
+}
+
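+/*
+ * The stage worker functions are not part of this patch; reaching this
+ * wrapper is therefore a fatal error.
+ */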
+static int
+worker_wrapper(void *arg)
+{
+	RTE_SET_USED(arg);
+	rte_panic("invalid worker\n");
+}
+
+static int
+pipeline_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+	return pipeline_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+	int ret;
+	int nb_ports;
+	int nb_queues;
+	uint8_t queue;
+	struct rte_event_dev_info info;
+
+	nb_ports = evt_nr_active_lcores(opt->wlcores);
+	nb_queues = rte_eth_dev_count();
+
+	rte_event_dev_info_get(opt->dev_id, &info);
+
+	const struct rte_event_dev_config config = {
+			.nb_event_queues = nb_queues,
+			.nb_event_ports = nb_ports,
+			.nb_events_limit  = info.max_num_events,
+			.nb_event_queue_flows = opt->nb_flows,
+			.nb_event_port_dequeue_depth =
+				info.max_event_port_dequeue_depth,
+			.nb_event_port_enqueue_depth =
+				info.max_event_port_enqueue_depth,
+	};
+	ret = rte_event_dev_configure(opt->dev_id, &config);
+	if (ret) {
+		evt_err("failed to configure eventdev %d", opt->dev_id);
+		return ret;
+	}
+
+	struct rte_event_queue_conf q_conf = {
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = opt->nb_flows,
+			.nb_atomic_order_sequences = opt->nb_flows,
+	};
+	/* queue configurations */
+	for (queue = 0; queue < nb_queues; queue++) {
+		q_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+		if (ret) {
+			evt_err("failed to setup queue=%d", queue);
+			return ret;
+		}
+	}
+
+	/* port configuration */
+	const struct rte_event_port_conf p_conf = {
+			.dequeue_depth = opt->wkr_deq_dep,
+			.enqueue_depth = info.max_event_port_dequeue_depth,
+			.new_event_threshold = info.max_num_events,
+	};
+
+	ret = pipeline_event_port_setup(test, opt, nb_queues, p_conf);
+	if (ret)
+		return ret;
+
+	ret = pipeline_event_rx_adapter_setup(opt, 1, p_conf);
+	if (ret)
+		return ret;
+
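+	/*
+	 * Event devices without distributed scheduling (e.g. event_sw) run
+	 * their scheduler as a service and need a service core to drive it.
+	 */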
+	if (!evt_has_distributed_sched(opt->dev_id)) {
+		uint32_t service_id;
+		rte_event_dev_service_id_get(opt->dev_id, &service_id);
+		ret = evt_service_setup(service_id);
+		if (ret) {
+			evt_err("No service lcore found to run event dev.");
+			return ret;
+		}
+	}
+
+	ret = rte_event_dev_start(opt->dev_id);
+	if (ret) {
+		evt_err("failed to start eventdev %d", opt->dev_id);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void
+pipeline_atq_opt_dump(struct evt_options *opt)
+{
+	pipeline_opt_dump(opt, pipeline_atq_nb_event_queues(opt));
+}
+
+static int
+pipeline_atq_opt_check(struct evt_options *opt)
+{
+	return pipeline_opt_check(opt, pipeline_atq_nb_event_queues(opt));
+}
+
+static bool
+pipeline_atq_capability_check(struct evt_options *opt)
+{
+	struct rte_event_dev_info dev_info;
+
+	rte_event_dev_info_get(opt->dev_id, &dev_info);
+	if (dev_info.max_event_queues < pipeline_atq_nb_event_queues(opt) ||
+			dev_info.max_event_ports <
+			evt_nr_active_lcores(opt->wlcores)) {
+		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+			pipeline_atq_nb_event_queues(opt),
+			dev_info.max_event_queues,
+			evt_nr_active_lcores(opt->wlcores),
+			dev_info.max_event_ports);
+		return false;
+	}
+
+	return true;
+}
+
+static const struct evt_test_ops pipeline_atq =  {
+	.cap_check          = pipeline_atq_capability_check,
+	.opt_check          = pipeline_atq_opt_check,
+	.opt_dump           = pipeline_atq_opt_dump,
+	.test_setup         = pipeline_test_setup,
+	.mempool_setup      = pipeline_mempool_setup,
+	.ethdev_setup	    = pipeline_ethdev_setup,
+	.eventdev_setup     = pipeline_atq_eventdev_setup,
+	.launch_lcores      = pipeline_atq_launch_lcores,
+	.eventdev_destroy   = pipeline_eventdev_destroy,
+	.mempool_destroy    = pipeline_mempool_destroy,
+	.ethdev_destroy	    = pipeline_ethdev_destroy,
+	.test_result        = pipeline_test_result,
+	.test_destroy       = pipeline_test_destroy,
+};
+
+EVT_TEST_REGISTER(pipeline_atq);
--
2.15.1


