[dpdk-dev] [PATCH v2] app/eventdev: fix port dequeue depth configuration

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Tue Jan 30 12:17:25 CET 2018


The port dequeue depth value must be capped at the maximum dequeue
depth reported by the underlying event device driver.

Fixes: 3617aae53f92 ("app/eventdev: add event Rx adapter setup")

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
---

 v2 Changes:
 - Pass port_conf as pointer.

 app/test-eventdev/test_perf_atq.c       | 13 ++++++++++++-
 app/test-eventdev/test_perf_common.c    | 29 +++++++----------------------
 app/test-eventdev/test_perf_common.h    |  3 ++-
 app/test-eventdev/test_perf_queue.c     | 12 +++++++++++-
 app/test-eventdev/test_pipeline_atq.c   |  3 +++
 app/test-eventdev/test_pipeline_queue.c |  3 +++
 6 files changed, 38 insertions(+), 25 deletions(-)

diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index d07a05425..18c02562b 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -207,7 +207,18 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}

-	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
+	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+	/* port configuration */
+	struct rte_event_port_conf p_conf = {
+			.dequeue_depth = opt->wkr_deq_dep,
+			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
+			.new_event_threshold = dev_info.max_num_events,
+	};
+
+	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+			&p_conf);
 	if (ret)
 		return ret;

diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index e279d81a5..3be210452 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -217,7 +217,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,

 static int
 perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
-		struct rte_event_port_conf prod_conf)
+		struct rte_event_port_conf *prod_conf)
 {
 	int ret = 0;
 	uint16_t prod;
@@ -239,7 +239,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 		}
 		queue_conf.ev.queue_id = prod * stride;
 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
-				&prod_conf);
+				prod_conf);
 		if (ret) {
 			evt_err("failed to create rx adapter[%d]", prod);
 			return ret;
@@ -285,22 +285,12 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,

 int
 perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-				uint8_t stride, uint8_t nb_queues)
+				uint8_t stride, uint8_t nb_queues,
+				struct rte_event_port_conf *port_conf)
 {
 	struct test_perf *t = evt_test_priv(test);
 	uint16_t port, prod;
 	int ret = -1;
-	struct rte_event_port_conf port_conf;
-
-	memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
-	rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);
-
-	/* port configuration */
-	const struct rte_event_port_conf wkr_p_conf = {
-			.dequeue_depth = opt->wkr_deq_dep,
-			.enqueue_depth = port_conf.enqueue_depth,
-			.new_event_threshold = port_conf.new_event_threshold,
-	};

 	/* setup one port per worker, linking to all queues */
 	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
@@ -313,7 +303,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 		w->processed_pkts = 0;
 		w->latency = 0;

-		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
 		if (ret) {
 			evt_err("failed to setup port %d", port);
 			return ret;
@@ -327,18 +317,13 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 	}

 	/* port for producers, no links */
-	struct rte_event_port_conf prod_conf = {
-			.dequeue_depth = port_conf.dequeue_depth,
-			.enqueue_depth = port_conf.enqueue_depth,
-			.new_event_threshold = port_conf.new_event_threshold,
-	};
 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
 		for ( ; port < perf_nb_event_ports(opt); port++) {
 			struct prod_data *p = &t->prod[port];
 			p->t = t;
 		}

-		ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
+		ret = perf_event_rx_adapter_setup(opt, stride, port_conf);
 		if (ret)
 			return ret;
 	} else {
@@ -352,7 +337,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 			p->t = t;

 			ret = rte_event_port_setup(opt->dev_id, port,
-					&prod_conf);
+					port_conf);
 			if (ret) {
 				evt_err("failed to setup port %d", port);
 				return ret;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index f8d516ce4..8187ab936 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -133,7 +133,8 @@ int perf_test_setup(struct evt_test *test, struct evt_options *opt);
 int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
 int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
 int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
-				uint8_t stride, uint8_t nb_queues);
+				uint8_t stride, uint8_t nb_queues,
+				struct rte_event_port_conf *port_conf);
 int perf_event_dev_service_setup(uint8_t dev_id);
 int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 		int (*worker)(void *));
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index d5b890876..6ef85c100 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -219,8 +219,18 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}

+	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+	/* port configuration */
+	struct rte_event_port_conf p_conf = {
+			.dequeue_depth = opt->wkr_deq_dep,
+			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
+			.new_event_threshold = dev_info.max_num_events,
+	};
+
 	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
-					nb_queues);
+					nb_queues, &p_conf);
 	if (ret)
 		return ret;

diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 36abbddda..dd7189776 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -378,6 +378,9 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}

+	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
 	/* port configuration */
 	const struct rte_event_port_conf p_conf = {
 			.dequeue_depth = opt->wkr_deq_dep,
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 835fe0782..02fc27cf8 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -397,6 +397,9 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 		}
 	}

+	if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+		opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
 	/* port configuration */
 	const struct rte_event_port_conf p_conf = {
 			.dequeue_depth = opt->wkr_deq_dep,
--
2.16.0



More information about the dev mailing list