[dpdk-dev] [PATCH v3] eventdev: update app and examples for new eventdev ABI

Timothy McDaniel timothy.mcdaniel at intel.com
Wed Oct 14 19:33:06 CEST 2020


Several data structures and constants changed or were added in the
previous patch. This commit updates the dependent apps and examples
to use the new ABI; the two migration patterns involved are sketched
in the notes below.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel at intel.com>
Acked-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
Acked-by: Harry van Haaren <harry.van.haaren at intel.com>
---
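Notes:

The port-setup change is mechanical: the old disable_implicit_release
bitfield is replaced by the event_port_cfg flags word. A minimal
before/after sketch (the surrounding setup code and the
need_impl_rel_disabled variable are illustrative, not from this
patch):

	struct rte_event_port_conf pconf;

	rte_event_port_default_conf_get(dev_id, port_id, &pconf);

	/* Old ABI (field removed by the previous patch):
	 *	pconf.disable_implicit_release = 1;
	 */

	/* New ABI: the same intent expressed as a config flag. */
	pconf.event_port_cfg = 0;
	if (need_impl_rel_disabled)
		pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	rte_event_port_setup(dev_id, port_id, &pconf);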
 app/test-eventdev/evt_common.h                     | 11 ++++++++
 app/test-eventdev/test_order_atq.c                 | 28 +++++++++++++++------
 app/test-eventdev/test_order_common.c              |  1 +
 app/test-eventdev/test_order_queue.c               | 29 ++++++++++++++++------
 app/test/test_eventdev.c                           |  4 +--
 .../eventdev_pipeline/pipeline_worker_generic.c    |  6 +++--
 examples/eventdev_pipeline/pipeline_worker_tx.c    |  1 +
 examples/l2fwd-event/l2fwd_event_generic.c         |  7 ++++--
 examples/l2fwd-event/l2fwd_event_internal_port.c   |  6 +++--
 examples/l3fwd/l3fwd_event_generic.c               |  7 ++++--
 examples/l3fwd/l3fwd_event_internal_port.c         |  6 +++--
 11 files changed, 80 insertions(+), 26 deletions(-)
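The ordering-test changes all follow one pattern: when a device does
not advertise RTE_EVENT_DEV_CAP_CARRY_FLOW_ID, the producer stashes
the flow ID in the mbuf and each worker restores it after dequeue.
A condensed sketch of that pattern, with the worker loop and error
handling omitted (dev_info is assumed to have been filled in by
rte_event_dev_info_get()):

	/* Producer: keep a private copy of the flow ID in the mbuf. */
	ev.flow_id = flow;
	ev.mbuf = m;
	m->udata64 = flow;	/* survives devices that drop ev.flow_id */

	/* Worker: restore flow_id when the device cannot carry it. */
	if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID))
		ev.flow_id = ev.mbuf->udata64;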

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index f9d7378..a1da1cf 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -104,6 +104,16 @@ evt_has_all_types_queue(uint8_t dev_id)
 			true : false;
 }
 
+static inline bool
+evt_has_flow_id(uint8_t dev_id)
+{
+	struct rte_event_dev_info dev_info;
+
+	rte_event_dev_info_get(dev_id, &dev_info);
+	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
+			true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
@@ -169,6 +179,7 @@ evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
 			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 0,
 			.nb_events_limit  = info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
 			.nb_event_port_dequeue_depth =
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 3366cfc..cfcb1dc 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -19,7 +19,7 @@ order_atq_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev;
@@ -34,6 +34,9 @@ order_atq_worker(void *arg)
 			continue;
 		}
 
+		if (!flow_id_cap)
+			ev.flow_id = ev.mbuf->udata64;
+
 		if (ev.sub_event_type == 0) { /* stage 0 from producer */
 			order_atq_process_stage_0(&ev);
 			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_atq_worker(void *arg)
 }
 
 static int
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@ order_atq_worker_burst(void *arg)
 		}
 
 		for (i = 0; i < nb_rx; i++) {
+			if (!flow_id_cap)
+				ev[i].flow_id = ev[i].mbuf->udata64;
+
 			if (ev[i].sub_event_type == 0) { /*stage 0 */
 				order_atq_process_stage_0(&ev[i]);
 			} else if (ev[i].sub_event_type == 1) { /* stage 1 */
@@ -95,11 +101,19 @@ worker_wrapper(void *arg)
 {
 	struct worker_data *w  = arg;
 	const bool burst = evt_has_burst_mode(w->dev_id);
-
-	if (burst)
-		return order_atq_worker_burst(arg);
-	else
-		return order_atq_worker(arg);
+	const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+	if (burst) {
+		if (flow_id_cap)
+			return order_atq_worker_burst(arg, true);
+		else
+			return order_atq_worker_burst(arg, false);
+	} else {
+		if (flow_id_cap)
+			return order_atq_worker(arg, true);
+		else
+			return order_atq_worker(arg, false);
+	}
 }
 
 static int
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 4190f9a..7942390 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -49,6 +49,7 @@ order_producer(void *arg)
 		const uint32_t flow = (uintptr_t)m % nb_flows;
 		/* Maintain seq number per flow */
 		m->seqn = producer_flow_seq[flow]++;
+		m->udata64 = flow;
 
 		ev.flow_id = flow;
 		ev.mbuf = m;
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 495efd9..1511c00 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev;
@@ -34,6 +34,9 @@ order_queue_worker(void *arg)
 			continue;
 		}
 
+		if (!flow_id_cap)
+			ev.flow_id = ev.mbuf->udata64;
+
 		if (ev.queue_id == 0) { /* from ordered queue */
 			order_queue_process_stage_0(&ev);
 			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ order_queue_worker(void *arg)
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,10 @@ order_queue_worker_burst(void *arg)
 		}
 
 		for (i = 0; i < nb_rx; i++) {
+
+			if (!flow_id_cap)
+				ev[i].flow_id = ev[i].mbuf->udata64;
+
 			if (ev[i].queue_id == 0) { /* from ordered queue */
 				order_queue_process_stage_0(&ev[i]);
 			} else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,19 @@ worker_wrapper(void *arg)
 {
 	struct worker_data *w  = arg;
 	const bool burst = evt_has_burst_mode(w->dev_id);
-
-	if (burst)
-		return order_queue_worker_burst(arg);
-	else
-		return order_queue_worker(arg);
+	const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+	if (burst) {
+		if (flow_id_cap)
+			return order_queue_worker_burst(arg, true);
+		else
+			return order_queue_worker_burst(arg, false);
+	} else {
+		if (flow_id_cap)
+			return order_queue_worker(arg, true);
+		else
+			return order_queue_worker(arg, false);
+	}
 }
 
 static int
diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 43ccb1c..62019c1 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -559,10 +559,10 @@ test_eventdev_port_setup(void)
 	if (!(info.event_dev_cap &
 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
 		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
-		pconf.disable_implicit_release = 1;
+		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
 		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
-		pconf.disable_implicit_release = 0;
+		pconf.event_port_cfg = 0;
 	}
 
 	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index 42ff4ee..f70ab0c 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -129,6 +129,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 1,
 			.nb_events_limit  = 4096,
 			.nb_event_queue_flows = 1024,
 			.nb_event_port_dequeue_depth = 128,
@@ -143,7 +144,7 @@ setup_eventdev_generic(struct worker_data *worker_data)
 			.schedule_type = cdata.queue_type,
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.nb_atomic_flows = 1024,
-		.nb_atomic_order_sequences = 1024,
+			.nb_atomic_order_sequences = 1024,
 	};
 	struct rte_event_queue_conf tx_q_conf = {
 			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@@ -167,7 +168,8 @@ setup_eventdev_generic(struct worker_data *worker_data)
 	disable_implicit_release = (dev_info.event_dev_cap &
 			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
 
-	wkr_p_conf.disable_implicit_release = disable_implicit_release;
+	wkr_p_conf.event_port_cfg = disable_implicit_release ?
+		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
 	if (dev_info.max_num_events < config.nb_events_limit)
 		config.nb_events_limit = dev_info.max_num_events;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 55bb2f7..ca6cd20 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -436,6 +436,7 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 0,
 			.nb_events_limit  = 4096,
 			.nb_event_queue_flows = 1024,
 			.nb_event_port_dequeue_depth = 128,
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 2dc95e5..9a3167c 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -126,8 +126,11 @@ l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |=
+			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
 	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index 63d57b4..203a14c 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -123,8 +123,10 @@ l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |=
+			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
 								event_p_id++) {
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index f8c9843..c80573f 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -115,8 +115,11 @@ l3fwd_event_port_setup_generic(void)
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |=
+			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
+
 	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 03ac581..9916a7f 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -113,8 +113,10 @@ l3fwd_event_port_setup_internal_port(void)
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |=
+			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
 								event_p_id++) {
-- 
2.6.4


