[dpdk-dev] [PATCH v4 05/10] examples/l2fwd-event: add eventdev queue and port setup

pbhagavatula at marvell.com
Tue Sep 24 11:42:04 CEST 2019


From: Pavan Nikhilesh <pbhagavatula at marvell.com>

Add event device queue and port setup based on event eth Tx adapter
capabilities.
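
The generic and internal-port ops are meant to be selected from the Tx
adapter capabilities probed earlier in this series. A minimal sketch of
that dispatch (event_d_id, eth_port_id and the placement here are
illustrative, not the exact helper from this series):

    uint32_t caps = 0;

    /* Query Tx adapter capabilities for the (eventdev, ethdev) pair. */
    rte_event_eth_tx_adapter_caps_get(event_d_id, eth_port_id, &caps);

    /* Devices with an internal event port need no extra Tx queue or
     * service core; everything else takes the generic path.
     */
    if (caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)
            eventdev_set_internal_port_ops(&eventdev_rsrc->ops);
    else
            eventdev_set_generic_ops(&eventdev_rsrc->ops);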

Signed-off-by: Sunil Kumar Kori <skori at marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
 examples/l2fwd-event/l2fwd_eventdev.c         |  10 +
 examples/l2fwd-event/l2fwd_eventdev.h         |  18 ++
 examples/l2fwd-event/l2fwd_eventdev_generic.c | 179 +++++++++++++++++-
 .../l2fwd_eventdev_internal_port.c            | 173 ++++++++++++++++-
 4 files changed, 378 insertions(+), 2 deletions(-)

diff --git a/examples/l2fwd-event/l2fwd_eventdev.c b/examples/l2fwd-event/l2fwd_eventdev.c
index 0d0d3b8b9..7a3d077ae 100644
--- a/examples/l2fwd-event/l2fwd_eventdev.c
+++ b/examples/l2fwd-event/l2fwd_eventdev.c
@@ -216,6 +216,7 @@ eventdev_resource_setup(void)
 {
 	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
 	uint16_t ethdev_count = rte_eth_dev_count_avail();
+	uint32_t event_queue_cfg = 0;
 	uint32_t service_id;
 	int32_t ret;
 
@@ -233,6 +234,15 @@ eventdev_resource_setup(void)
 	/* Ethernet device configuration */
 	eth_dev_port_setup(ethdev_count);
 
+	/* Event device configuration */
+	event_queue_cfg = eventdev_rsrc->ops.eventdev_setup(ethdev_count);
+
+	/* Event queue configuration */
+	eventdev_rsrc->ops.event_queue_setup(ethdev_count, event_queue_cfg);
+
+	/* Event port configuration */
+	eventdev_rsrc->ops.event_port_setup();
+
 	/* Start event device service */
 	ret = rte_event_dev_service_id_get(eventdev_rsrc->event_d_id,
 			&service_id);
diff --git a/examples/l2fwd-event/l2fwd_eventdev.h b/examples/l2fwd-event/l2fwd_eventdev.h
index d380faff5..1d43200e2 100644
--- a/examples/l2fwd-event/l2fwd_eventdev.h
+++ b/examples/l2fwd-event/l2fwd_eventdev.h
@@ -26,6 +26,17 @@ typedef void (*event_port_setup_cb)(void);
 typedef void (*service_setup_cb)(void);
 typedef void (*event_loop_cb)(void);
 
+struct eventdev_queues {
+	uint8_t *event_q_id;
+	uint8_t	nb_queues;
+};
+
+struct eventdev_ports {
+	uint8_t *event_p_id;
+	uint8_t	nb_ports;
+	rte_spinlock_t lock;
+};
+
 struct eventdev_setup_ops {
 	event_queue_setup_cb event_queue_setup;
 	event_port_setup_cb event_port_setup;
@@ -36,9 +47,14 @@ struct eventdev_setup_ops {
 };
 
 struct eventdev_resources {
+	struct rte_event_port_conf def_p_conf;
 	struct l2fwd_port_statistics *stats;
+	/* Cached RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capability. */
+	uint8_t disable_implicit_release;
 	struct eventdev_setup_ops ops;
 	struct rte_mempool *pkt_pool;
+	struct eventdev_queues evq;
+	struct eventdev_ports evp;
 	uint64_t timer_period;
 	uint32_t *dst_ports;
 	uint32_t service_id;
@@ -47,6 +63,8 @@ struct eventdev_resources {
 	uint8_t event_d_id;
 	uint8_t sync_mode;
 	uint8_t tx_mode_q;
+	uint8_t deq_depth;
+	uint8_t has_burst;
 	uint8_t mac_updt;
 	uint8_t enabled;
 	uint8_t nb_args;
diff --git a/examples/l2fwd-event/l2fwd_eventdev_generic.c b/examples/l2fwd-event/l2fwd_eventdev_generic.c
index e3990f8b0..65166fded 100644
--- a/examples/l2fwd-event/l2fwd_eventdev_generic.c
+++ b/examples/l2fwd-event/l2fwd_eventdev_generic.c
@@ -17,8 +17,185 @@
 #include "l2fwd_common.h"
 #include "l2fwd_eventdev.h"
 
+static uint32_t
+eventdev_setup_generic(uint16_t ethdev_count)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	struct rte_event_dev_config event_d_conf = {
+		.nb_events_limit  = 4096,
+		.nb_event_queue_flows = 1024,
+		.nb_event_port_dequeue_depth = 128,
+		.nb_event_port_enqueue_depth = 128
+	};
+	struct rte_event_dev_info dev_info;
+	const uint8_t event_d_id = 0; /* Always use first event device only */
+	uint32_t event_queue_cfg = 0;
+	uint16_t num_workers = 0;
+	int ret;
+
+	/* Event device configuration */
+	rte_event_dev_info_get(event_d_id, &dev_info);
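+	/* Cache whether ports should be set up with implicit release
+	 * disabled (RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE).
+	 */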
+	eventdev_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+	/* One queue per ethdev port plus one single-link Tx adapter queue. */
+	event_d_conf.nb_event_queues = ethdev_count + 1;
+	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+		event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
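+	/* Clamp the requested configuration to the device limits. */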
+	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+		event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+		event_d_conf.nb_event_queue_flows =
+						dev_info.max_event_queue_flows;
+
+	if (dev_info.max_event_port_dequeue_depth <
+				event_d_conf.nb_event_port_dequeue_depth)
+		event_d_conf.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+
+	if (dev_info.max_event_port_enqueue_depth <
+				event_d_conf.nb_event_port_enqueue_depth)
+		event_d_conf.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
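+	/* Workers are all lcores minus the service lcores, which run the
+	 * SW scheduler.
+	 */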
+	num_workers = rte_lcore_count() - rte_service_lcore_count();
+	if (dev_info.max_event_ports < num_workers)
+		num_workers = dev_info.max_event_ports;
+
+	event_d_conf.nb_event_ports = num_workers;
+	eventdev_rsrc->evp.nb_ports = num_workers;
+
+	eventdev_rsrc->has_burst = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_BURST_MODE);
+
+	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Error in configuring event device");
+
+	eventdev_rsrc->event_d_id = event_d_id;
+	return event_queue_cfg;
+}
+
+static void
+event_port_setup_generic(void)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	struct rte_event_port_conf event_p_conf = {
+		.dequeue_depth = 32,
+		.enqueue_depth = 32,
+		.new_event_threshold = 4096
+	};
+	struct rte_event_port_conf def_p_conf;
+	uint8_t event_p_id;
+	int32_t ret;
+
+	/* evp.nb_ports was set during device setup and already excludes
+	 * service lcores, which run the SW scheduler rather than workers.
+	 */
+	eventdev_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc->evp.nb_ports);
+	if (!eventdev_rsrc->evp.event_p_id)
+		rte_exit(EXIT_FAILURE, " No space is available");
+
+	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
+	rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+
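+	/* Cap the requested depths and threshold at the driver defaults. */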
+	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+		event_p_conf.new_event_threshold =
+			def_p_conf.new_event_threshold;
+
+	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+	event_p_conf.disable_implicit_release =
+		eventdev_rsrc->disable_implicit_release;
+	eventdev_rsrc->deq_depth = def_p_conf.dequeue_depth;
+
+	for (event_p_id = 0; event_p_id < eventdev_rsrc->evp.nb_ports;
+								event_p_id++) {
+		ret = rte_event_port_setup(event_d_id, event_p_id,
+					   &event_p_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event port %d\n",
+				 event_p_id);
+		}
+
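+		/* Link to every queue except the last one, which is
+		 * reserved for the Tx adapter.
+		 */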
+		ret = rte_event_port_link(event_d_id, event_p_id,
+					  eventdev_rsrc->evq.event_q_id,
+					  NULL,
+					  eventdev_rsrc->evq.nb_queues - 1);
+		if (ret != (eventdev_rsrc->evq.nb_queues - 1)) {
+			rte_exit(EXIT_FAILURE, "Error in linking event port %d "
+				 "to event queue", event_p_id);
+		}
+		eventdev_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+	}
+	/* Initialize the spinlock protecting the event port list. */
+	rte_spinlock_init(&eventdev_rsrc->evp.lock);
+
+	eventdev_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+event_queue_setup_generic(uint16_t ethdev_count, uint32_t event_queue_cfg)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	struct rte_event_queue_conf event_q_conf = {
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1024,
+		.event_queue_cfg = event_queue_cfg,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+	};
+	struct rte_event_queue_conf def_q_conf;
+	uint8_t event_q_id;
+	int32_t ret;
+
+	event_q_conf.schedule_type = eventdev_rsrc->sync_mode;
+	eventdev_rsrc->evq.nb_queues = ethdev_count + 1;
+	eventdev_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc->evq.nb_queues);
+	if (!eventdev_rsrc->evq.event_q_id)
+		rte_exit(EXIT_FAILURE, "Memory allocation failure");
+
+	rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
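+	/* One event queue per ethdev port; the Tx adapter queue is
+	 * configured after this loop.
+	 */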
+	for (event_q_id = 0; event_q_id < (eventdev_rsrc->evq.nb_queues - 1);
+								event_q_id++) {
+		ret = rte_event_queue_setup(event_d_id, event_q_id,
+					    &event_q_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event queue");
+		}
+		eventdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+	}
+
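+	/* The last queue is a single-link queue feeding the Tx adapter. */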
+	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
+	if (ret < 0) {
+		rte_exit(EXIT_FAILURE,
+			 "Error in configuring event queue for Tx adapter");
+	}
+	eventdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+}
+
 void
 eventdev_set_generic_ops(struct eventdev_setup_ops *ops)
 {
-	RTE_SET_USED(ops);
+	ops->eventdev_setup = eventdev_setup_generic;
+	ops->event_queue_setup = event_queue_setup_generic;
+	ops->event_port_setup = event_port_setup_generic;
 }
diff --git a/examples/l2fwd-event/l2fwd_eventdev_internal_port.c b/examples/l2fwd-event/l2fwd_eventdev_internal_port.c
index a0d2111f9..52cb07707 100644
--- a/examples/l2fwd-event/l2fwd_eventdev_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_eventdev_internal_port.c
@@ -17,8 +17,179 @@
 #include "l2fwd_common.h"
 #include "l2fwd_eventdev.h"
 
+static uint32_t
+eventdev_setup_internal_port(uint16_t ethdev_count)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	struct rte_event_dev_config event_d_conf = {
+		.nb_events_limit  = 4096,
+		.nb_event_queue_flows = 1024,
+		.nb_event_port_dequeue_depth = 128,
+		.nb_event_port_enqueue_depth = 128
+	};
+	struct rte_event_dev_info dev_info;
+	const uint8_t event_d_id = 0; /* Always use first event device only */
+	uint32_t event_queue_cfg = 0;
+	uint16_t num_workers = 0;
+	int ret;
+
+	/* Event device configuration */
+	rte_event_dev_info_get(event_d_id, &dev_info);
+
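+	/* Cache whether ports should be set up with implicit release
+	 * disabled (RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE).
+	 */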
+	eventdev_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
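+	/* One queue per ethdev port; Tx uses the internal event port
+	 * capability, so no extra single-link queue is needed.
+	 */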
+	event_d_conf.nb_event_queues = ethdev_count;
+	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+		event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+		event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+		event_d_conf.nb_event_queue_flows =
+						dev_info.max_event_queue_flows;
+
+	if (dev_info.max_event_port_dequeue_depth <
+				event_d_conf.nb_event_port_dequeue_depth)
+		event_d_conf.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+
+	if (dev_info.max_event_port_enqueue_depth <
+				event_d_conf.nb_event_port_enqueue_depth)
+		event_d_conf.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
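+	/* All lcores are workers; no service lcore is needed for
+	 * scheduling.
+	 */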
+	num_workers = rte_lcore_count();
+	if (dev_info.max_event_ports < num_workers)
+		num_workers = dev_info.max_event_ports;
+
+	event_d_conf.nb_event_ports = num_workers;
+	eventdev_rsrc->evp.nb_ports = num_workers;
+	eventdev_rsrc->has_burst = !!(dev_info.event_dev_cap &
+				    RTE_EVENT_DEV_CAP_BURST_MODE);
+
+	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Error in configuring event device");
+
+	eventdev_rsrc->event_d_id = event_d_id;
+	return event_queue_cfg;
+}
+
+static void
+event_port_setup_internal_port(void)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	struct rte_event_port_conf event_p_conf = {
+		.dequeue_depth = 32,
+		.enqueue_depth = 32,
+		.new_event_threshold = 4096
+	};
+	struct rte_event_port_conf def_p_conf;
+	uint8_t event_p_id;
+	int32_t ret;
+
+	eventdev_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc->evp.nb_ports);
+	if (!eventdev_rsrc->evp.event_p_id)
+		rte_exit(EXIT_FAILURE,
+			 "Failed to allocate memory for Event Ports");
+
+	rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+		event_p_conf.new_event_threshold =
+						def_p_conf.new_event_threshold;
+
+	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+	event_p_conf.disable_implicit_release =
+		eventdev_rsrc->disable_implicit_release;
+
+	for (event_p_id = 0; event_p_id < eventdev_rsrc->evp.nb_ports;
+								event_p_id++) {
+		ret = rte_event_port_setup(event_d_id, event_p_id,
+					   &event_p_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event port %d\n",
+				 event_p_id);
+		}
+
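+		/* queues == NULL links the port to all configured event
+		 * queues.
+		 */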
+		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+					  NULL, 0);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE, "Error in linking event port %d "
+				 "to event queue", event_p_id);
+		}
+		eventdev_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+	}
+
+	/* Initialize the spinlock protecting the event port list. */
+	rte_spinlock_init(&eventdev_rsrc->evp.lock);
+
+	eventdev_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+event_queue_setup_internal_port(uint16_t ethdev_count, uint32_t event_queue_cfg)
+{
+	struct eventdev_resources *eventdev_rsrc = get_eventdev_rsrc();
+	uint8_t event_d_id = eventdev_rsrc->event_d_id;
+	struct rte_event_queue_conf event_q_conf = {
+		.nb_atomic_flows = 1024,
+		.nb_atomic_order_sequences = 1024,
+		.event_queue_cfg = event_queue_cfg,
+		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+	};
+	struct rte_event_queue_conf def_q_conf;
+	uint8_t event_q_id = 0;
+	int32_t ret;
+
+	rte_event_queue_default_conf_get(event_d_id, event_q_id, &def_q_conf);
+
+	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+	if (def_q_conf.nb_atomic_order_sequences <
+					event_q_conf.nb_atomic_order_sequences)
+		event_q_conf.nb_atomic_order_sequences =
+					def_q_conf.nb_atomic_order_sequences;
+
+	event_q_conf.schedule_type = eventdev_rsrc->sync_mode;
+	eventdev_rsrc->evq.nb_queues = ethdev_count;
+	eventdev_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc->evq.nb_queues);
+	if (!eventdev_rsrc->evq.event_q_id)
+		rte_exit(EXIT_FAILURE, "Memory allocation failure");
+
+	for (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {
+		ret = rte_event_queue_setup(event_d_id, event_q_id,
+					    &event_q_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event queue");
+		}
+		eventdev_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+	}
+}
+
 void
 eventdev_set_internal_port_ops(struct eventdev_setup_ops *ops)
 {
-	RTE_SET_USED(ops);
+	ops->eventdev_setup = eventdev_setup_internal_port;
+	ops->event_queue_setup = event_queue_setup_internal_port;
+	ops->event_port_setup = event_port_setup_internal_port;
 }
-- 
2.17.1


