[dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support

Hemant Agrawal hemant.agrawal at nxp.com
Thu Aug 30 07:33:16 CEST 2018

This patch adds select()-based (interrupt-mode) dequeue wait support to the
DPAA event device driver, gated by CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE,
so the dequeue timeout sleeps on the QMan portal UIO fd instead of
busy-polling on timer cycles.
Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
---
 config/common_base                       |   1 +
 config/defconfig_arm64-dpaa-linuxapp-gcc |   1 +
 drivers/event/dpaa/dpaa_eventdev.c       | 148 +++++++++++++++++++++++--------
 drivers/event/dpaa/dpaa_eventdev.h       |   8 +-
 4 files changed, 115 insertions(+), 43 deletions(-)

diff --git a/config/common_base b/config/common_base
index 4bcbaf9..01a6f17 100644
--- a/config/common_base
+++ b/config/common_base
@@ -199,6 +199,7 @@ CONFIG_RTE_LIBRTE_DPAA_BUS=n
 CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
 CONFIG_RTE_LIBRTE_DPAA_PMD=n
 CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
+CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=n
 
 #
 # Compile NXP DPAA2 FSL-MC Bus
diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc b/config/defconfig_arm64-dpaa-linuxapp-gcc
index c47aec0..cdaaa4c 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -21,3 +21,4 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128
 # NXP DPAA Bus
 CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
+CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=y
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 9ddaf30..b82a8a9 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -47,14 +47,18 @@ static int
 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
 				 uint64_t *timeout_ticks)
 {
-	uint64_t cycles_per_second;
-
 	EVENTDEV_INIT_FUNC_TRACE();
 
 	RTE_SET_USED(dev);
 
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+	*timeout_ticks = ns/1000;
+#else
+	uint64_t cycles_per_second;
+
 	cycles_per_second = rte_get_timer_hz();
-	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
+#endif
 
 	return 0;
 }
@@ -100,6 +104,58 @@ dpaa_event_enqueue(void *port, const struct rte_event *ev)
 	return dpaa_event_enqueue_burst(port, ev, 1);
 }
 
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+static void drain_4_bytes(int fd, fd_set *fdset)
+{
+	if (FD_ISSET(fd, fdset)) {
+		/* drain 4 bytes from the fd that woke us, not a fresh lookup */
+		uint32_t junk;
+		ssize_t sjunk = read(fd, &junk, sizeof(junk));
+		if (sjunk != sizeof(junk))
+			DPAA_EVENTDEV_ERR("UIO irq read error");
+	}
+}
+
+static inline int
+dpaa_event_dequeue_wait(uint64_t timeout_ticks)
+{
+	int fd_qman, nfds;
+	int ret;
+	fd_set readset;
+
+	/* Go into (and back out of) IRQ mode for each select,
+	 * it simplifies exit-path considerations and other
+	 * potential nastiness.
+	 */
+	struct timeval tv = {
+		.tv_sec = timeout_ticks / 1000000, /* timeout_ticks is in microseconds in INTR mode */
+		.tv_usec = timeout_ticks % 1000000
+	};
+
+	fd_qman = qman_thread_fd();
+	nfds = fd_qman + 1;
+	FD_ZERO(&readset);
+	FD_SET(fd_qman, &readset);
+
+	qman_irqsource_add(QM_PIRQ_DQRI); /* enable dequeue-ready IRQ before sleeping */
+
+	ret = select(nfds, &readset, NULL, NULL, &tv);
+	if (ret < 0)
+		return ret; /* NOTE(review): IRQ source stays enabled on select() failure — confirm intended */
+	/* Calling irqsource_remove() prior to thread_irq()
+	 * means thread_irq() will not process whatever caused
+	 * the interrupts, however it does ensure that, once
+	 * thread_irq() re-enables interrupts, they won't fire
+	 * again immediately.
+	 */
+	qman_irqsource_remove(~0);
+	drain_4_bytes(fd_qman, &readset);
+	qman_thread_irq();
+
+	return ret; /* select() result: >0 fd ready (event likely), 0 timeout */
+}
+#endif
+
 static uint16_t
 dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 			 uint16_t nb_events, uint64_t timeout_ticks)
@@ -107,8 +163,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	int ret;
 	u16 ch_id;
 	void *buffers[8];
-	u32 num_frames, i;
-	uint64_t wait_time, cur_ticks, start_ticks;
+	u32 num_frames, i, irq = 0;
+	uint64_t cur_ticks = 0, wait_time_ticks = 0;
 	struct dpaa_port *portal = (struct dpaa_port *)port;
 	struct rte_mbuf *mbuf;
 
@@ -147,20 +203,32 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
 	}
 	DPAA_PER_LCORE_DQRR_HELD = 0;
 
-	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
-		wait_time = timeout_ticks;
+	if (timeout_ticks)
+		wait_time_ticks = timeout_ticks;
 	else
-		wait_time = portal->timeout;
+		wait_time_ticks = portal->timeout_us;
 
-	/* Lets dequeue the frames */
-	start_ticks = rte_get_timer_cycles();
-	wait_time += start_ticks;
+#ifndef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+	wait_time_ticks += rte_get_timer_cycles();
+#endif
 	do {
+		/* Lets dequeue the frames */
 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
-		if (num_frames != 0)
+		if (irq)
+			irq = 0;
+		if (num_frames)
 			break;
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+		if (wait_time_ticks) { /* wait for time */
+			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
+				irq = 1;
+				continue;
+			}
+			break; /* no event after waiting */
+		}
+#endif
 		cur_ticks = rte_get_timer_cycles();
-	} while (cur_ticks < wait_time);
+	} while (cur_ticks < wait_time_ticks);
 
 	return num_frames;
 }
@@ -184,7 +252,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
 	dev_info->max_dequeue_timeout_ns =
 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
 	dev_info->dequeue_timeout_ns =
-		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	dev_info->max_event_queues =
 		DPAA_EVENT_MAX_QUEUES;
 	dev_info->max_event_queue_flows =
@@ -230,15 +298,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
 	priv->event_dev_cfg = conf->event_dev_cfg;
 
-	/* Check dequeue timeout method is per dequeue or global */
-	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		/*
-		 * Use timeout value as given in dequeue operation.
-		 * So invalidating this timetout value.
-		 */
-		priv->dequeue_timeout_ns = 0;
-	}
-
 	ch_id = rte_malloc("dpaa-channels",
 			  sizeof(uint32_t) * priv->nb_event_queues,
 			  RTE_CACHE_LINE_SIZE);
@@ -260,24 +319,34 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
 	/* Lets prepare event ports */
 	memset(&priv->ports[0], 0,
 	      sizeof(struct dpaa_port) * priv->nb_event_ports);
+
+	/* Check dequeue timeout method is per dequeue or global */
 	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			priv->ports[i].timeout =
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
-		}
-	} else if (priv->dequeue_timeout_ns == 0) {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
-				&priv->ports[i].timeout);
-		}
+		/*
+		 * Use timeout value as given in dequeue operation.
+		 * So invalidating this timeout value.
+		 */
+		priv->dequeue_timeout_ns = 0;
+
+	} else if (conf->dequeue_timeout_ns == 0) {
+		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
 	} else {
-		for (i = 0; i < priv->nb_event_ports; i++) {
-			dpaa_event_dequeue_timeout_ticks(NULL,
-				priv->dequeue_timeout_ns,
-				&priv->ports[i].timeout);
-		}
+		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
 	}
+
+	for (i = 0; i < priv->nb_event_ports; i++) {
+#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
+		priv->ports[i].timeout_us = priv->dequeue_timeout_ns/1000;
+#else
+		uint64_t cycles_per_second;
+
+		cycles_per_second = rte_get_timer_hz();
+		priv->ports[i].timeout_us =
+			(priv->dequeue_timeout_ns * cycles_per_second)
+				/ NS_PER_S;
+#endif
+	}
+
 	/*
 	 * TODO: Currently portals are affined with threads. Maximum threads
 	 * can be created equals to number of lcore.
@@ -454,7 +523,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
 		event_queue->event_port = NULL;
 	}
 
-	event_port->num_linked_evq = event_port->num_linked_evq - i;
+	if (event_port->num_linked_evq)
+		event_port->num_linked_evq = event_port->num_linked_evq - i;
 
 	return (int)i;
 }
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 3994bd6..2021339 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -12,8 +12,8 @@
 
 #define EVENTDEV_NAME_DPAA_PMD		event_dpaa1
 
-#define DPAA_EVENT_MAX_PORTS			8
-#define DPAA_EVENT_MAX_QUEUES			16
+#define DPAA_EVENT_MAX_PORTS			4
+#define DPAA_EVENT_MAX_QUEUES			8
 #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT	1
 #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT	(UINT32_MAX - 1)
 #define DPAA_EVENT_MAX_QUEUE_FLOWS		2048
@@ -21,7 +21,7 @@
 #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS	0
 #define DPAA_EVENT_MAX_EVENT_PORT		RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
 #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH	8
-#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS	100000UL
 #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID	((uint64_t)-1)
 #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH	1
 #define DPAA_EVENT_MAX_NUM_EVENTS		(INT32_MAX - 1)
@@ -54,7 +54,7 @@ struct dpaa_port {
 	struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
 	uint8_t num_linked_evq;
 	uint8_t is_port_linked;
-	uint64_t timeout;
+	uint64_t timeout_us;
 };
 
 struct dpaa_eventdev {
-- 
2.7.4



More information about the dev mailing list