[dpdk-dev] [PATCH] event/octeontx2: unlink queues during port release

Shijith Thotton sthotton at marvell.com
Wed Nov 18 09:54:11 CET 2020


Unlinking queues from a port should be done during port release. Doing
it during device re-configuration could result in a segfault, as the
ports array is re-allocated based on the new number of ports.

Fixes: f7ac8b66b23c ("event/octeontx2: support linking queues to ports")
Cc: stable at dpdk.org

Signed-off-by: Shijith Thotton <sthotton at marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
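Note for reviewers: the patch reserves one cache line in front of each
event port structure and stores a cookie there, so that the port
pointer alone is enough to recover the owning event device at release
time. Below is a minimal standalone sketch of that layout; the cache
line size is hard-coded to 64 and plain calloc()/free() stand in for
rte_zmalloc_socket()/rte_free(), both assumptions for illustration
only:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define CACHE_LINE 64          /* stands in for RTE_CACHE_LINE_SIZE */

	struct cookie {                /* mirrors otx2_ssogws_cookie */
		const void *event_dev;
		bool configured;
	};

	struct port {                  /* stands in for otx2_ssogws */
		uint8_t data[128];
	};

	static struct cookie *
	get_cookie(void *ws)
	{
		/* The cookie lives one cache line before the port. */
		return (struct cookie *)((uint8_t *)ws - CACHE_LINE);
	}

	int
	main(void)
	{
		/* One block holds cookie + port, as the patch allocates. */
		uint8_t *mem = calloc(1, sizeof(struct port) + CACHE_LINE);
		struct port *ws;
		struct cookie *ck;

		if (mem == NULL)
			return 1;

		ws = (struct port *)(mem + CACHE_LINE);
		ck = get_cookie(ws);
		ck->event_dev = (const void *)0x1;
		ck->configured = true;

		/* At release time only the port pointer is needed. */
		printf("configured: %d\n", get_cookie(ws)->configured);

		/* Free from the cookie, i.e. the start of the block. */
		free(get_cookie(ws));

		return 0;
	}

The key point is that free() (rte_free() in the driver) must receive
the start of the allocation, which is why otx2_sso_port_release() below
frees gws_cookie rather than port.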
 drivers/event/octeontx2/otx2_evdev.c | 98 +++++++++++++++++-----------
 drivers/event/octeontx2/otx2_evdev.h | 12 ++++
 2 files changed, 71 insertions(+), 39 deletions(-)

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index b31c26e95..ed29b8325 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -689,7 +689,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
 static void
 otx2_sso_port_release(void *port)
 {
-	rte_free(port);
+	struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+	struct otx2_sso_evdev *dev;
+	int i;
+
+	if (!gws_cookie->configured)
+		goto free;
+
+	dev = sso_pmd_priv(gws_cookie->event_dev);
+	if (dev->dual_ws) {
+		struct otx2_ssogws_dual *ws = port;
+
+		for (i = 0; i < dev->nb_event_queues; i++) {
+			sso_port_link_modify((struct otx2_ssogws *)
+					     &ws->ws_state[0], i, false);
+			sso_port_link_modify((struct otx2_ssogws *)
+					     &ws->ws_state[1], i, false);
+		}
+		memset(ws, 0, sizeof(*ws));
+	} else {
+		struct otx2_ssogws *ws = port;
+
+		for (i = 0; i < dev->nb_event_queues; i++)
+			sso_port_link_modify(ws, i, false);
+		memset(ws, 0, sizeof(*ws));
+	}
+
+	memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
+	rte_free(gws_cookie);
 }
 
 static void
@@ -699,33 +728,6 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
 	RTE_SET_USED(queue_id);
 }
 
-static void
-sso_clr_links(const struct rte_eventdev *event_dev)
-{
-	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
-	int i, j;
-
-	for (i = 0; i < dev->nb_event_ports; i++) {
-		if (dev->dual_ws) {
-			struct otx2_ssogws_dual *ws;
-
-			ws = event_dev->data->ports[i];
-			for (j = 0; j < dev->nb_event_queues; j++) {
-				sso_port_link_modify((struct otx2_ssogws *)
-						&ws->ws_state[0], j, false);
-				sso_port_link_modify((struct otx2_ssogws *)
-						&ws->ws_state[1], j, false);
-			}
-		} else {
-			struct otx2_ssogws *ws;
-
-			ws = event_dev->data->ports[i];
-			for (j = 0; j < dev->nb_event_queues; j++)
-				sso_port_link_modify(ws, j, false);
-		}
-	}
-}
-
 static void
 sso_restore_links(const struct rte_eventdev *event_dev)
 {
@@ -803,6 +805,7 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
 	}
 
 	for (i = 0; i < dev->nb_event_ports; i++) {
+		struct otx2_ssogws_cookie *gws_cookie;
 		struct otx2_ssogws_dual *ws;
 		uintptr_t base;
 
@@ -811,14 +814,20 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
 		} else {
 			/* Allocate event port memory */
 			ws = rte_zmalloc_socket("otx2_sso_ws",
-					sizeof(struct otx2_ssogws_dual),
+					sizeof(struct otx2_ssogws_dual) +
+					RTE_CACHE_LINE_SIZE,
 					RTE_CACHE_LINE_SIZE,
 					event_dev->data->socket_id);
-		}
-		if (ws == NULL) {
-			otx2_err("Failed to alloc memory for port=%d", i);
-			rc = -ENOMEM;
-			break;
+			if (ws == NULL) {
+				otx2_err("Failed to alloc memory for port=%d",
+					 i);
+				rc = -ENOMEM;
+				break;
+			}
+
+			/* First cache line reserved for cookie */
+			ws = (struct otx2_ssogws_dual *)
+				((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
 		}
 
 		ws->port = i;
@@ -830,6 +839,10 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
 		sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
 		vws++;
 
+		gws_cookie = ssogws_get_cookie(ws);
+		gws_cookie->event_dev = event_dev;
+		gws_cookie->configured = 1;
+
 		event_dev->data->ports[i] = ws;
 	}
 
@@ -866,6 +879,7 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
 	}
 
 	for (i = 0; i < nb_lf; i++) {
+		struct otx2_ssogws_cookie *gws_cookie;
 		struct otx2_ssogws *ws;
 		uintptr_t base;
 
@@ -878,7 +892,8 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
 
 		/* Allocate event port memory */
 		ws = rte_zmalloc_socket("otx2_sso_ws",
-					sizeof(struct otx2_ssogws),
+					sizeof(struct otx2_ssogws) +
+					RTE_CACHE_LINE_SIZE,
 					RTE_CACHE_LINE_SIZE,
 					event_dev->data->socket_id);
 		if (ws == NULL) {
@@ -887,10 +902,18 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
 			break;
 		}
 
+		/* First cache line reserved for cookie */
+		ws = (struct otx2_ssogws *)
+			((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
 		ws->port = i;
 		base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
 		sso_set_port_ops(ws, base);
 
+		gws_cookie = ssogws_get_cookie(ws);
+		gws_cookie->event_dev = event_dev;
+		gws_cookie->configured = 1;
+
 		event_dev->data->ports[i] = ws;
 	}
 
@@ -1099,11 +1122,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	if (dev->configured) {
+	if (dev->configured)
 		sso_unregister_irqs(event_dev);
-		/* Clear any prior port-queue mapping. */
-		sso_clr_links(event_dev);
-	}
 
 	if (dev->nb_event_queues) {
 		/* Finit any previous queues. */
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 547e29d4a..116f953f5 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -217,6 +217,18 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
 	return event_dev->data->dev_private;
 }
 
+struct otx2_ssogws_cookie {
+	const struct rte_eventdev *event_dev;
+	bool configured;
+};
+
+static inline struct otx2_ssogws_cookie *
+ssogws_get_cookie(void *ws)
+{
+	return (struct otx2_ssogws_cookie *)
+		((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
+}
+
 static const union mbuf_initializer mbuf_init = {
 	.fields = {
 		.data_off = RTE_PKTMBUF_HEADROOM,
-- 
2.25.1


