[dpdk-dev] [PATCH v5 03/15] examples/ipsec-secgw: add eventdev port-lcore link

Lukasz Bartosik lbartosik at marvell.com
Thu Feb 27 17:18:25 CET 2020


From: Anoob Joseph <anoobj at marvell.com>

Add event device port-lcore links and specify which event queues should
be connected to each event port. Generate a default config for the event
port-lcore links if none is specified in the configuration. This routine
checks the number of available event ports and then creates links
according to the number of available cores. This patch also adds a new
flag in the eventmode conf to denote that all queues are to be linked
with every port, which enables one core to receive packets from all
Ethernet ports.

Signed-off-by: Anoob Joseph <anoobj at marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik at marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 126 ++++++++++++++++++++++++++++++++++++
 examples/ipsec-secgw/event_helper.h |  33 ++++++++++
 2 files changed, 159 insertions(+)
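
[Not part of the patch] A minimal illustration of the mechanism the default
link config relies on: rte_event_port_link() with a NULL queue list links
every configured event queue to the given port, which is what the
"all_ev_queue_to_ev_port" flag selects. The function and variable names
below are made up for this sketch.

#include <rte_eventdev.h>
#include <rte_lcore.h>

static int
link_ports_to_lcores_sketch(uint8_t eventdev_id, int nb_eventport)
{
	unsigned int lcore_id = -1;
	int ret, i;

	for (i = 0; i < nb_eventport; i++) {
		/* Pick the next enabled lcore for this event port */
		lcore_id = rte_get_next_lcore(lcore_id, 0, 0);
		if (lcore_id == RTE_MAX_LCORE)
			break;

		/*
		 * queues == NULL links all configured event queues to
		 * the port; nb_links is ignored in that case.
		 */
		ret = rte_event_port_link(eventdev_id, i, NULL, NULL, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}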

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 0c38474..c90249f 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -1,11 +1,33 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2020 Marvell International Ltd.
  */
+#include <rte_bitmap.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_malloc.h>
 
 #include "event_helper.h"
 
+static inline unsigned int
+eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core)
+{
+	unsigned int next_core;
+
+	/* Get next active core skipping cores reserved as eth cores */
+	do {
+		/* Get the next core */
+		next_core = rte_get_next_lcore(prev_core, 0, 0);
+
+		/* Check if we have reached max lcores */
+		if (next_core == RTE_MAX_LCORE)
+			return next_core;
+
+		prev_core = next_core;
+	} while (rte_bitmap_get(em_conf->eth_core_mask, next_core));
+
+	return next_core;
+}
+
 static int
 eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 {
@@ -77,6 +99,71 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 }
 
 static int
+eh_set_default_conf_link(struct eventmode_conf *em_conf)
+{
+	struct eventdev_params *eventdev_config;
+	struct eh_event_link_info *link;
+	unsigned int lcore_id = -1;
+	int i, link_index;
+
+	/*
+	 * Create a 1:1 mapping from event ports to cores. If the number
+	 * of event ports is less than the number of cores, some cores
+	 * won't run a worker. If there are more event ports, then some
+	 * ports won't be used.
+	 */
+
+	/*
+	 * The event queue-port mapping is done according to the links.
+	 * Since we are falling back to the default link config, enable
+	 * the "all_ev_queue_to_ev_port" mode flag, which will map all
+	 * queues to each port.
+	 */
+	em_conf->ext_params.all_ev_queue_to_ev_port = 1;
+
+	/* Get first event dev conf */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+
+	/* Loop through the ports */
+	for (i = 0; i < eventdev_config->nb_eventport; i++) {
+
+		/* Get next active core id */
+		lcore_id = eh_get_next_active_core(em_conf,
+				lcore_id);
+
+		if (lcore_id == RTE_MAX_LCORE) {
+			/* Reached max cores */
+			return 0;
+		}
+
+		/* Save the current combination as one link */
+
+		/* Get the index */
+		link_index = em_conf->nb_link;
+
+		/* Get the corresponding link */
+		link = &(em_conf->link[link_index]);
+
+		/* Save link */
+		link->eventdev_id = eventdev_config->eventdev_id;
+		link->event_port_id = i;
+		link->lcore_id = lcore_id;
+
+		/*
+		 * Don't set eventq_id; by default all queues need to
+		 * be mapped to the port, and this is controlled by the
+		 * operating mode.
+		 */
+
+		/* Update number of links */
+		em_conf->nb_link++;
+	}
+
+	return 0;
+}
+
+static int
 eh_validate_conf(struct eventmode_conf *em_conf)
 {
 	int ret;
@@ -91,6 +178,16 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 			return ret;
 	}
 
+	/*
+	 * Check if links are specified. Else generate a default config for
+	 * the event ports used.
+	 */
+	if (em_conf->nb_link == 0) {
+		ret = eh_set_default_conf_link(em_conf);
+		if (ret != 0)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -102,6 +199,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	struct rte_event_dev_config eventdev_conf;
 	struct eventdev_params *eventdev_config;
 	int nb_eventdev = em_conf->nb_eventdev;
+	struct eh_event_link_info *link;
+	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
 	uint8_t i, j;
@@ -199,6 +298,33 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 		}
 	}
 
+	/* Make event queue - event port link */
+	for (j = 0; j < em_conf->nb_link; j++) {
+
+		/* Get link info */
+		link = &(em_conf->link[j]);
+
+		/* Get event dev ID */
+		eventdev_id = link->eventdev_id;
+
+		/*
+		 * If the "all_ev_queue_to_ev_port" flag is set, all
+		 * queues need to be mapped to the port.
+		 */
+		if (em_conf->ext_params.all_ev_queue_to_ev_port)
+			queue = NULL;
+		else
+			queue = &(link->eventq_id);
+
+		/* Link queue to port */
+		ret = rte_event_port_link(eventdev_id, link->event_port_id,
+				queue, NULL, 1);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to link event port, err %d", ret);
+			return ret;
+		}
+	}
+
 	/* Start event devices */
 	for (i = 0; i < nb_eventdev; i++) {
 
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 040f977..c8afc84 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -16,6 +16,13 @@
 /* Max event devices supported */
 #define EVENT_MODE_MAX_EVENT_DEVS RTE_EVENT_MAX_DEVS
 
+/* Max event queues supported per event device */
+#define EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV RTE_EVENT_MAX_QUEUES_PER_DEV
+
+/* Max event-lcore links */
+#define EVENT_MODE_MAX_LCORE_LINKS \
+	(EVENT_MODE_MAX_EVENT_DEVS * EVENT_MODE_MAX_EVENT_QUEUES_PER_DEV)
+
 /**
  * Packet transfer mode of the application
  */
@@ -32,17 +39,43 @@ struct eventdev_params {
 	uint8_t ev_queue_mode;
 };
 
+/**
+ * Event-lcore link configuration
+ */
+struct eh_event_link_info {
+	uint8_t eventdev_id;
+		/**< Event device ID */
+	uint8_t event_port_id;
+		/**< Event port ID */
+	uint8_t eventq_id;
+		/**< Event queue to be linked to the port */
+	uint8_t lcore_id;
+		/**< Lcore that will poll this port */
+};
+
 /* Eventmode conf data */
 struct eventmode_conf {
 	int nb_eventdev;
 		/**< No of event devs */
 	struct eventdev_params eventdev_config[EVENT_MODE_MAX_EVENT_DEVS];
 		/**< Per event dev conf */
+	uint8_t nb_link;
+		/**< No of links */
+	struct eh_event_link_info
+		link[EVENT_MODE_MAX_LCORE_LINKS];
+		/**< Per link conf */
+	struct rte_bitmap *eth_core_mask;
+		/**< Core mask of cores to be used for software Rx and Tx */
 	union {
 		RTE_STD_C11
 		struct {
 			uint64_t sched_type			: 2;
 		/**< Schedule type */
+			uint64_t all_ev_queue_to_ev_port	: 1;
+		/**<
+		 * When enabled, all event queues need to be mapped to
+		 * each event port
+		 */
 		};
 		uint64_t u64;
 	} ext_params;
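
[Not part of the patch] For reference, an explicitly configured link (the
non-default path, with ext_params.all_ev_queue_to_ev_port left at 0 so that
eventq_id is honored) would fill one entry of the new em_conf->link[] array
directly. The IDs below are arbitrary values chosen for illustration only.

#include "event_helper.h"

static void
add_explicit_link_sketch(struct eventmode_conf *em_conf)
{
	struct eh_event_link_info *link = &em_conf->link[em_conf->nb_link];

	link->eventdev_id = 0;    /* event device to use */
	link->event_port_id = 2;  /* event port the lcore will poll */
	link->eventq_id = 1;      /* queue linked to that port */
	link->lcore_id = 3;       /* worker lcore bound to the port */

	em_conf->nb_link++;
}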
-- 
2.7.4


