[dpdk-dev] [PATCH] examples/l2fwd-event: add l2fwd with eventdev mode

Sunil Kumar Kori skori at marvell.com
Fri Sep 6 10:20:21 CEST 2019


Patchset adds a new application to demonstrate usage of the poll
and eventdev modes.

Following is the summary of newly added features:
1. Exposes the following new command line parameters:
 - mode: It dictates the mode of operation, i.e. poll or eventdev.
 - eventq-sync: It dictates the event queue synchronization method,
                i.e. atomic or ordered. Currently only the atomic and
                ordered methods are implemented.

2. By default, the application works in eventdev mode.
3. All the eventdev resources are initialized with default values;
        no configuration is exposed to the user.

        Following is the summary of default configuration:
         - A single instance of eventdev is supported.
         - The number of event ports equals the number of worker threads.
         - The number of event queues equals the number of Ethernet ports.
         - Each event port is linked to all existing event queues.
         - Dedicated Rx adapter for each Ethernet port; all Ethernet
                port Rx queues are added to the respective Rx adapter.
         - Dedicated Tx adapter for each Ethernet port; all Ethernet
                port Tx queues are added to the respective Tx adapter.
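
For example, to forward between two ports in eventdev mode with ordered
event queues (an illustrative invocation; the EAL core and device
arguments depend on the platform):

        ./build/l2fwd-event -l 0-3 -- -p 0x3 --mode=eventdev \
                --eventq-sync=ordered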

Signed-off-by: Sunil Kumar Kori <skori at marvell.com>
---
 examples/Makefile                     |   1 +
 examples/l2fwd-event/Makefile         |  57 +++
 examples/l2fwd-event/l2fwd_common.h   |  46 ++
 examples/l2fwd-event/l2fwd_eventdev.c | 686 ++++++++++++++++++++++++++++++
 examples/l2fwd-event/l2fwd_eventdev.h |  82 ++++
 examples/l2fwd-event/main.c           | 771 ++++++++++++++++++++++++++++++++++
 examples/l2fwd-event/meson.build      |  12 +
 7 files changed, 1655 insertions(+)
 create mode 100644 examples/l2fwd-event/Makefile
 create mode 100644 examples/l2fwd-event/l2fwd_common.h
 create mode 100644 examples/l2fwd-event/l2fwd_eventdev.c
 create mode 100644 examples/l2fwd-event/l2fwd_eventdev.h
 create mode 100644 examples/l2fwd-event/main.c
 create mode 100644 examples/l2fwd-event/meson.build

diff --git a/examples/Makefile b/examples/Makefile
index de11dd4..d18504b 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -34,6 +34,7 @@ endif
 DIRS-$(CONFIG_RTE_LIBRTE_HASH) += ipv4_multicast
 DIRS-$(CONFIG_RTE_LIBRTE_KNI) += kni
 DIRS-y += l2fwd
+DIRS-y += l2fwd-event
 ifneq ($(PQOS_INSTALL_PATH),)
 DIRS-y += l2fwd-cat
 endif
diff --git a/examples/l2fwd-event/Makefile b/examples/l2fwd-event/Makefile
new file mode 100644
index 0000000..14d8470
--- /dev/null
+++ b/examples/l2fwd-event/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# binary name
+APP = l2fwd-event
+
+# all source are stored in SRCS-y
+SRCS-y := main.c l2fwd_eventdev.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+	ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+	ln -sf $(APP)-static build/$(APP)
+
+PKGCONF=pkg-config --define-prefix
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+	@mkdir -p $@
+
+.PHONY: clean
+clean:
+	rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+	test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
new file mode 100644
index 0000000..28919e4
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_COMMON_H__
+#define __L2FWD_COMMON_H__
+
+#define MAX_PKT_BURST 32
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+struct lcore_queue_conf {
+	uint32_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+	uint32_t n_rx_port;
+} __rte_cache_aligned;
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+	uint64_t dropped;
+	uint64_t tx;
+	uint64_t rx;
+} __rte_cache_aligned;
+
+extern struct rte_mempool *l2fwd_pktmbuf_pool;
+
+extern struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+extern uint32_t l2fwd_enabled_port_mask;
+
+extern int mac_updating;
+
+extern uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+extern struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+extern volatile bool force_quit;
+
+extern uint64_t timer_period;
+
+void l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_portid);
+
+void print_stats(void);
+
+#endif /* __L2FWD_COMMON_H__ */
diff --git a/examples/l2fwd-event/l2fwd_eventdev.c b/examples/l2fwd-event/l2fwd_eventdev.c
new file mode 100644
index 0000000..744040e
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_eventdev.c
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_eventdev.h"
+
+enum {
+	CMD_LINE_OPT_MODE_NUM = 265,
+	CMD_LINE_OPT_EVENTQ_SYNC_NUM,
+};
+
+static const struct option eventdev_lgopts[] = {
+	{CMD_LINE_OPT_MODE, 1, 0, CMD_LINE_OPT_MODE_NUM},
+	{CMD_LINE_OPT_EVENTQ_SYNC, 1, 0, CMD_LINE_OPT_EVENTQ_SYNC_NUM},
+	{NULL, 0, 0, 0}
+};
+
+/* Eventdev command line options */
+int evd_argc;
+char *evd_argv[3];
+
+/* Default configurations */
+int pkt_transfer_mode = PACKET_TRANSFER_MODE_EVENTDEV;
+int eventq_sync_mode = RTE_SCHED_TYPE_ATOMIC;
+uint32_t num_workers = RTE_MAX_LCORE;
+struct eventdev_resources eventdev_rsrc;
+
+static struct rte_eth_conf port_config = {
+	.rxmode = {
+		.mq_mode = ETH_MQ_RX_RSS,
+		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+		.split_hdr_size = 0,
+		.offloads = DEV_RX_OFFLOAD_CHECKSUM
+	},
+	.rx_adv_conf = {
+		.rss_conf = {
+			.rss_key = NULL,
+			.rss_hf = ETH_RSS_IP,
+		}
+	},
+	.txmode = {
+		.mq_mode = ETH_MQ_TX_NONE,
+	}
+};
+
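+/*
+ * Default eventdev configuration; event_dev_setup() trims these values
+ * down to the limits reported by rte_event_dev_info_get().
+ */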
+static struct rte_event_dev_config event_d_conf = {
+	.nb_event_queues = 1,
+	.nb_event_ports = RTE_MAX_LCORE,
+	.nb_events_limit  = 4096,
+	.nb_event_queue_flows = 1024,
+	.nb_event_port_dequeue_depth = 128,
+	.nb_event_port_enqueue_depth = 128
+};
+
+static struct rte_event_port_conf event_p_conf = {
+	.dequeue_depth = 32,
+	.enqueue_depth = 32,
+	.new_event_threshold = 4096
+};
+
+static struct rte_event_queue_conf event_q_conf = {
+	.nb_atomic_flows = 1024,
+	.nb_atomic_order_sequences = 1024,
+	.event_queue_cfg = 0,
+	.schedule_type = RTE_SCHED_TYPE_ATOMIC,
+	.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST
+};
+
+static struct rte_event_eth_rx_adapter_queue_conf eth_q_conf = {
+	.rx_queue_flags = 0,
+	.servicing_weight = 1,
+	.ev = {
+		.queue_id = 0,
+		.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
+		.sched_type = RTE_SCHED_TYPE_ATOMIC
+	}
+};
+
+static void print_ethaddr(const char *name,
+			  const struct rte_ether_addr *eth_addr)
+{
+	char buf[RTE_ETHER_ADDR_FMT_SIZE];
+	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+	printf("%s%s", name, buf);
+}
+
+static void parse_mode(const char *optarg)
+{
+	if (!strncmp(optarg, "poll", 4))
+		pkt_transfer_mode = PACKET_TRANSFER_MODE_POLL;
+	else if (!strncmp(optarg, "eventdev", 8))
+		pkt_transfer_mode = PACKET_TRANSFER_MODE_EVENTDEV;
+}
+
+static void parse_eventq_sync(const char *optarg)
+{
+	if (!strncmp(optarg, "ordered", 7))
+		eventq_sync_mode = RTE_SCHED_TYPE_ORDERED;
+	else if (!strncmp(optarg, "atomic", 6))
+		eventq_sync_mode = RTE_SCHED_TYPE_ATOMIC;
+}
+
+static int parse_eventdev_args(int argc, char **argv)
+{
+	char **argvopt = argv;
+	int32_t opt, ret = -1;
+	int32_t option_index;
+
+	while ((opt = getopt_long(argc, argvopt, "", eventdev_lgopts,
+				  &option_index)) != EOF) {
+		switch (opt) {
+		case CMD_LINE_OPT_MODE_NUM:
+			parse_mode(optarg);
+			break;
+
+		case CMD_LINE_OPT_EVENTQ_SYNC_NUM:
+			parse_eventq_sync(optarg);
+			break;
+
+		case '?':
+			/* skip other parameters except eventdev specific */
+			break;
+
+		default:
+			printf("Invalid eventdev parameter\n");
+			return -1;
+		}
+	}
+
+	if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV)
+		ret = EVENT_DEV_PARAM_PRESENT;
+
+	return ret;
+}
+
+/* Send burst of packets on an output interface */
+static inline int send_burst_eventdev_generic(struct rte_mbuf *m[], uint16_t n,
+					      uint16_t event_p_id)
+{
+	struct rte_event events[MAX_PKT_BURST];
+	uint8_t event_d_id;
+	int ret, i;
+
+	event_d_id = eventdev_rsrc.event_d_id;
+
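+	/* Re-enqueue the packets as FORWARD events on event queue 0 */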
+	for (i = 0; i < n; i++) {
+		events[i].queue_id = 0;
+		events[i].op = RTE_EVENT_OP_FORWARD;
+		events[i].mbuf = m[i];
+	}
+
+	ret = rte_event_enqueue_burst(event_d_id, event_p_id, events, n);
+	if (unlikely(ret < n)) {
+		do {
+			rte_pktmbuf_free(m[ret]);
+		} while (++ret < n);
+	}
+
+	return 0;
+}
+
+/* Send burst of packets on an output interface */
+static inline int send_burst_eventdev_adapter(struct rte_mbuf *m[], uint16_t n,
+					      uint16_t event_p_id)
+{
+	struct rte_event events[MAX_PKT_BURST];
+	uint8_t event_d_id;
+	int32_t ret, i;
+
+	event_d_id = eventdev_rsrc.event_d_id;
+
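+	/* Tag each mbuf with Tx queue 0 and hand the burst to the Tx adapter */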
+	for (i = 0; i < n; i++) {
+		events[i].queue_id = 0;
+		events[i].op = RTE_EVENT_OP_FORWARD;
+		events[i].mbuf = m[i];
+		rte_event_eth_tx_adapter_txq_set(events[i].mbuf, 0);
+	}
+
+	ret = rte_event_eth_tx_adapter_enqueue(event_d_id, event_p_id,
+					       events, n);
+	if (unlikely(ret < n)) {
+		do {
+			rte_pktmbuf_free(m[ret]);
+		} while (++ret < n);
+	}
+
+	return 0;
+}
+
+static uint32_t event_dev_setup(uint16_t ethdev_count)
+{
+	struct rte_event_dev_info dev_info;
+	const uint8_t event_d_id = 0; /* Always use first event device only */
+	uint32_t event_queue_cfg = 0;
+	int ret;
+
+	/* Event device configuration */
+	rte_event_dev_info_get(event_d_id, &dev_info);
+
+	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+	event_d_conf.nb_event_queues = ethdev_count;
+	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+		event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+		event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+	if (dev_info.max_event_port_dequeue_depth <
+				event_d_conf.nb_event_port_dequeue_depth)
+		event_d_conf.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+
+	if (dev_info.max_event_port_enqueue_depth <
+				event_d_conf.nb_event_port_enqueue_depth)
+		event_d_conf.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
+	num_workers = rte_lcore_count();
+	if (dev_info.max_event_ports < num_workers)
+		num_workers = dev_info.max_event_ports;
+
+	event_d_conf.nb_event_ports = num_workers;
+
+	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Error in configuring event device");
+
+	eventdev_rsrc.event_d_id = event_d_id;
+	return event_queue_cfg;
+}
+
+static void event_port_setup(void)
+{
+	uint8_t event_d_id = eventdev_rsrc.event_d_id;
+	struct rte_event_port_conf evp_conf;
+	uint8_t event_p_id;
+	int32_t ret;
+
+	eventdev_rsrc.evp.nb_ports = num_workers;
+	eventdev_rsrc.evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc.evp.nb_ports);
+	if (!eventdev_rsrc.evp.event_p_id)
+		rte_exit(EXIT_FAILURE, "No space for event port IDs");
+
+	for (event_p_id = 0; event_p_id < num_workers; event_p_id++) {
+		rte_event_port_default_conf_get(event_d_id, event_p_id,
+						&evp_conf);
+
+		if (evp_conf.new_event_threshold <
+					event_p_conf.new_event_threshold)
+			event_p_conf.new_event_threshold =
+					evp_conf.new_event_threshold;
+
+		if (evp_conf.dequeue_depth < event_p_conf.dequeue_depth)
+			event_p_conf.dequeue_depth = evp_conf.dequeue_depth;
+
+		if (evp_conf.enqueue_depth < event_p_conf.enqueue_depth)
+			event_p_conf.enqueue_depth = evp_conf.enqueue_depth;
+
+		ret = rte_event_port_setup(event_d_id, event_p_id,
+					   &event_p_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event port %d\n",
+				 event_p_id);
+		}
+
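+		/* A NULL queue list links this event port to every
+		 * configured event queue.
+		 */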
+		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+					  NULL, 0);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE, "Error in linking event port %d "
+				 "to event queue", event_p_id);
+		}
+		eventdev_rsrc.evp.event_p_id[event_p_id] = event_p_id;
+	}
+
+	/* Initialize the event port allocator lock (once, not per port) */
+	rte_spinlock_init(&eventdev_rsrc.evp.lock);
+}
+
+static void event_queue_setup(uint16_t ethdev_count, uint32_t event_queue_cfg)
+{
+	uint8_t event_d_id = eventdev_rsrc.event_d_id;
+	struct rte_event_queue_conf evq_conf;
+	uint8_t event_q_id = 0;
+	int32_t ret;
+
+	rte_event_queue_default_conf_get(event_d_id, event_q_id, &evq_conf);
+
+	if (evq_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+		event_q_conf.nb_atomic_flows = evq_conf.nb_atomic_flows;
+
+	if (evq_conf.nb_atomic_order_sequences <
+					event_q_conf.nb_atomic_order_sequences)
+		event_q_conf.nb_atomic_order_sequences =
+					evq_conf.nb_atomic_order_sequences;
+
+	event_q_conf.event_queue_cfg = event_queue_cfg;
+	event_q_conf.schedule_type = eventq_sync_mode;
+	eventdev_rsrc.evq.nb_queues = ethdev_count;
+	eventdev_rsrc.evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc.evq.nb_queues);
+	if (!eventdev_rsrc.evq.event_q_id)
+		rte_exit(EXIT_FAILURE, "Memory allocation failure");
+
+	for (event_q_id = 0; event_q_id < ethdev_count; event_q_id++) {
+		ret = rte_event_queue_setup(event_d_id, event_q_id,
+					    &event_q_conf);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				 "Error in configuring event queue");
+		}
+		eventdev_rsrc.evq.event_q_id[event_q_id] = event_q_id;
+	}
+}
+
+static void rx_tx_adapter_setup(uint16_t ethdev_count)
+{
+	uint8_t event_d_id = eventdev_rsrc.event_d_id;
+	uint32_t service_id;
+	uint32_t cap = 0;
+	int32_t ret, i;
+
+	eventdev_rsrc.rx_adptr.nb_rx_adptr = ethdev_count;
+	eventdev_rsrc.rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc.rx_adptr.nb_rx_adptr);
+	if (!eventdev_rsrc.rx_adptr.rx_adptr) {
+		free(eventdev_rsrc.evp.event_p_id);
+		free(eventdev_rsrc.evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Rx adapter");
+	}
+
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_rx_adapter_create(i, event_d_id,
+						      &event_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create rx adapter[%d]", i);
+
+		ret = rte_event_eth_rx_adapter_caps_get(event_d_id, i, &cap);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to get event rx adapter capabilities");
+
+		/* Configure user requested sync mode */
+		eth_q_conf.ev.queue_id = eventdev_rsrc.evq.event_q_id[i];
+		eth_q_conf.ev.sched_type = eventq_sync_mode;
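+		/* rx_queue_id -1: add all Rx queues of port i to the adapter */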
+		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &eth_q_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to add queues to Rx adapter");
+
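+		/* Without an internal event port, the Rx adapter runs as a
+		 * software service; enable its service run state here.
+		 */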
+		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+			ret = rte_event_eth_rx_adapter_service_id_get(i,
+								&service_id);
+			if (ret != -ESRCH && ret != 0) {
+				rte_exit(EXIT_FAILURE,
+				"Error getting the service ID for rx adptr\n");
+			}
+
+			rte_service_runstate_set(service_id, 1);
+			rte_service_set_runstate_mapped_check(service_id, 1);
+		}
+
+		ret = rte_event_eth_rx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Rx adapter[%d] start failed", i);
+
+		eventdev_rsrc.rx_adptr.rx_adptr[i] = i;
+	}
+
+	eventdev_rsrc.tx_adptr.nb_tx_adptr = ethdev_count;
+	eventdev_rsrc.tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+					eventdev_rsrc.tx_adptr.nb_tx_adptr);
+	if (!eventdev_rsrc.tx_adptr.tx_adptr) {
+		free(eventdev_rsrc.rx_adptr.rx_adptr);
+		free(eventdev_rsrc.evp.event_p_id);
+		free(eventdev_rsrc.evq.event_q_id);
+		rte_exit(EXIT_FAILURE,
+			 "failed to allocate memory for Tx adapter");
+	}
+
+	eventdev_rsrc.send_burst_eventdev = send_burst_eventdev_adapter;
+	for (i = 0; i < ethdev_count; i++) {
+		ret = rte_event_eth_tx_adapter_create(i, event_d_id,
+						      &event_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to create tx adapter[%d]", i);
+
+		ret = rte_event_eth_tx_adapter_caps_get(event_d_id, i, &cap);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Failed to get event tx adapter capabilities");
+
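+		/* Without an internal Tx port, events cannot be flushed via
+		 * rte_event_eth_tx_adapter_enqueue(); fall back to the
+		 * generic event enqueue path.
+		 */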
+		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+			ret = rte_event_eth_tx_adapter_service_id_get(i,
+								   &service_id);
+			if (ret != -ESRCH && ret != 0) {
+				rte_exit(EXIT_FAILURE,
+					 "Failed to get Tx adapter service ID");
+			}
+
+			rte_service_runstate_set(service_id, 1);
+			rte_service_set_runstate_mapped_check(service_id, 1);
+			eventdev_rsrc.send_burst_eventdev =
+						send_burst_eventdev_generic;
+		}
+
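+		/* tx_queue_id -1: add all Tx queues of port i to the adapter */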
+		ret = rte_event_eth_tx_adapter_queue_add(i, i, -1);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "failed to add queues to Tx adapter");
+
+		ret = rte_event_eth_tx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				 "Tx adapter[%d] start failed", i);
+
+		eventdev_rsrc.tx_adptr.tx_adptr[i] = i;
+	}
+}
+
+static void eth_dev_port_setup(uint16_t ethdev_count __rte_unused)
+{
+	struct rte_eth_conf local_port_conf = port_config;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_txconf txconf;
+	struct rte_eth_rxconf rxconf;
+	uint16_t nb_rx_queue = 1;
+	uint16_t n_tx_queue = 1;
+	uint16_t nb_rxd = 1024;
+	uint16_t nb_txd = 1024;
+	uint16_t portid;
+	int32_t ret;
+
+	/* initialize all ports */
+	RTE_ETH_FOREACH_DEV(portid) {
+		/* skip ports that are not enabled */
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+			printf("\nSkipping disabled port %d\n", portid);
+			continue;
+		}
+
+		/* init port */
+		printf("Initializing port %d ... ", portid);
+		fflush(stdout);
+		printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+			nb_rx_queue, n_tx_queue);
+
+		rte_eth_dev_info_get(portid, &dev_info);
+		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+						DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+						dev_info.flow_type_rss_offloads;
+		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+				port_config.rx_adv_conf.rss_conf.rss_hf) {
+			printf("Port %u modified RSS hash function "
+			       "based on hardware support,"
+			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+			       portid,
+			       port_config.rx_adv_conf.rss_conf.rss_hf,
+			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+		}
+
+		ret = rte_eth_dev_configure(portid, nb_rx_queue, n_tx_queue,
+					    &local_port_conf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Cannot configure device: err=%d, port=%d\n",
+				 ret, portid);
+
+		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+						       &nb_txd);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Cannot adjust number of descriptors: err=%d, "
+				 "port=%d\n", ret, portid);
+
+		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
+		print_ethaddr(" Address:", &l2fwd_ports_eth_addr[portid]);
+		printf("\n");
+
+
+		/* init one Rx queue per port */
+		rxconf = dev_info.default_rxconf;
+		rxconf.offloads = local_port_conf.rxmode.offloads;
+		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, 0, &rxconf,
+					     l2fwd_pktmbuf_pool);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "rte_eth_rx_queue_setup: err=%d, "
+				 "port=%d\n", ret, portid);
+
+		/* init one Tx queue per port */
+		txconf = dev_info.default_txconf;
+		txconf.offloads = local_port_conf.txmode.offloads;
+		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, 0, &txconf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "rte_eth_tx_queue_setup: err=%d, "
+				 "port=%d\n", ret, portid);
+	}
+}
+
+static void
+l2fwd_eventdev_forward(struct rte_mbuf *m[], uint32_t portid,
+		       uint16_t nb_rx, uint16_t event_p_id)
+{
+	uint32_t dst_port, i;
+
+	dst_port = l2fwd_dst_ports[portid];
+
+	for (i = 0; i < nb_rx; i++) {
+		if (mac_updating)
+			l2fwd_mac_updating(m[i], dst_port);
+
+		m[i]->port = dst_port;
+	}
+
+	port_statistics[dst_port].tx += nb_rx;
+	eventdev_rsrc.send_burst_eventdev(m, nb_rx, event_p_id);
+}
+
+/* main eventdev processing loop */
+void l2fwd_main_loop_eventdev(void)
+{
+	uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
+	struct rte_event events[MAX_PKT_BURST];
+	struct rte_mbuf *mbuf[MAX_PKT_BURST];
+	uint32_t i, j = 0, nb_rx;
+	uint16_t event_d_id;
+	uint16_t event_p_id;
+	uint32_t lcore_id;
+	uint16_t deq_len;
+	uint32_t portid;
+
+	prev_tsc = 0;
+	timer_tsc = 0;
+
+	/* Assign dedicated event port for enqueue/dequeue operation.
+	 * Cap the dequeue burst to the size of the local event/mbuf arrays.
+	 */
+	deq_len = RTE_MIN(event_d_conf.nb_event_port_dequeue_depth,
+			  MAX_PKT_BURST);
+	event_d_id = eventdev_rsrc.event_d_id;
+	event_p_id = get_free_event_port();
+	lcore_id = rte_lcore_id();
+
+	RTE_LOG(INFO, L2FWD, "entering eventdev main loop on lcore %u\n",
+		lcore_id);
+
+	while (!force_quit) {
+
+		/* if timer is enabled */
+		if (timer_period > 0) {
+			cur_tsc = rte_rdtsc();
+			diff_tsc = cur_tsc - prev_tsc;
+
+			/* advance the timer */
+			timer_tsc += diff_tsc;
+
+			/* if timer has reached its timeout */
+			if (unlikely(timer_tsc >= timer_period)) {
+
+				/* do this only on master core */
+				if (lcore_id == rte_get_master_lcore()) {
+					print_stats();
+
+					/* reset the timer */
+					timer_tsc = 0;
+				}
+			}
+			prev_tsc = cur_tsc;
+		}
+
+		/* Read packet from eventdev */
+		nb_rx = rte_event_dequeue_burst(event_d_id, event_p_id,
+						events, deq_len, 0);
+		if (nb_rx == 0) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			mbuf[i] = events[i].mbuf;
+			rte_prefetch0(rte_pktmbuf_mtod(mbuf[i], void *));
+		}
+
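+		/* Group consecutive events from the same source port into
+		 * one forward burst; j marks the start of the current group.
+		 */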
+		j = 0;
+		portid = mbuf[0]->port;
+		port_statistics[portid].rx++;
+		for (i = 1; i < nb_rx; i++) {
+			if (portid != mbuf[i]->port) {
+				l2fwd_eventdev_forward(&mbuf[j], portid, i - j,
+						       event_p_id);
+				j = i;
+				portid = mbuf[i]->port;
+			}
+			port_statistics[portid].rx++;
+		}
+
+		/* Send remaining packets */
+		l2fwd_eventdev_forward(&mbuf[j], portid, i - j, event_p_id);
+	}
+}
+
+int get_free_event_port(void)
+{
+	static int index;
+	int port_id = -1;
+
+	rte_spinlock_lock(&eventdev_rsrc.evp.lock);
+	if (index >= eventdev_rsrc.evp.nb_ports) {
+		/* do not return with the lock held */
+		printf("No free event port is available\n");
+	} else {
+		port_id = eventdev_rsrc.evp.event_p_id[index];
+		index++;
+	}
+	rte_spinlock_unlock(&eventdev_rsrc.evp.lock);
+
+	return port_id;
+}
+
+int eventdev_resource_setup(int argc, char **argv)
+{
+	uint16_t ethdev_count = rte_eth_dev_count_avail();
+	uint32_t event_queue_cfg = 0;
+	uint32_t service_id;
+	int32_t ret;
+
+	/* Parse eventdev command line options */
+	ret = parse_eventdev_args(argc, argv);
+	if (ret < 0)
+		return ret;
+
+	if (rte_event_dev_count() < 1)
+		rte_exit(EXIT_FAILURE, "No Eventdev found");
+
+	/* Ethernet device configuration */
+	eth_dev_port_setup(ethdev_count);
+
+	/* Event device configuration */
+	event_queue_cfg = event_dev_setup(ethdev_count);
+
+	/* Event queue configuration */
+	event_queue_setup(ethdev_count, event_queue_cfg);
+
+	/* Event port configuration */
+	event_port_setup();
+
+	/* Rx/Tx adapters configuration */
+	rx_tx_adapter_setup(ethdev_count);
+
+	/* Start event device service */
+	ret = rte_event_dev_service_id_get(eventdev_rsrc.event_d_id,
+					   &service_id);
+	if (ret != -ESRCH && ret != 0)
+		rte_exit(EXIT_FAILURE, "Error getting eventdev service ID");
+
+	/* -ESRCH means this eventdev needs no service core; skip in that case */
+	if (ret == 0) {
+		rte_service_runstate_set(service_id, 1);
+		rte_service_set_runstate_mapped_check(service_id, 1);
+	}
+
+	/* Start event device */
+	ret = rte_event_dev_start(eventdev_rsrc.event_d_id);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Error in starting eventdev");
+
+	return EVENT_DEV_PARAM_PRESENT;
+}
diff --git a/examples/l2fwd-event/l2fwd_eventdev.h b/examples/l2fwd-event/l2fwd_eventdev.h
new file mode 100644
index 0000000..d5972f2
--- /dev/null
+++ b/examples/l2fwd-event/l2fwd_eventdev.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_EVENTDEV_H__
+#define __L2FWD_EVENTDEV_H__
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+/*
+ * This expression is used to calculate the number of mbufs needed
+ * depending on user input, taking into account memory for the Rx and
+ * Tx hardware rings, cache per lcore and mtable per port per lcore.
+ * RTE_MAX is used to ensure that NB_MBUF never goes below a minimum
+ * value of 8192.
+ */
+#define NUM_MBUF(nports) RTE_MAX(		\
+	(nports*nb_rx_queue*nb_rxd +		\
+	nports*nb_lcores*MAX_PKT_BURST +	\
+	nports*n_tx_queue*nb_txd +		\
+	nb_lcores*256),				\
+	8192)
+
+#define EVENT_DEV_PARAM_PRESENT	0x8000	/* Arbitrary sentinel value */
+
+/* Packet transfer mode of the application */
+#define PACKET_TRANSFER_MODE_POLL  1
+#define PACKET_TRANSFER_MODE_EVENTDEV  2
+
+#define CMD_LINE_OPT_MODE "mode"
+#define CMD_LINE_OPT_EVENTQ_SYNC "eventq-sync"
+
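+/* Tx burst function pointer, selected at setup time: either the Tx adapter
+ * enqueue or the generic event enqueue.
+ */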
+typedef int (*tx_eventdev_t)(struct rte_mbuf *m[], uint16_t n, uint16_t port);
+
+struct eventdev_queues {
+	uint8_t *event_q_id;
+	uint8_t	nb_queues;
+};
+
+struct eventdev_ports {
+	uint8_t *event_p_id;
+	uint8_t	nb_ports;
+	rte_spinlock_t lock;
+};
+
+struct eventdev_rx_adptr {
+	uint8_t	nb_rx_adptr;
+	uint8_t *rx_adptr;
+};
+
+struct eventdev_tx_adptr {
+	uint8_t	nb_tx_adptr;
+	uint8_t *tx_adptr;
+};
+
+struct eventdev_resources {
+	tx_eventdev_t	send_burst_eventdev;
+	struct eventdev_rx_adptr rx_adptr;
+	struct eventdev_tx_adptr tx_adptr;
+	struct eventdev_queues evq;
+	struct eventdev_ports evp;
+	uint8_t event_d_id;
+};
+
+extern int pkt_transfer_mode;
+extern int eventq_sync_mode;
+extern struct eventdev_resources eventdev_rsrc;
+extern int evd_argc;
+extern char *evd_argv[3];
+
+/* Event device and required resource setup function */
+int eventdev_resource_setup(int argc, char **argv);
+
+/* Returns next available event port */
+int get_free_event_port(void);
+
+/* Event processing function */
+void l2fwd_main_loop_eventdev(void);
+
+#endif /* __L2FWD_EVENTDEV_H__ */
diff --git a/examples/l2fwd-event/main.c b/examples/l2fwd-event/main.c
new file mode 100644
index 0000000..41f73b0
--- /dev/null
+++ b/examples/l2fwd-event/main.c
@@ -0,0 +1,771 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_eventdev.h"
+
+volatile bool force_quit;
+
+/* MAC updating enabled by default */
+int mac_updating = 1;
+
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+uint32_t l2fwd_enabled_port_mask;
+
+/* list of enabled ports */
+uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+static struct rte_eth_conf port_conf = {
+	.rxmode = {
+		.split_hdr_size = 0,
+	},
+	.txmode = {
+		.mq_mode = ETH_MQ_TX_NONE,
+	},
+};
+
+struct rte_mempool *l2fwd_pktmbuf_pool;
+
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+/* A tsc-based timer responsible for triggering statistics printout */
+uint64_t timer_period = 10; /* default period is 10 seconds */
+
+/* Print out statistics on packets dropped */
+void print_stats(void)
+{
+	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+	uint32_t portid;
+
+	total_packets_dropped = 0;
+	total_packets_tx = 0;
+	total_packets_rx = 0;
+
+	const char clr[] = {27, '[', '2', 'J', '\0' };
+	const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0' };
+
+	/* Clear screen and move to top left */
+	printf("%s%s", clr, topLeft);
+
+	printf("\nPort statistics ====================================");
+
+	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+		/* skip disabled ports */
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+			continue;
+		printf("\nStatistics for port %u ------------------------------"
+			   "\nPackets sent: %24"PRIu64
+			   "\nPackets received: %20"PRIu64
+			   "\nPackets dropped: %21"PRIu64,
+			   portid,
+			   port_statistics[portid].tx,
+			   port_statistics[portid].rx,
+			   port_statistics[portid].dropped);
+
+		total_packets_dropped += port_statistics[portid].dropped;
+		total_packets_tx += port_statistics[portid].tx;
+		total_packets_rx += port_statistics[portid].rx;
+	}
+	printf("\nAggregate statistics ==============================="
+		   "\nTotal packets sent: %18"PRIu64
+		   "\nTotal packets received: %14"PRIu64
+		   "\nTotal packets dropped: %15"PRIu64,
+		   total_packets_tx,
+		   total_packets_rx,
+		   total_packets_dropped);
+	printf("\n====================================================\n");
+}
+
+void l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_portid)
+{
+	struct rte_ether_hdr *eth;
+	void *tmp;
+
+	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+	/* 02:00:00:00:00:xx */
+	tmp = &eth->d_addr.addr_bytes[0];
+	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
+
+	/* src addr */
+	rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, uint32_t portid)
+{
+	uint32_t dst_port;
+	int32_t sent;
+	struct rte_eth_dev_tx_buffer *buffer;
+
+	dst_port = l2fwd_dst_ports[portid];
+
+	if (mac_updating)
+		l2fwd_mac_updating(m, dst_port);
+
+	buffer = tx_buffer[dst_port];
+	sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+	if (sent)
+		port_statistics[dst_port].tx += sent;
+}
+
+/* main processing loop */
+static void l2fwd_main_loop(void)
+{
+	uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc, drain_tsc;
+	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_eth_dev_tx_buffer *buffer;
+	struct lcore_queue_conf *qconf;
+	uint32_t i, j, portid, nb_rx;
+	struct rte_mbuf *m;
+	uint32_t lcore_id;
+	int32_t sent;
+
+	drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
+			BURST_TX_DRAIN_US;
+	prev_tsc = 0;
+	timer_tsc = 0;
+
+	lcore_id = rte_lcore_id();
+	qconf = &lcore_queue_conf[lcore_id];
+
+	if (qconf->n_rx_port == 0) {
+		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+		return;
+	}
+
+	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+	for (i = 0; i < qconf->n_rx_port; i++) {
+
+		portid = qconf->rx_port_list[i];
+		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+			portid);
+
+	}
+
+	while (!force_quit) {
+
+		cur_tsc = rte_rdtsc();
+
+		/*
+		 * TX burst queue drain
+		 */
+		diff_tsc = cur_tsc - prev_tsc;
+		if (unlikely(diff_tsc > drain_tsc)) {
+			for (i = 0; i < qconf->n_rx_port; i++) {
+				portid =
+					l2fwd_dst_ports[qconf->rx_port_list[i]];
+				buffer = tx_buffer[portid];
+				sent = rte_eth_tx_buffer_flush(portid, 0,
+							       buffer);
+				if (sent)
+					port_statistics[portid].tx += sent;
+			}
+
+			/* if timer is enabled */
+			if (timer_period > 0) {
+				/* advance the timer */
+				timer_tsc += diff_tsc;
+
+				/* if timer has reached its timeout */
+				if (unlikely(timer_tsc >= timer_period)) {
+					/* do this only on master core */
+					if (lcore_id ==
+						rte_get_master_lcore()) {
+						print_stats();
+						/* reset the timer */
+						timer_tsc = 0;
+					}
+				}
+			}
+
+			prev_tsc = cur_tsc;
+		}
+
+		/*
+		 * Read packet from RX queues
+		 */
+		for (i = 0; i < qconf->n_rx_port; i++) {
+
+			portid = qconf->rx_port_list[i];
+			nb_rx = rte_eth_rx_burst(portid, 0,
+						 pkts_burst, MAX_PKT_BURST);
+
+			port_statistics[portid].rx += nb_rx;
+
+			for (j = 0; j < nb_rx; j++) {
+				m = pkts_burst[j];
+				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+				l2fwd_simple_forward(m, portid);
+			}
+		}
+	}
+}
+
+static int
+l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
+{
+	if (pkt_transfer_mode == PACKET_TRANSFER_MODE_EVENTDEV)
+		l2fwd_main_loop_eventdev();
+	else
+		l2fwd_main_loop();
+	return 0;
+}
+
+/* display usage */
+static void
+l2fwd_usage(const char *prgname)
+{
+	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+	       "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+	       "  -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+	       "  -T PERIOD: statistics will be refreshed each PERIOD seconds "
+	       "		(0 to disable, 10 default, 86400 maximum)\n"
+	       "  --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
+	       "      When enabled:\n"
+	       "       - The source MAC address is replaced by the TX port MAC address\n"
+	       "       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
+	       "  --mode: Packet transfer mode for I/O, poll or eventdev\n"
+	       "          Default mode = eventdev\n"
+	       "  --eventq-sync: Event queue synchronization method,\n"
+	       "                 ordered or atomic. Default: atomic\n"
+	       "                 Valid only if --mode=eventdev\n\n",
+	       prgname);
+}
+
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+	char *end = NULL;
+	unsigned long pm;
+
+	/* parse hexadecimal string */
+	pm = strtoul(portmask, &end, 16);
+	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+		return -1;
+
+	if (pm == 0)
+		return -1;
+
+	return pm;
+}
+
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+	char *end = NULL;
+	unsigned long n;
+
+	/* parse decimal string */
+	n = strtoul(q_arg, &end, 10);
+	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+		return 0;
+	if (n == 0)
+		return 0;
+	if (n >= MAX_RX_QUEUE_PER_LCORE)
+		return 0;
+
+	return n;
+}
+
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+	char *end = NULL;
+	int n;
+
+	/* parse number string */
+	n = strtol(q_arg, &end, 10);
+	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+		return -1;
+	if (n >= MAX_TIMER_PERIOD)
+		return -1;
+
+	return n;
+}
+
+static const char short_options[] =
+	"p:"  /* portmask */
+	"q:"  /* number of queues */
+	"T:"  /* timer period */
+	;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+
+enum {
+	/* long options mapped to a short option */
+
+	/* first long only option value must be >= 256, so that we won't
+	 * conflict with short options
+	 */
+	CMD_LINE_OPT_MIN_NUM = 256,
+};
+
+static const struct option lgopts[] = {
+	{ CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+	{ CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+	{NULL, 0, 0, 0}
+};
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+	int opt, ret, timer_secs;
+	char **argvopt;
+	int option_index;
+	char *prgname = argv[0];
+
+	evd_argv[0] = argv[0];
+	evd_argc++;
+	argvopt = argv;
+
+	while ((opt = getopt_long(argc, argvopt, short_options,
+				  lgopts, &option_index)) != EOF) {
+
+		switch (opt) {
+		/* portmask */
+		case 'p':
+			l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+			if (l2fwd_enabled_port_mask == 0) {
+				printf("invalid portmask\n");
+				l2fwd_usage(prgname);
+				return -1;
+			}
+			break;
+
+		/* nqueue */
+		case 'q':
+			l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+			if (l2fwd_rx_queue_per_lcore == 0) {
+				printf("invalid queue number\n");
+				l2fwd_usage(prgname);
+				return -1;
+			}
+			break;
+
+		/* timer period */
+		case 'T':
+			timer_secs = l2fwd_parse_timer_period(optarg);
+			if (timer_secs < 0) {
+				printf("invalid timer period\n");
+				l2fwd_usage(prgname);
+				return -1;
+			}
+			timer_period = timer_secs;
+			break;
+
+		case '?':
+			/* Possibly an eventdev option; stash it for now.
+			 * It will be processed by parse_eventdev_args() later.
+			 */
+			if (evd_argc < (int)RTE_DIM(evd_argv)) {
+				evd_argv[evd_argc] = argv[optind - 1];
+				evd_argc++;
+			}
+			break;
+
+		/* long options */
+		case 0:
+			break;
+
+		default:
+			l2fwd_usage(prgname);
+			return -1;
+		}
+	}
+
+	if (optind >= 0)
+		argv[optind-1] = prgname;
+
+	ret = optind-1;
+	optind = 1; /* reset getopt lib */
+	return ret;
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+	uint16_t portid;
+	uint8_t count, all_ports_up, print_flag = 0;
+	struct rte_eth_link link;
+
+	printf("\nChecking link status...");
+	fflush(stdout);
+	for (count = 0; count <= MAX_CHECK_TIME; count++) {
+		if (force_quit)
+			return;
+		all_ports_up = 1;
+		RTE_ETH_FOREACH_DEV(portid) {
+			if (force_quit)
+				return;
+			if ((port_mask & (1 << portid)) == 0)
+				continue;
+			memset(&link, 0, sizeof(link));
+			rte_eth_link_get_nowait(portid, &link);
+			/* print link status if flag set */
+			if (print_flag == 1) {
+				if (link.link_status)
+					printf(
+					"Port%d Link Up. Speed %u Mbps - %s\n",
+						portid, link.link_speed,
+				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+					("full-duplex") : ("half-duplex"));
+				else
+					printf("Port %d Link Down\n", portid);
+				continue;
+			}
+			/* clear all_ports_up flag if any link down */
+			if (link.link_status == ETH_LINK_DOWN) {
+				all_ports_up = 0;
+				break;
+			}
+		}
+		/* after finally printing all link status, get out */
+		if (print_flag == 1)
+			break;
+
+		if (all_ports_up == 0) {
+			printf(".");
+			fflush(stdout);
+			rte_delay_ms(CHECK_INTERVAL);
+		}
+
+		/* set the print_flag if all ports up or timeout */
+		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+			print_flag = 1;
+			printf("done\n");
+		}
+	}
+}
+
+static void
+signal_handler(int signum)
+{
+	if (signum == SIGINT || signum == SIGTERM) {
+		printf("\n\nSignal %d received, preparing to exit...\n",
+				signum);
+		force_quit = true;
+	}
+}
+
+int
+main(int argc, char **argv)
+{
+	uint16_t nb_ports_available = 0;
+	struct lcore_queue_conf *qconf;
+	uint32_t lcore_id, rx_lcore_id;
+	uint32_t nb_ports_in_mask = 0;
+	uint16_t portid, last_port;
+	uint32_t nb_mbufs;
+	uint16_t nb_ports;
+	int ret;
+
+	/* init EAL */
+	ret = rte_eal_init(argc, argv);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+	argc -= ret;
+	argv += ret;
+
+	force_quit = false;
+	signal(SIGINT, signal_handler);
+	signal(SIGTERM, signal_handler);
+
+	/* parse application arguments (after the EAL ones) */
+	ret = l2fwd_parse_args(argc, argv);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+	printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
+
+	/* convert to number of cycles */
+	timer_period *= rte_get_timer_hz();
+
+	nb_ports = rte_eth_dev_count_avail();
+	if (nb_ports == 0)
+		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+	/* check port mask to possible port mask */
+	if (l2fwd_enabled_port_mask & ~((1 << nb_ports) - 1))
+		rte_exit(EXIT_FAILURE, "Invalid portmask; possible (0x%x)\n",
+			(1 << nb_ports) - 1);
+
+	/* reset l2fwd_dst_ports */
+	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+		l2fwd_dst_ports[portid] = 0;
+	last_port = 0;
+
+	/*
+	 * Each logical core is assigned a dedicated TX queue on each port.
+	 */
+	RTE_ETH_FOREACH_DEV(portid) {
+		/* skip ports that are not enabled */
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+			continue;
+
+		if (nb_ports_in_mask % 2) {
+			l2fwd_dst_ports[portid] = last_port;
+			l2fwd_dst_ports[last_port] = portid;
+		} else {
+			last_port = portid;
+		}
+
+		nb_ports_in_mask++;
+	}
+	if (nb_ports_in_mask % 2) {
+		printf("Notice: odd number of ports in portmask.\n");
+		l2fwd_dst_ports[last_port] = last_port;
+	}
+
+	rx_lcore_id = 0;
+	qconf = NULL;
+
+	/* The lcore/port assignment is not yet known here, so size the
+	 * pool for every enabled lcore's mempool cache.
+	 */
+	nb_mbufs = RTE_MAX(nb_ports * (nb_rxd + nb_txd + MAX_PKT_BURST +
+		rte_lcore_count() * MEMPOOL_CACHE_SIZE), 8192U);
+
+	/* create the mbuf pool */
+	l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
+		MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+		rte_socket_id());
+	if (l2fwd_pktmbuf_pool == NULL)
+		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+	/* Configure eventdev parameters if user has requested */
+	ret = eventdev_resource_setup(evd_argc, evd_argv);
+	if (ret == EVENT_DEV_PARAM_PRESENT) {
+		/* All settings are done. Now enable eth devices */
+		RTE_ETH_FOREACH_DEV(portid) {
+			/* skip ports that are not enabled */
+			if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+				continue;
+
+			ret = rte_eth_dev_start(portid);
+			if (ret < 0)
+				rte_exit(EXIT_FAILURE,
+					 "rte_eth_dev_start:err=%d, port=%u\n",
+					 ret, portid);
+		}
+		goto skip_port_config;
+	}
+
+	/* Initialize the port/queue configuration of each logical core */
+	RTE_ETH_FOREACH_DEV(portid) {
+		/* skip ports that are not enabled */
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+			continue;
+
+		/* get the lcore_id for this port */
+		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+		       lcore_queue_conf[rx_lcore_id].n_rx_port ==
+		       l2fwd_rx_queue_per_lcore) {
+			rx_lcore_id++;
+			if (rx_lcore_id >= RTE_MAX_LCORE)
+				rte_exit(EXIT_FAILURE, "Not enough cores\n");
+		}
+
+		if (qconf != &lcore_queue_conf[rx_lcore_id]) {
+			/* Assigned a new logical core in the loop above. */
+			qconf = &lcore_queue_conf[rx_lcore_id];
+		}
+
+		qconf->rx_port_list[qconf->n_rx_port] = portid;
+		qconf->n_rx_port++;
+		printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+	}
+
+
+	/* Initialise each port */
+	RTE_ETH_FOREACH_DEV(portid) {
+		struct rte_eth_rxconf rxq_conf;
+		struct rte_eth_txconf txq_conf;
+		struct rte_eth_conf local_port_conf = port_conf;
+		struct rte_eth_dev_info dev_info;
+
+		/* skip ports that are not enabled */
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+			printf("Skipping disabled port %u\n", portid);
+			continue;
+		}
+		nb_ports_available++;
+
+		/* init port */
+		printf("Initializing port %u... ", portid);
+		fflush(stdout);
+		rte_eth_dev_info_get(portid, &dev_info);
+		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+				DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+		ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+				  ret, portid);
+
+		ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+						       &nb_txd);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+				 "Cannot adjust number of descriptors: err=%d, port=%u\n",
+				 ret, portid);
+
+		rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
+
+		/* init one RX queue */
+		fflush(stdout);
+		rxq_conf = dev_info.default_rxconf;
+		rxq_conf.offloads = local_port_conf.rxmode.offloads;
+		ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+					     rte_eth_dev_socket_id(portid),
+					     &rxq_conf,
+					     l2fwd_pktmbuf_pool);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+				  ret, portid);
+
+		/* init one TX queue on each port */
+		fflush(stdout);
+		txq_conf = dev_info.default_txconf;
+		txq_conf.offloads = local_port_conf.txmode.offloads;
+		ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+				rte_eth_dev_socket_id(portid),
+				&txq_conf);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+				ret, portid);
+
+		/* Initialize TX buffers */
+		tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+				RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+				rte_eth_dev_socket_id(portid));
+		if (tx_buffer[portid] == NULL)
+			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+					portid);
+
+		rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+
+		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+				rte_eth_tx_buffer_count_callback,
+				&port_statistics[portid].dropped);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE,
+			"Cannot set error callback for tx buffer on port %u\n",
+				 portid);
+
+		/* Start device */
+		ret = rte_eth_dev_start(portid);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
+				  ret, portid);
+
+		printf("done:\n");
+
+		rte_eth_promiscuous_enable(portid);
+
+		printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+				portid,
+				l2fwd_ports_eth_addr[portid].addr_bytes[0],
+				l2fwd_ports_eth_addr[portid].addr_bytes[1],
+				l2fwd_ports_eth_addr[portid].addr_bytes[2],
+				l2fwd_ports_eth_addr[portid].addr_bytes[3],
+				l2fwd_ports_eth_addr[portid].addr_bytes[4],
+				l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+		/* initialize port stats */
+		memset(&port_statistics, 0, sizeof(port_statistics));
+	}
+
+	if (!nb_ports_available) {
+		rte_exit(EXIT_FAILURE,
+			"All available ports are disabled. Please set portmask.\n");
+	}
+
+skip_port_config:
+	check_all_ports_link_status(l2fwd_enabled_port_mask);
+
+	ret = 0;
+	/* launch per-lcore init on every lcore */
+	rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (rte_eal_wait_lcore(lcore_id) < 0) {
+			ret = -1;
+			break;
+		}
+	}
+
+	RTE_ETH_FOREACH_DEV(portid) {
+		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+			continue;
+		printf("Closing port %d...", portid);
+		rte_eth_dev_stop(portid);
+		rte_eth_dev_close(portid);
+		printf(" Done\n");
+	}
+	printf("Bye...\n");
+
+	return ret;
+}
diff --git a/examples/l2fwd-event/meson.build b/examples/l2fwd-event/meson.build
new file mode 100644
index 0000000..16eadb0
--- /dev/null
+++ b/examples/l2fwd-event/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# the eventdev API is used, so add it to the default example dependencies
+deps += 'eventdev'
+sources = files(
+	'main.c', 'l2fwd_eventdev.c'
+)
-- 
1.8.3.1


