[dpdk-dev] [PATCH 33/36] event/cnxk: add Rx adapter support

pbhagavatula at marvell.com
Sat Mar 6 17:29:38 CET 2021


From: Pavan Nikhilesh <pbhagavatula at marvell.com>

Add support for the event eth Rx adapter. Implement the Rx adapter
capability query, queue add/delete and start/stop callbacks for CN9K
and CN10K, and grow the SSO XAE count based on the Rx mempool sizes of
the queues added to the adapter.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
 doc/guides/eventdevs/cnxk.rst            |   4 +
 drivers/event/cnxk/cn10k_eventdev.c      |  76 +++++++++++
 drivers/event/cnxk/cn10k_worker.h        |   4 +
 drivers/event/cnxk/cn9k_eventdev.c       |  82 ++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |   4 +
 drivers/event/cnxk/cnxk_eventdev.h       |  21 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 157 +++++++++++++++++++++++
 7 files changed, 348 insertions(+)
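
For reviewers unfamiliar with the adapter flow, here is a minimal
application-side sketch of the setup this patch enables (a sketch only:
evdev_id and eth_port are placeholder ids for an already configured and
started eventdev/ethdev pair, and error handling is trimmed):

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

static int
app_rx_adapter_setup(uint8_t evdev_id, uint16_t eth_port)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = { 0 };
	struct rte_event_port_conf pconf;
	int rc;

	rc = rte_event_port_default_conf_get(evdev_id, 0, &pconf);
	if (rc < 0)
		return rc;

	/* Adapter id 0, backed by this eventdev. */
	rc = rte_event_eth_rx_adapter_create(0, evdev_id, &pconf);
	if (rc < 0)
		return rc;

	/* Deliver packets as ATOMIC events on event queue 0;
	 * rx_queue_id = -1 subscribes every Rx queue of the port,
	 * i.e. the N:1 mapping this PMD advertises.
	 */
	qconf.ev.queue_id = 0;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	rc = rte_event_eth_rx_adapter_queue_add(0, eth_port, -1, &qconf);
	if (rc < 0)
		return rc;

	return rte_event_eth_rx_adapter_start(0);
}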

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index c42784a3b..abab7f742 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -39,6 +39,10 @@ Features of the OCTEON CNXK SSO PMD are:
   time granularity of 2.5us on CN9K and 1us on CN10K.
 - Up to 256 TIM rings aka event timer adapters.
 - Up to 8 rings traversed in parallel.
+- HW managed packets enqueued from ethdev to eventdev, exposed through the
+  event eth Rx adapter.
+- N:1 Ethernet device Rx queue to event queue mapping.
+- Full Rx offload support defined through ethdev queue configuration.
 
 Prerequisites and Compilation procedure
 ---------------------------------------
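
A note on the capability bits reported below: INTERNAL_PORT means the
HW enqueues packets to the SSO directly, so no adapter service core is
needed; the SW_CAP fallback (non-cnxk ethdevs) runs the adapter as a
service instead. A hedged sketch of the usual application-side check
(includes elided; evdev_id, eth_port and service_lcore are
placeholders):

uint32_t caps = 0, service_id;
int rc;

rc = rte_event_eth_rx_adapter_caps_get(evdev_id, eth_port, &caps);
if (rc < 0)
	return rc;

if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
	/* SW path: the adapter is a service; bind its service id
	 * to an lcore before starting the adapter.
	 */
	rc = rte_event_eth_rx_adapter_service_id_get(0, &service_id);
	if (rc == 0)
		rte_service_map_lcore_set(service_id, service_lcore, 1);
}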
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2b2025cdb..72175e16f 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -407,6 +407,76 @@ cn10k_sso_selftest(void)
 	return cnxk_sso_selftest(RTE_STR(event_cn10k));
 }
 
+static int
+cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int rc;
+
+	RTE_SET_USED(event_dev);
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+	return 0;
+}
+
+static void
+cn10k_sso_set_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
+		ws->lookup_mem = lookup_mem;
+	}
+}
+
+static int
+cn10k_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	void *lookup_mem;
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		return -EINVAL;
+
+	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+					   queue_conf);
+	if (rc)
+		return -EINVAL;
+
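+	/* All Rx queues of a port share one ptype lookup table, so
+	 * queue 0's copy is representative (assumption from the cnxk
+	 * ethdev Rx queue layout).
+	 */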
+	lookup_mem = ((struct cn10k_eth_rxq *)eth_dev->data->rx_queues[0])
+			     ->lookup_mem;
+	cn10k_sso_set_lookup_mem(event_dev, lookup_mem);
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			       const struct rte_eth_dev *eth_dev,
+			       int32_t rx_queue_id)
+{
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		return -EINVAL;
+
+	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -420,6 +490,12 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.port_unlink = cn10k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
 
+	.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
 	.timer_adapter_caps_get = cnxk_tim_caps_get,
 
 	.dump = cnxk_sso_dump,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index ed4e3bd63..d418e80aa 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -5,9 +5,13 @@
 #ifndef __CN10K_WORKER_H__
 #define __CN10K_WORKER_H__
 
+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include "cn10k_ethdev.h"
+#include "cn10k_rx.h"
+
 /* SSO Operations */
 
 static __rte_always_inline uint8_t
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index e39b4ded2..4aa577bd5 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -481,6 +481,82 @@ cn9k_sso_selftest(void)
 	return cnxk_sso_selftest(RTE_STR(event_cn9k));
 }
 
+static int
+cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int rc;
+
+	RTE_SET_USED(event_dev);
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
+	if (rc)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+	return 0;
+}
+
+static void
+cn9k_sso_set_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		if (dev->dual_ws) {
+			struct cn9k_sso_hws_dual *dws =
+				event_dev->data->ports[i];
+			dws->lookup_mem = lookup_mem;
+		} else {
+			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
+			ws->lookup_mem = lookup_mem;
+		}
+	}
+}
+
+static int
+cn9k_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	void *lookup_mem;
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
+	if (rc)
+		return -EINVAL;
+
+	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+					   queue_conf);
+	if (rc)
+		return -EINVAL;
+
+	lookup_mem = ((struct cn9k_eth_rxq *)eth_dev->data->rx_queues[0])
+			     ->lookup_mem;
+	cn9k_sso_set_lookup_mem(event_dev, lookup_mem);
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev,
+			      int32_t rx_queue_id)
+{
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
+	if (rc)
+		return -EINVAL;
+
+	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -494,6 +570,12 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.port_unlink = cn9k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
 
+	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
 	.timer_adapter_caps_get = cnxk_tim_caps_get,
 
 	.dump = cnxk_sso_dump,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index b997db2fe..b5af5ecf4 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -5,9 +5,13 @@
 #ifndef __CN9K_WORKER_H__
 #define __CN9K_WORKER_H__
 
+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include "cn9k_ethdev.h"
+#include "cn9k_rx.h"
+
 /* SSO Operations */
 
 static __rte_always_inline uint8_t
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 32abf9632..9c3331f7e 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -6,6 +6,8 @@
 #define __CNXK_EVENTDEV_H__
 
 #include <rte_devargs.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_kvargs.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
@@ -81,7 +83,10 @@ struct cnxk_sso_evdev {
 	uint64_t nb_xaq_cfg;
 	rte_iova_t fc_iova;
 	struct rte_mempool *xaq_pool;
+	uint64_t rx_offloads;
 	uint64_t adptr_xae_cnt;
+	uint16_t rx_adptr_pool_cnt;
+	uint64_t *rx_adptr_pools;
 	uint16_t tim_adptr_ring_cnt;
 	uint16_t *timer_adptr_rings;
 	uint64_t *timer_adptr_sz;
@@ -108,6 +113,7 @@ struct cnxk_sso_evdev {
 struct cn10k_sso_hws {
 	/* Get Work Fastpath data */
 	CN10K_SSO_HWS_OPS;
+	void *lookup_mem;
 	uint32_t gw_wdata;
 	uint8_t swtag_req;
 	uint8_t hws_id;
@@ -132,6 +138,7 @@ struct cn10k_sso_hws {
 struct cn9k_sso_hws {
 	/* Get Work Fastpath data */
 	CN9K_SSO_HWS_OPS;
+	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t hws_id;
 	/* Add Work Fastpath data */
@@ -148,6 +155,7 @@ struct cn9k_sso_hws_state {
 struct cn9k_sso_hws_dual {
 	/* Get Work Fastpath data */
 	struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
+	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t vws; /* Ping pong bit */
 	uint8_t hws_id;
@@ -235,4 +243,17 @@ void cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f);
 /* CN9K */
 void cn9k_sso_set_rsrc(void *arg);
 
+/* Common adapter ops */
+int cnxk_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+				  const struct rte_eth_dev *eth_dev,
+				  int32_t rx_queue_id);
+int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev);
+int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+			     const struct rte_eth_dev *eth_dev);
+
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 6d9615453..e06033117 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell International Ltd.
  */
 
+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 
 void
@@ -11,6 +12,32 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
 	int i;
 
 	switch (event_type) {
+	case RTE_EVENT_TYPE_ETHDEV: {
+		struct cnxk_eth_rxq_sp *rxq = data;
+		uint64_t *old_ptr;
+
+		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
+			if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
+				return;
+		}
+
+		dev->rx_adptr_pool_cnt++;
+		old_ptr = dev->rx_adptr_pools;
+		dev->rx_adptr_pools = rte_realloc(
+			dev->rx_adptr_pools,
+			sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
+		if (dev->rx_adptr_pools == NULL) {
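+			/* Tracking array could not grow; still account
+			 * the pool's XAEs and keep the old array.
+			 */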
+			dev->adptr_xae_cnt += rxq->qconf.mp->size;
+			dev->rx_adptr_pools = old_ptr;
+			dev->rx_adptr_pool_cnt--;
+			return;
+		}
+		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+			(uint64_t)rxq->qconf.mp;
+
+		dev->adptr_xae_cnt += rxq->qconf.mp->size;
+		break;
+	}
 	case RTE_EVENT_TYPE_TIMER: {
 		struct cnxk_tim_ring *timr = data;
 		uint16_t *old_ring_ptr;
@@ -65,3 +92,133 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
 		break;
 	}
 }
+
+static int
+cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
+		    uint16_t port_id, const struct rte_event *ev,
+		    uint8_t custom_flowid)
+{
+	struct roc_nix_rq *rq;
+
+	rq = &cnxk_eth_dev->rqs[rq_id];
+	rq->sso_ena = 1;
+	rq->tt = ev->sched_type;
+	rq->hwgrp = ev->queue_id;
+	rq->flow_tag_width = 20;
+	rq->wqe_skip = 1;
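+	/* Resulting tag layout (from the shifts below):
+	 * [31:28] RTE_EVENT_TYPE_ETHDEV, [27:20] ethdev port id,
+	 * [19:0] flow tag (flow_tag_width = 20).
+	 */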
+	rq->tag_mask = (port_id & 0xF) << 20;
+	rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
+			<< 24;
+
+	if (custom_flowid) {
+		rq->flow_tag_width = 0;
+		rq->tag_mask |= ev->flow_id;
+	}
+
+	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+static int
+cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
+{
+	struct roc_nix_rq *rq;
+
+	rq = &cnxk_eth_dev->rqs[rq_id];
+	rq->sso_ena = 0;
+	rq->flow_tag_width = 32;
+	rq->tag_mask = 0;
+
+	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+int
+cnxk_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t port = eth_dev->data->port_id;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	int i, rc = 0;
+
+	if (rx_queue_id < 0) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
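+			/* The slow-path queue struct sits immediately
+			 * before the fast-path data pointed to by
+			 * rx_queues[]; step back one element to reach it.
+			 */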
+			rxq_sp = eth_dev->data->rx_queues[i];
+			rxq_sp = rxq_sp - 1;
+			cnxk_sso_updt_xae_cnt(dev, rxq_sp,
+					      RTE_EVENT_TYPE_ETHDEV);
+			rc = cnxk_sso_xae_reconfigure(
+				(struct rte_eventdev *)(uintptr_t)event_dev);
+			rc |= cnxk_sso_rxq_enable(
+				cnxk_eth_dev, i, port, &queue_conf->ev,
+				!!(queue_conf->rx_queue_flags &
+				   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+		}
+	} else {
+		rxq_sp = eth_dev->data->rx_queues[rx_queue_id];
+		rxq_sp = rxq_sp - 1;
+		cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
+		rc = cnxk_sso_xae_reconfigure(
+			(struct rte_eventdev *)(uintptr_t)event_dev);
+		rc |= cnxk_sso_rxq_enable(
+			cnxk_eth_dev, (uint16_t)rx_queue_id, port,
+			&queue_conf->ev,
+			!!(queue_conf->rx_queue_flags &
+			   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+	}
+
+	if (rc < 0) {
+		plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
+			queue_conf->ev.queue_id);
+		return rc;
+	}
+
+	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
+
+	return 0;
+}
+
+int
+cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev,
+			      int32_t rx_queue_id)
+{
+	struct cnxk_eth_dev *dev = eth_dev->data->dev_private;
+	int i, rc = 0;
+
+	RTE_SET_USED(event_dev);
+	if (rx_queue_id < 0) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+			rc = cnxk_sso_rxq_disable(dev, i);
+	} else {
+		rc = cnxk_sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+	}
+
+	if (rc < 0)
+		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
+			eth_dev->data->port_id, rx_queue_id);
+
+	return rc;
+}
+
+int
+cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+			  const struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(event_dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
+int
+cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+			 const struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(event_dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
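
Finally, on the flow id override wired up above: when an application
sets RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID, the PMD programs
the RQ with flow_tag_width = 0 and ORs ev.flow_id into the tag mask,
so every packet from that queue carries a fixed flow id. A sketch
(eth_port, the queue number 3 and the 0xBEEF value are illustrative):

struct rte_event_eth_rx_adapter_queue_conf qconf = { 0 };
int rc;

qconf.rx_queue_flags = RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
qconf.ev.queue_id = 0;
qconf.ev.sched_type = RTE_SCHED_TYPE_ORDERED;
qconf.ev.flow_id = 0xBEEF; /* lands in the low 20 tag bits */

/* Subscribe only ethdev Rx queue 3 of eth_port. */
rc = rte_event_eth_rx_adapter_queue_add(0, eth_port, 3, &qconf);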
-- 
2.17.1


