[dpdk-dev] [PATCH v6 3/7] event/cnxk: add Tx adapter support

Nithin Dabilpuram nithind1988 at gmail.com
Sat Jul 3 15:23:16 CEST 2021


On Sat, Jul 03, 2021 at 02:44:04AM +0530, pbhagavatula at marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula at marvell.com>
> 
> Add support for event eth Tx adapter.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
> ---
>  doc/guides/eventdevs/cnxk.rst            |   4 +-
>  doc/guides/rel_notes/release_21_08.rst   |   6 +-
>  drivers/common/cnxk/roc_nix.h            |   1 +
>  drivers/common/cnxk/roc_nix_queue.c      |   7 +-
>  drivers/event/cnxk/cn10k_eventdev.c      |  91 ++++++++++++++
>  drivers/event/cnxk/cn9k_eventdev.c       | 148 +++++++++++++++++++++++
>  drivers/event/cnxk/cnxk_eventdev.h       |  22 +++-
>  drivers/event/cnxk/cnxk_eventdev_adptr.c |  88 ++++++++++++++
>  8 files changed, 358 insertions(+), 9 deletions(-)
> 
> diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
> index b7e82c127..6fdccc2ab 100644
> --- a/doc/guides/eventdevs/cnxk.rst
> +++ b/doc/guides/eventdevs/cnxk.rst
> @@ -42,7 +42,9 @@ Features of the OCTEON cnxk SSO PMD are:
>  - HW managed packets enqueued from ethdev to eventdev exposed through event eth
>    RX adapter.
>  - N:1 ethernet device Rx queue to Event queue mapping.
> -- Full Rx offload support defined through ethdev queue configuration.
> +- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
> +  capability while maintaining receive packet order.
> +- Full Rx/Tx offload support defined through ethdev queue configuration.
>  
>  Prerequisites and Compilation procedure
>  ---------------------------------------
> diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
> index 3892c8017..80ff93269 100644
> --- a/doc/guides/rel_notes/release_21_08.rst
> +++ b/doc/guides/rel_notes/release_21_08.rst
> @@ -60,10 +60,10 @@ New Features
>    * Added net/cnxk driver which provides the support for the integrated ethernet
>      device.
>  
> -* **Added support for Marvell CN10K, CN9K, event Rx adapter.**
> +* **Added support for Marvell CN10K, CN9K, event Rx/Tx adapter.**
>  
> -  * Added Rx adapter support for event/cnxk when the ethernet device requested is
> -    net/cnxk.
> +  * Added Rx/Tx adapter support for event/cnxk when the ethernet device requested
> +    is net/cnxk.
>  
>  
>  Removed Items
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 76613fe84..822c1900e 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -200,6 +200,7 @@ struct roc_nix_sq {
>  	uint64_t aura_handle;
>  	int16_t nb_sqb_bufs_adj;
>  	uint16_t nb_sqb_bufs;
> +	uint16_t aura_sqb_bufs;
>  	plt_iova_t io_addr;
>  	void *lmt_addr;
>  	void *sqe_mem;
> diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
> index 0604e7a18..f69771c15 100644
> --- a/drivers/common/cnxk/roc_nix_queue.c
> +++ b/drivers/common/cnxk/roc_nix_queue.c
> @@ -587,12 +587,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
>  	aura.fc_ena = 1;
>  	aura.fc_addr = (uint64_t)sq->fc;
>  	aura.fc_hyst_bits = 0; /* Store count on all updates */
> -	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
> +	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
>  				 &pool);
>  	if (rc)
>  		goto fail;
>  
> -	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
> +	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
>  	if (sq->sqe_mem == NULL) {
>  		rc = NIX_ERR_NO_MEM;
>  		goto nomem;
> @@ -600,11 +600,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
>  
>  	/* Fill the initial buffers */
>  	iova = (uint64_t)sq->sqe_mem;
> -	for (count = 0; count < nb_sqb_bufs; count++) {
> +	for (count = 0; count < NIX_MAX_SQB; count++) {
>  		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
>  		iova += blk_sz;
>  	}
>  	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
> +	sq->aura_sqb_bufs = NIX_MAX_SQB;

Since the aura is now created with NIX_MAX_SQB buffers, don't we also need to
modify the aura limit here back to sq->nb_sqb_bufs for poll mode? A rough
sketch of what I mean is below.
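
Untested, but something along these lines at the end of sqb_pool_populate()
is what I have in mind (the error label is only illustrative; whatever
cleanup path already exists in that function should be reused):

	/* The aura is backed by NIX_MAX_SQB buffers, so clamp its limit
	 * back to the poll-mode SQB count here. The event Tx adapter can
	 * raise it again via roc_npa_aura_limit_modify() when the queue
	 * is added to the adapter.
	 */
	rc = roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
	if (rc)
		goto nomem;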

With this fixed, Acked-by: Nithin Dabilpuram <ndabilpuram at marvell.com>

>  
>  	return rc;
>  nomem:
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index ba7d95fff..8a9b04a3d 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -44,6 +44,7 @@ cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
>  	/* First cache line is reserved for cookie */
>  	ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
>  	ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
> +	ws->tx_base = ws->base;
>  	ws->hws_id = port_id;
>  	ws->swtag_req = 0;
>  	ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
> @@ -233,6 +234,39 @@ cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
>  	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
>  }
>  
> +static int
> +cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
> +{
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +	int i;
> +
> +	if (dev->tx_adptr_data == NULL)
> +		return 0;
> +
> +	for (i = 0; i < dev->nb_event_ports; i++) {
> +		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
> +		void *ws_cookie;
> +
> +		ws_cookie = cnxk_sso_hws_get_cookie(ws);
> +		ws_cookie = rte_realloc_socket(
> +			ws_cookie,
> +			sizeof(struct cnxk_sso_hws_cookie) +
> +				sizeof(struct cn10k_sso_hws) +
> +				(sizeof(uint64_t) * (dev->max_port_id + 1) *
> +				 RTE_MAX_QUEUES_PER_PORT),
> +			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> +		if (ws_cookie == NULL)
> +			return -ENOMEM;
> +		ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
> +		memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
> +		       sizeof(uint64_t) * (dev->max_port_id + 1) *
> +			       RTE_MAX_QUEUES_PER_PORT);
> +		event_dev->data->ports[i] = ws;
> +	}
> +
> +	return 0;
> +}
> +
>  static void
>  cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  {
> @@ -493,6 +527,10 @@ cn10k_sso_start(struct rte_eventdev *event_dev)
>  {
>  	int rc;
>  
> +	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
> +	if (rc < 0)
> +		return rc;
> +
>  	rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
>  			    cn10k_sso_hws_flush_events);
>  	if (rc < 0)
> @@ -595,6 +633,55 @@ cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
>  	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
>  }
>  
> +static int
> +cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
> +			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
> +{
> +	int ret;
> +
> +	RTE_SET_USED(dev);
> +	ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
> +	if (ret)
> +		*caps = 0;
> +	else
> +		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
> +
> +	return 0;
> +}
> +
> +static int
> +cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
> +			       const struct rte_eth_dev *eth_dev,
> +			       int32_t tx_queue_id)
> +{
> +	int rc;
> +
> +	RTE_SET_USED(id);
> +	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
> +	if (rc < 0)
> +		return rc;
> +	rc = cn10k_sso_updt_tx_adptr_data(event_dev);
> +	if (rc < 0)
> +		return rc;
> +	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
> +
> +	return 0;
> +}
> +
> +static int
> +cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
> +			       const struct rte_eth_dev *eth_dev,
> +			       int32_t tx_queue_id)
> +{
> +	int rc;
> +
> +	RTE_SET_USED(id);
> +	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
> +	if (rc < 0)
> +		return rc;
> +	return cn10k_sso_updt_tx_adptr_data(event_dev);
> +}
> +
>  static struct rte_eventdev_ops cn10k_sso_dev_ops = {
>  	.dev_infos_get = cn10k_sso_info_get,
>  	.dev_configure = cn10k_sso_dev_configure,
> @@ -614,6 +701,10 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
>  	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
>  	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
>  
> +	.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
> +	.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
> +	.eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,
> +
>  	.timer_adapter_caps_get = cnxk_tim_caps_get,
>  
>  	.dump = cnxk_sso_dump,
> diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> index e386cb784..21f80323d 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -248,6 +248,66 @@ cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
>  	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
>  }
>  
> +static int
> +cn9k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
> +{
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +	int i;
> +
> +	if (dev->tx_adptr_data == NULL)
> +		return 0;
> +
> +	for (i = 0; i < dev->nb_event_ports; i++) {
> +		if (dev->dual_ws) {
> +			struct cn9k_sso_hws_dual *dws =
> +				event_dev->data->ports[i];
> +			void *ws_cookie;
> +
> +			ws_cookie = cnxk_sso_hws_get_cookie(dws);
> +			ws_cookie = rte_realloc_socket(
> +				ws_cookie,
> +				sizeof(struct cnxk_sso_hws_cookie) +
> +					sizeof(struct cn9k_sso_hws_dual) +
> +					(sizeof(uint64_t) *
> +					 (dev->max_port_id + 1) *
> +					 RTE_MAX_QUEUES_PER_PORT),
> +				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> +			if (ws_cookie == NULL)
> +				return -ENOMEM;
> +			dws = RTE_PTR_ADD(ws_cookie,
> +					  sizeof(struct cnxk_sso_hws_cookie));
> +			memcpy(&dws->tx_adptr_data, dev->tx_adptr_data,
> +			       sizeof(uint64_t) * (dev->max_port_id + 1) *
> +				       RTE_MAX_QUEUES_PER_PORT);
> +			event_dev->data->ports[i] = dws;
> +		} else {
> +			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
> +			void *ws_cookie;
> +
> +			ws_cookie = cnxk_sso_hws_get_cookie(ws);
> +			ws_cookie = rte_realloc_socket(
> +				ws_cookie,
> +				sizeof(struct cnxk_sso_hws_cookie) +
> +					sizeof(struct cn9k_sso_hws_dual) +
> +					(sizeof(uint64_t) *
> +					 (dev->max_port_id + 1) *
> +					 RTE_MAX_QUEUES_PER_PORT),
> +				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
> +			if (ws_cookie == NULL)
> +				return -ENOMEM;
> +			ws = RTE_PTR_ADD(ws_cookie,
> +					 sizeof(struct cnxk_sso_hws_cookie));
> +			memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
> +			       sizeof(uint64_t) * (dev->max_port_id + 1) *
> +				       RTE_MAX_QUEUES_PER_PORT);
> +			event_dev->data->ports[i] = ws;
> +		}
> +	}
> +	rte_mb();
> +
> +	return 0;
> +}
> +
>  static void
>  cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
>  {
> @@ -734,6 +794,10 @@ cn9k_sso_start(struct rte_eventdev *event_dev)
>  {
>  	int rc;
>  
> +	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
> +	if (rc < 0)
> +		return rc;
> +
>  	rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
>  			    cn9k_sso_hws_flush_events);
>  	if (rc < 0)
> @@ -844,6 +908,86 @@ cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
>  	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
>  }
>  
> +static int
> +cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
> +			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
> +{
> +	int ret;
> +
> +	RTE_SET_USED(dev);
> +	ret = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
> +	if (ret)
> +		*caps = 0;
> +	else
> +		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
> +
> +	return 0;
> +}
> +
> +static void
> +cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
> +		       bool ena)
> +{
> +	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
> +	struct cn9k_eth_txq *txq;
> +	struct roc_nix_sq *sq;
> +	int i;
> +
> +	if (tx_queue_id < 0) {
> +		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +			cn9k_sso_txq_fc_update(eth_dev, i, ena);
> +	} else {
> +		uint16_t sq_limit;
> +
> +		sq = &cnxk_eth_dev->sqs[tx_queue_id];
> +		txq = eth_dev->data->tx_queues[tx_queue_id];
> +		sq_limit =
> +			ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
> +				    sq->nb_sqb_bufs;
> +		txq->nb_sqb_bufs_adj =
> +			sq_limit -
> +			RTE_ALIGN_MUL_CEIL(sq_limit,
> +					   (1ULL << txq->sqes_per_sqb_log2)) /
> +				(1ULL << txq->sqes_per_sqb_log2);
> +		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
> +	}
> +}
> +
> +static int
> +cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
> +			      const struct rte_eth_dev *eth_dev,
> +			      int32_t tx_queue_id)
> +{
> +	int rc;
> +
> +	RTE_SET_USED(id);
> +	rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
> +	if (rc < 0)
> +		return rc;
> +	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
> +	rc = cn9k_sso_updt_tx_adptr_data(event_dev);
> +	if (rc < 0)
> +		return rc;
> +	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
> +
> +	return 0;
> +}
> +
> +static int
> +cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
> +			      const struct rte_eth_dev *eth_dev,
> +			      int32_t tx_queue_id)
> +{
> +	int rc;
> +
> +	RTE_SET_USED(id);
> +	rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
> +	if (rc < 0)
> +		return rc;
> +	cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
> +	return cn9k_sso_updt_tx_adptr_data(event_dev);
> +}
> +
>  static struct rte_eventdev_ops cn9k_sso_dev_ops = {
>  	.dev_infos_get = cn9k_sso_info_get,
>  	.dev_configure = cn9k_sso_dev_configure,
> @@ -863,6 +1007,10 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
>  	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
>  	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
>  
> +	.eth_tx_adapter_caps_get = cn9k_sso_tx_adapter_caps_get,
> +	.eth_tx_adapter_queue_add = cn9k_sso_tx_adapter_queue_add,
> +	.eth_tx_adapter_queue_del = cn9k_sso_tx_adapter_queue_del,
> +
>  	.timer_adapter_caps_get = cnxk_tim_caps_get,
>  
>  	.dump = cnxk_sso_dump,
> diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
> index 9d5d2d033..24e1be6a9 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.h
> +++ b/drivers/event/cnxk/cnxk_eventdev.h
> @@ -8,6 +8,7 @@
>  #include <rte_devargs.h>
>  #include <rte_ethdev.h>
>  #include <rte_event_eth_rx_adapter.h>
> +#include <rte_event_eth_tx_adapter.h>
>  #include <rte_kvargs.h>
>  #include <rte_mbuf_pool_ops.h>
>  #include <rte_pci.h>
> @@ -34,6 +35,7 @@
>  #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
>  #define CNXK_SSO_XAQ_SLACK     (8)
>  #define CNXK_SSO_WQE_SG_PTR    (9)
> +#define CNXK_SSO_SQB_LIMIT     (0x180)
>  
>  #define CNXK_TT_FROM_TAG(x)	    (((x) >> 32) & SSO_TT_EMPTY)
>  #define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
> @@ -86,9 +88,12 @@ struct cnxk_sso_evdev {
>  	rte_iova_t fc_iova;
>  	struct rte_mempool *xaq_pool;
>  	uint64_t rx_offloads;
> +	uint64_t tx_offloads;
>  	uint64_t adptr_xae_cnt;
>  	uint16_t rx_adptr_pool_cnt;
>  	uint64_t *rx_adptr_pools;
> +	uint64_t *tx_adptr_data;
> +	uint16_t max_port_id;
>  	uint16_t tim_adptr_ring_cnt;
>  	uint16_t *timer_adptr_rings;
>  	uint64_t *timer_adptr_sz;
> @@ -115,7 +120,10 @@ struct cn10k_sso_hws {
>  	uint64_t xaq_lmt __rte_cache_aligned;
>  	uint64_t *fc_mem;
>  	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
> +	/* Tx Fastpath data */
> +	uint64_t tx_base __rte_cache_aligned;
>  	uintptr_t lmt_base;
> +	uint8_t tx_adptr_data[];
>  } __rte_cache_aligned;
>  
>  /* CN9K HWS ops */
> @@ -140,7 +148,9 @@ struct cn9k_sso_hws {
>  	uint64_t xaq_lmt __rte_cache_aligned;
>  	uint64_t *fc_mem;
>  	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
> -	uint64_t base;
> +	/* Tx Fastpath data */
> +	uint64_t base __rte_cache_aligned;
> +	uint8_t tx_adptr_data[];
>  } __rte_cache_aligned;
>  
>  struct cn9k_sso_hws_state {
> @@ -160,7 +170,9 @@ struct cn9k_sso_hws_dual {
>  	uint64_t xaq_lmt __rte_cache_aligned;
>  	uint64_t *fc_mem;
>  	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
> -	uint64_t base[2];
> +	/* Tx Fastpath data */
> +	uint64_t base[2] __rte_cache_aligned;
> +	uint8_t tx_adptr_data[];
>  } __rte_cache_aligned;
>  
>  struct cnxk_sso_hws_cookie {
> @@ -267,5 +279,11 @@ int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
>  			      const struct rte_eth_dev *eth_dev);
>  int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
>  			     const struct rte_eth_dev *eth_dev);
> +int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
> +				  const struct rte_eth_dev *eth_dev,
> +				  int32_t tx_queue_id);
> +int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
> +				  const struct rte_eth_dev *eth_dev,
> +				  int32_t tx_queue_id);
>  
>  #endif /* __CNXK_EVENTDEV_H__ */
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index 3b7ecb375..502da272d 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -223,3 +223,91 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
>  
>  	return 0;
>  }
> +
> +static int
> +cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
> +{
> +	return roc_npa_aura_limit_modify(
> +		sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
> +}
> +
> +static int
> +cnxk_sso_updt_tx_queue_data(const struct rte_eventdev *event_dev,
> +			    uint16_t eth_port_id, uint16_t tx_queue_id,
> +			    void *txq)
> +{
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +	uint16_t max_port_id = dev->max_port_id;
> +	uint64_t *txq_data = dev->tx_adptr_data;
> +
> +	if (txq_data == NULL || eth_port_id > max_port_id) {
> +		max_port_id = RTE_MAX(max_port_id, eth_port_id);
> +		txq_data = rte_realloc_socket(
> +			txq_data,
> +			(sizeof(uint64_t) * (max_port_id + 1) *
> +			 RTE_MAX_QUEUES_PER_PORT),
> +			RTE_CACHE_LINE_SIZE, event_dev->data->socket_id);
> +		if (txq_data == NULL)
> +			return -ENOMEM;
> +	}
> +
> +	((uint64_t(*)[RTE_MAX_QUEUES_PER_PORT])
> +		 txq_data)[eth_port_id][tx_queue_id] = (uint64_t)txq;
> +	dev->max_port_id = max_port_id;
> +	dev->tx_adptr_data = txq_data;
> +	return 0;
> +}
> +
> +int
> +cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
> +			      const struct rte_eth_dev *eth_dev,
> +			      int32_t tx_queue_id)
> +{
> +	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +	struct roc_nix_sq *sq;
> +	int i, ret;
> +	void *txq;
> +
> +	if (tx_queue_id < 0) {
> +		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +			cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
> +	} else {
> +		txq = eth_dev->data->tx_queues[tx_queue_id];
> +		sq = &cnxk_eth_dev->sqs[tx_queue_id];
> +		cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
> +		ret = cnxk_sso_updt_tx_queue_data(
> +			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
> +		if (ret < 0)
> +			return ret;
> +
> +		dev->tx_offloads |= cnxk_eth_dev->tx_offload_flags;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
> +			      const struct rte_eth_dev *eth_dev,
> +			      int32_t tx_queue_id)
> +{
> +	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
> +	struct roc_nix_sq *sq;
> +	int i, ret;
> +
> +	RTE_SET_USED(event_dev);
> +	if (tx_queue_id < 0) {
> +		for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +			cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
> +	} else {
> +		sq = &cnxk_eth_dev->sqs[tx_queue_id];
> +		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
> +		ret = cnxk_sso_updt_tx_queue_data(
> +			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
> +		if (ret < 0)
> +			return ret;
> +	}
> +
> +	return 0;
> +}
> -- 
> 2.17.1
> 
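
For reference, the new ops are reached through the generic eventdev Tx
adapter API; a minimal, untested application-side sequence (device and
adapter ids below are made up for illustration) would look roughly like:

	#include <rte_eventdev.h>
	#include <rte_event_eth_tx_adapter.h>

	static int
	app_setup_tx_adapter(uint8_t evdev_id, uint16_t eth_port, uint8_t adptr_id)
	{
		struct rte_event_port_conf pconf;
		uint32_t caps = 0;
		int rc;

		/* Borrow event port 0's defaults for the adapter's port conf. */
		rc = rte_event_port_default_conf_get(evdev_id, 0, &pconf);
		if (rc)
			return rc;

		/* cnxk reports RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT here. */
		rc = rte_event_eth_tx_adapter_caps_get(evdev_id, eth_port, &caps);
		if (rc)
			return rc;

		rc = rte_event_eth_tx_adapter_create(adptr_id, evdev_id, &pconf);
		if (rc)
			return rc;

		/* -1 adds all Tx queues of the ethdev to the adapter. */
		rc = rte_event_eth_tx_adapter_queue_add(adptr_id, eth_port, -1);
		if (rc)
			return rc;

		return rte_event_eth_tx_adapter_start(adptr_id);
	}

With the internal-port capability, workers then tag each mbuf with
rte_event_eth_tx_adapter_txq_set() and submit it through
rte_event_eth_tx_adapter_enqueue() instead of a plain Tx burst, which is
the path these PMD changes wire up.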

