[dpdk-dev] [PATCH v5 5/8] eventdev: add Tx adapter event vector support

Jayatheerthan, Jay jay.jayatheerthan at intel.com
Thu Mar 25 12:44:31 CET 2021


> -----Original Message-----
> From: pbhagavatula at marvell.com <pbhagavatula at marvell.com>
> Sent: Wednesday, March 24, 2021 10:35 AM
> To: jerinj at marvell.com; Jayatheerthan, Jay <jay.jayatheerthan at intel.com>; Carrillo, Erik G <erik.g.carrillo at intel.com>; Gujjar,
> Abhinandan S <abhinandan.gujjar at intel.com>; McDaniel, Timothy <timothy.mcdaniel at intel.com>; hemant.agrawal at nxp.com; Van
> Haaren, Harry <harry.van.haaren at intel.com>; mattias.ronnblom <mattias.ronnblom at ericsson.com>; Ma, Liang J
> <liang.j.ma at intel.com>
> Cc: dev at dpdk.org; Pavan Nikhilesh <pbhagavatula at marvell.com>
> Subject: [dpdk-dev] [PATCH v5 5/8] eventdev: add Tx adapter event vector support
> 
> From: Pavan Nikhilesh <pbhagavatula at marvell.com>
> 
> Add event vector support for event eth Tx adapter, the implementation
> receives events from the single linked queue and based on
> rte_event_vector::union_valid transmits the vector of mbufs to a given

Typo in the commit message: it should read attr_valid instead of union_valid (the field used in the code below is rte_event_vector::attr_valid).

> port, queue pair.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
> ---
>  .../rte_event_eth_tx_adapter.c                | 66 ++++++++++++++++---
>  lib/librte_eventdev/rte_eventdev.c            |  5 +-
>  2 files changed, 60 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> index 5b4c42dcf..db260bfb6 100644
> --- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> +++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
> @@ -510,6 +510,47 @@ txa_service_buffer_retry(struct rte_mbuf **pkts, uint16_t unsent,
>  	stats->tx_dropped += unsent - sent;
>  }
> 
> +static uint16_t
> +txa_process_event_vector(struct txa_service_data *txa,
> +			 struct rte_event_vector *vec)
> +{
> +	struct txa_service_queue_info *tqi;
> +	uint16_t port, queue, nb_tx = 0;
> +	struct rte_mbuf **mbufs;
> +	int i;
> +
> +	mbufs = (struct rte_mbuf **)vec->mbufs;
> +	if (vec->attr_valid) {
> +		port = vec->port;
> +		queue = vec->queue;
> +		tqi = txa_service_queue(txa, port, queue);
> +		if (unlikely(tqi == NULL || !tqi->added)) {
> +			rte_pktmbuf_free_bulk(mbufs, vec->nb_elem);
> +			rte_mempool_put(rte_mempool_from_obj(vec), vec);
> +			return 0;
> +		}
> +		for (i = 0; i < vec->nb_elem; i++) {
> +			nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
> +						   mbufs[i]);
> +		}
> +	} else {
> +		for (i = 0; i < vec->nb_elem; i++) {
> +			port = mbufs[i]->port;
> +			queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
> +			tqi = txa_service_queue(txa, port, queue);
> +			if (unlikely(tqi == NULL || !tqi->added)) {
> +				rte_pktmbuf_free(mbufs[i]);
> +				continue;
> +			}
> +			nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
> +						   mbufs[i]);
> +		}
> +	}
> +	rte_mempool_put(rte_mempool_from_obj(vec), vec);
> +
> +	return nb_tx;
> +}
> +
>  static void
>  txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
>  	uint32_t n)
> @@ -522,22 +563,27 @@ txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
> 
>  	nb_tx = 0;
>  	for (i = 0; i < n; i++) {
> -		struct rte_mbuf *m;
>  		uint16_t port;
>  		uint16_t queue;
>  		struct txa_service_queue_info *tqi;
> 
> -		m = ev[i].mbuf;
> -		port = m->port;
> -		queue = rte_event_eth_tx_adapter_txq_get(m);
> +		if (!(ev[i].event_type & RTE_EVENT_TYPE_VECTOR)) {
> +			struct rte_mbuf *m;
> 
> -		tqi = txa_service_queue(txa, port, queue);
> -		if (unlikely(tqi == NULL || !tqi->added)) {
> -			rte_pktmbuf_free(m);
> -			continue;
> -		}
> +			m = ev[i].mbuf;
> +			port = m->port;
> +			queue = rte_event_eth_tx_adapter_txq_get(m);
> 
> -		nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
> +			tqi = txa_service_queue(txa, port, queue);
> +			if (unlikely(tqi == NULL || !tqi->added)) {
> +				rte_pktmbuf_free(m);
> +				continue;
> +			}
> +
> +			nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
> +		} else {
> +			nb_tx += txa_process_event_vector(txa, ev[i].vec);
> +		}
>  	}
> 
>  	stats->tx_packets += nb_tx;
> diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
> index 254a31b1f..ed6b5ba59 100644
> --- a/lib/librte_eventdev/rte_eventdev.c
> +++ b/lib/librte_eventdev/rte_eventdev.c
> @@ -196,7 +196,10 @@ rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
>  	if (caps == NULL)
>  		return -EINVAL;
> 
> -	*caps = 0;
> +	if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
> +		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
> +	else
> +		*caps = 0;
> 
>  	return dev->dev_ops->eth_tx_adapter_caps_get ?
>  			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
> --
> 2.17.1

With the changes above, this looks good.

Acked-by: Jay Jayatheerthan <jay.jayatheerthan at intel.com>



More information about the dev mailing list