[dpdk-dev] [PATCH v5 3/6] examples/ioat: add rawdev copy mode support

Baran, MarcinX marcinx.baran at intel.com
Fri Sep 27 16:03:02 CEST 2019


-----Original Message-----
From: Bruce Richardson <bruce.richardson at intel.com> 
Sent: Friday, September 27, 2019 12:06 PM
To: Baran, MarcinX <marcinx.baran at intel.com>
Cc: dev at dpdk.org; Modrak, PawelX <pawelx.modrak at intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 3/6] examples/ioat: add rawdev copy mode support

On Fri, Sep 20, 2019 at 09:37:11AM +0200, Marcin Baran wrote:
> Added support for copying packets using a rawdev device. Each port's Rx
> queue is assigned a DMA channel for the copy.
> 
> Signed-off-by: Marcin Baran <marcinx.baran at intel.com>
> Signed-off-by: Pawel Modrak <pawelx.modrak at intel.com>
> ---
>  examples/ioat/ioatfwd.c | 236 ++++++++++++++++++++++++++++++++--------
>  1 file changed, 189 insertions(+), 47 deletions(-)
> 
> diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
> index 3a092c6cf..c66ce7e49 100644
> --- a/examples/ioat/ioatfwd.c
> +++ b/examples/ioat/ioatfwd.c
> @@ -121,6 +121,50 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
>  		rte_pktmbuf_mtod(src, char *), src->data_len);
>  }
>  
> +static uint32_t
> +ioat_enqueue_packets(struct rte_mbuf **pkts,
> +	uint32_t nb_rx, uint16_t dev_id)
> +{
> +	int ret;
> +	uint32_t i;
> +	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
> +
> +	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
> +		&pkts[0]->rearm_data);
> +
> +	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +		(void *)pkts_copy, nb_rx);
> +
> +	if (unlikely(ret < 0))
> +		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
> +
> +	for (i = 0; i < nb_rx; i++) {
> +		/* Perform data copy */
> +		ret = rte_ioat_enqueue_copy(dev_id,
> +			pkts[i]->buf_iova
> +			- addr_offset,
> +			pkts_copy[i]->buf_iova
> +			- addr_offset,
> +			rte_pktmbuf_data_len(pkts[i])
> +			+ addr_offset,
> +			(uintptr_t)pkts[i],
> +			(uintptr_t)pkts_copy[i],
> +			0 /* nofence */);
> +
> +		if (ret != 1)
> +			break;
> +	}
> +
> +	ret = i;
> +	/* Free any not enqueued packets. */
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
> +	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
> +		nb_rx - i);
> +
> +
> +	return ret;
> +}
> +
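
[Note] The offset arithmetic above is the heart of the hardware copy path: addr_offset is the byte distance from the mbuf's rearm_data field to the start of its data buffer, so starting the DMA transfer addr_offset bytes before buf_iova and extending the length by the same amount lets a single descriptor copy the mbuf metadata together with the packet bytes, i.e. the same metadata that the software leg copies with pktmbuf_sw_copy(). The (uintptr_t) casts stash the source and copy mbuf pointers as completion handles, and rte_ioat_do_copies() further down is the doorbell that submits the whole enqueued batch. A minimal sketch of the matching drain side, assuming the tx_config/ioat_ids/rxtx_port names used elsewhere in this series (the real ioat_tx_port() in the full patch is authoritative):

static void
ioat_tx_drain(struct rxtx_port_config *tx_config, int i)
{
	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
	int nb_done;
	uint16_t nb_tx;

	/* Retrieve the handles stored at enqueue time: one src/dst
	 * mbuf pair per completed copy. */
	nb_done = rte_ioat_completed_copies(tx_config->ioat_ids[i],
		MAX_PKT_BURST, (void *)mbufs_src, (void *)mbufs_dst);
	if (nb_done <= 0)
		return;

	/* The sources are no longer needed once the DMA engine is done. */
	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)mbufs_src, nb_done);

	/* Transmit the hardware-made copies; free whatever the NIC
	 * does not accept. */
	nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0, mbufs_dst, nb_done);
	if (nb_tx < nb_done)
		rte_mempool_put_bulk(ioat_pktmbuf_pool,
			(void *)&mbufs_dst[nb_tx], nb_done - nb_tx);
}
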
>  /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
>  static void
>  ioat_rx_port(struct rxtx_port_config *rx_config)
> @@ -136,32 +180,40 @@ ioat_rx_port(struct rxtx_port_config *rx_config)
>  		if (nb_rx == 0)
>  			continue;
>  
> -		/* Perform packet software copy, free source packets */
> -		int ret;
> -		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> -
> -		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst_copy, nb_rx);
> -
> -		if (unlikely(ret < 0))
> -			rte_exit(EXIT_FAILURE,
> -				"Unable to allocate memory.\n");
> -
> -		for (j = 0; j < nb_rx; j++)
> -			pktmbuf_sw_copy(pkts_burst[j],
> -				pkts_burst_copy[j]);
> -
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)pkts_burst, nb_rx);
> -
> -		nb_enq = rte_ring_enqueue_burst(
> -			rx_config->rx_to_tx_ring,
> -			(void *)pkts_burst_copy, nb_rx, NULL);
> -
> -		/* Free any not enqueued packets. */
> -		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> -			(void *)&pkts_burst_copy[nb_enq],
> -			nb_rx - nb_enq);
> +		if (copy_mode == COPY_MODE_IOAT_NUM) {
> +			/* Perform packet hardware copy */
> +			nb_enq = ioat_enqueue_packets(pkts_burst,
> +				nb_rx, rx_config->ioat_ids[i]);
> +			if (nb_enq > 0)
> +				rte_ioat_do_copies(rx_config->ioat_ids[i]);
> +		} else {
> +			/* Perform packet software copy, free source packets */
> +			int ret;
> +			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> +
> +			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst_copy, nb_rx);
> +
> +			if (unlikely(ret < 0))
> +				rte_exit(EXIT_FAILURE,
> +					"Unable to allocate memory.\n");
> +
> +			for (j = 0; j < nb_rx; j++)
> +				pktmbuf_sw_copy(pkts_burst[j],
> +					pkts_burst_copy[j]);
> +
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)pkts_burst, nb_rx);
> +
> +			nb_enq = rte_ring_enqueue_burst(
> +				rx_config->rx_to_tx_ring,
> +				(void *)pkts_burst_copy, nb_rx, NULL);
> +
> +			/* Free any not enqueued packets. */
> +			rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +				(void *)&pkts_burst_copy[nb_enq],
> +				nb_rx - nb_enq);
> +		}

Would the diff in this patch be smaller if you switched the order of the branches so that the SW copy leg, which was added first, was processed first? You could even add in a dummy branch in patch 2, so that the indentation for that section remains unchanged.

/Bruce
[Marcin] Switched the order and added a dummy branch in patch 2. Also changed the ioat_tx_port() function the same way in v6.
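
[Note] The staging trick discussed above, sketched for illustration with a hypothetical dispatch helper (sw_copy_and_enqueue() stands in for the software leg shown in the diff; the actual v6 code may differ): in patch 2 the branch already exists but only the software leg has a body, so this patch fills in the hardware leg without re-indenting, and thus without re-diffing, the software leg.

static void
rx_copy_dispatch(struct rte_mbuf **pkts, uint32_t nb_rx,
	struct rxtx_port_config *rx_config, int i)
{
	if (copy_mode != COPY_MODE_IOAT_NUM) {
		/* Software copy leg, unchanged since patch 2 */
		sw_copy_and_enqueue(pkts, nb_rx, rx_config); /* hypothetical */
	} else {
		/* Hardware copy leg, the part this patch adds */
		uint32_t nb_enq = ioat_enqueue_packets(pkts,
			nb_rx, rx_config->ioat_ids[i]);
		if (nb_enq > 0)
			rte_ioat_do_copies(rx_config->ioat_ids[i]);
	}
}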

