[dpdk-dev] [PATCH v6 1/4] net/i40e: cleanup Tx buffers

Ananyev, Konstantin konstantin.ananyev at intel.com
Mon Dec 30 14:01:18 CET 2019



> 
> Add support to the i40e driver for the API rte_eth_tx_done_cleanup
> to force the freeing of consumed buffers on the Tx ring.
> 
> Signed-off-by: Chenxu Di <chenxux.di at intel.com>
> ---
>  drivers/net/i40e/i40e_ethdev.c    |   1 +
>  drivers/net/i40e/i40e_ethdev_vf.c |   1 +
>  drivers/net/i40e/i40e_rxtx.c      | 121 ++++++++++++++++++++++++++++++
>  drivers/net/i40e/i40e_rxtx.h      |   1 +
>  4 files changed, 124 insertions(+)
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 5999c964b..fad47a942 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -522,6 +522,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
>  	.mac_addr_set                 = i40e_set_default_mac_addr,
>  	.mtu_set                      = i40e_dev_mtu_set,
>  	.tm_ops_get                   = i40e_tm_ops_get,
> +	.tx_done_cleanup              = i40e_tx_done_cleanup,
>  };
> 
>  /* store statistics names and its offset in stats structure */
> diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
> index 5dba0928b..0ca5417d7 100644
> --- a/drivers/net/i40e/i40e_ethdev_vf.c
> +++ b/drivers/net/i40e/i40e_ethdev_vf.c
> @@ -215,6 +215,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
>  	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
>  	.mtu_set              = i40evf_dev_mtu_set,
>  	.mac_addr_set         = i40evf_set_default_mac_addr,
> +	.tx_done_cleanup      = i40e_tx_done_cleanup,
>  };
> 
>  /*
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
> index 17dc8c78f..883419bd7 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -2455,6 +2455,127 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
>  	}
>  }
> 
> +int i40e_tx_done_cleanup(void *q, uint32_t free_cnt)
> +{
> +	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)q;
> +	struct i40e_tx_entry *sw_ring;
> +	volatile struct i40e_tx_desc *txr;
> +	uint16_t tx_first; /* First segment analyzed. */
> +	uint16_t tx_id;    /* Current segment being processed. */
> +	uint16_t tx_last;  /* Last segment in the current packet. */
> +	uint16_t tx_next;  /* First segment of the next packet. */
> +	int count;
> +
> +	if (txq == NULL)
> +		return -ENODEV;
> +
> +	count = 0;
> +	sw_ring = txq->sw_ring;
> +	txr = txq->tx_ring;
> +
> +	/*
> +	 * tx_tail is the last sent packet on the sw_ring. Go to the end
> +	 * of that packet (the last segment in the packet chain) and
> +	 * then the next segment will be the start of the oldest packet
> +	 * in the sw_ring. This is the first packet that we will
> +	 * attempt to free.
> +	 */

Pretty much the same comments as for ixgbe.
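
As a side note for readers not familiar with this API: the new op is what backs
rte_eth_tx_done_cleanup() at the ethdev level. Below is a rough sketch of how an
application might call it from its TX path; the port/queue parameters, the count
of 32 and the error message are made-up placeholders for illustration, not part
of this patch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Ask the PMD to free up to 32 already-transmitted mbufs on one Tx queue. */
static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* free_cnt == 0 would mean "free as many as possible" */
	int ret = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	if (ret < 0)
		/* e.g. -ENOTSUP if the PMD does not implement the op */
		printf("tx_done_cleanup failed on port %u queue %u: %d\n",
		       port_id, queue_id, ret);
}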

> +
> +	/* Get last segment in most recently added packet. */
> +	tx_last = sw_ring[txq->tx_tail].last_id;
> +
> +	/* Get the next segment, which is the oldest segment in ring. */
> +	tx_first = sw_ring[tx_last].next_id;
> +
> +	/* Set the current index to the first. */
> +	tx_id = tx_first;
> +
> +	/*
> +	 * Loop through each packet. For each packet, verify that an
> +	 * mbuf exists and that the hardware has marked its last
> +	 * descriptor as done. If so, free the packet and move on.
> +	 */
> +	while (1) {
> +		tx_last = sw_ring[tx_id].last_id;
> +
> +		if (sw_ring[tx_last].mbuf) {
> +			if ((txr[tx_last].cmd_type_offset_bsz &
> +			    rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> +			    rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> +				/*
> +				 * mbuf still in use, nothing left to
> +				 * free.
> +				 */
> +				break;
> +
> +			/* Get the start of the next packet. */
> +			tx_next = sw_ring[tx_last].next_id;
> +
> +			/*
> +			 * Loop through all segments in a
> +			 * packet.
> +			 */
> +			do {
> +				rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
> +				sw_ring[tx_id].mbuf = NULL;
> +				sw_ring[tx_id].last_id = tx_id;
> +
> +				/* Move to next segment. */
> +				tx_id = sw_ring[tx_id].next_id;
> +
> +			} while (tx_id != tx_next);
> +
> +			/*
> +			 * Increment the number of packets
> +			 * freed.
> +			 */
> +			count++;
> +
> +			if (unlikely(count == (int)free_cnt))
> +				break;
> +		} else {
> +			/*
> +			 * There are multiple reasons to be here:
> +			 * 1) All the packets on the ring have been
> +			 *    freed - tx_id is equal to tx_first
> +			 *    and some packets have been freed.
> +			 *    - Done, exit
> +			 * 2) The interface has not sent a ring's worth
> +			 *    of packets yet, so the segment after tail
> +			 *    is still empty. Or a previous call to this
> +			 *    function freed some of the segments but
> +			 *    not all of them, so there is a hole in the
> +			 *    list. Hopefully this is a rare case.
> +			 *    - Walk the list and find the next mbuf. If
> +			 *      there isn't one, then done.
> +			 */
> +			if (likely(tx_id == tx_first && count != 0))
> +				break;
> +
> +			/*
> +			 * Walk the list and find the next mbuf, if any.
> +			 */
> +			do {
> +				/* Move to next segment. */
> +				tx_id = sw_ring[tx_id].next_id;
> +
> +				if (sw_ring[tx_id].mbuf)
> +					break;
> +
> +			} while (tx_id != tx_first);
> +
> +			/*
> +			 * Determine why previous loop bailed. If there
> +			 * is not an mbuf, done.
> +			 */
> +			if (sw_ring[tx_id].mbuf == NULL)
> +				break;
> +		}
> +	}
> +
> +	return count;
> +}
> +
>  void
>  i40e_reset_tx_queue(struct i40e_tx_queue *txq)
>  {
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 2106bb355..8f11f011a 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -212,6 +212,7 @@ void i40e_dev_free_queues(struct rte_eth_dev *dev);
>  void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
>  void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
>  void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
> +int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
>  int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
>  void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
> 
> --
> 2.17.1


