[PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases

Yang, Qiming qiming.yang at intel.com
Tue Aug 30 09:52:44 CEST 2022


Please retest with TCP, UDP, tunnel-TCP and tunnel-UDP packets; example commands below.
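For example, with testpmd in csum forwarding mode (the segment size and
port id here are only an illustration):

    testpmd> port stop 0
    testpmd> csum set tcp hw 0
    testpmd> csum parse-tunnel on 0
    testpmd> tso set 800 0
    testpmd> tunnel_tso set 800 0
    testpmd> port start 0
    testpmd> set fwd csum
    testpmd> start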

> -----Original Message-----
> From: peng1x.zhang at intel.com <peng1x.zhang at intel.com>
> Sent: Saturday, August 13, 2022 12:52 AM
> To: dev at dpdk.org
> Cc: Xing, Beilei <beilei.xing at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>;
> Zhang, Peng1X <peng1x.zhang at intel.com>
> Subject: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases

This should be submitted as a bug fix patch, with a Fixes: tag in the commit log (see the example below).
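Something like this in the commit log (the commit id is a placeholder for
the commit that introduced the issue):

    Fixes: <commit-id> ("net/iavf: ...")
    Cc: stable@dpdk.org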

> 
> From: Peng Zhang <peng1x.zhang at intel.com>
> 
No need for this line, it duplicates the sender.

> Hardware limits the max buffer size per Tx descriptor to (16K-1)B. So
> when TSO is enabled in an unencrypted scenario, the mbuf data size may
> exceed the limit and cause malicious behavior on the NIC.

So this patch is actually fixing tunnel TSO not being enabled.
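For reference, IAVF_MAX_DATA_PER_TXD below works out to 16383 B (16K-1),
so a TSO mbuf segment with, say, data_len = 45000 B has to be split into
ceil(45000 / 16383) = 3 data descriptors (the numbers are only an example).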

> 
> This patch supports Tx descriptors for this kind of large buffer.
> 
> Signed-off-by: Peng Zhang <peng1x.zhang at intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c | 66 ++++++++++++++++++++++++++++++++----
>  1 file changed, 60 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index dfd021889e..adec58e90a 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2642,6 +2642,47 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
>  	return NULL;
>  }
> 
> +/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
> +#define IAVF_MAX_DATA_PER_TXD \
> +	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
> +
> +static inline void
> +iavf_fill_unencrypt_desc(volatile struct iavf_tx_desc *txd, struct rte_mbuf *m,
> +			 volatile uint64_t desc_template, struct iavf_tx_entry *txe,
> +			 volatile struct iavf_tx_desc *txr, struct iavf_tx_entry *txe_ring,
> +			 int desc_idx_last)
> +{
> +		/* Setup TX Descriptor */
> +		int desc_idx;
> +		uint16_t slen = m->data_len;
> +		uint64_t buf_dma_addr = rte_mbuf_data_iova(m);
> +		struct iavf_tx_entry *txn = &txe_ring[txe->next_id];
> +
> +		while ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&

Why is only RTE_MBUF_F_TX_TCP_SEG checked here? UDP segmentation needs the same split; see the sketch below.
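Something like this would cover UDP segmentation as well (untested sketch):

    while ((m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) &&
           unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {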

> +			unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
> +			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> +
> +			txd->cmd_type_offset_bsz =
> +			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
> +			(uint64_t)IAVF_MAX_DATA_PER_TXD <<
> +			IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) | desc_template;
> +
> +			buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
> +			slen -= IAVF_MAX_DATA_PER_TXD;
> +
> +			txe->last_id = desc_idx_last;
> +			desc_idx = txe->next_id;
> +			txe = txn;
> +			txd = &txr[desc_idx];
> +			txn = &txe_ring[txe->next_id];
> +		}
> +
> +		txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> +		txd->cmd_type_offset_bsz =
> +			rte_cpu_to_le_64((uint64_t)slen << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) |
> +				desc_template;
> +}
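To illustrate the splitting arithmetic: each mbuf segment now consumes
ceil(data_len / IAVF_MAX_DATA_PER_TXD) data descriptors, and the ring-space
accounting has to match. A hypothetical helper (not in the patch, untested):

    /* Count data descriptors for a packet when each mbuf segment
     * may be split into (16K-1)B chunks.
     */
    static inline uint16_t
    iavf_calc_pkt_data_descs(struct rte_mbuf *m)
    {
            uint16_t count = 0;

            while (m != NULL) {
                    /* Ceiling division: full chunks plus a partial one. */
                    count += (m->data_len + IAVF_MAX_DATA_PER_TXD - 1) /
                             IAVF_MAX_DATA_PER_TXD;
                    m = m->next;
            }
            return count;
    }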
> +
>  /* TX function */
>  uint16_t
>  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> @@ -2650,6 +2691,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	volatile struct iavf_tx_desc *txr = txq->tx_ring;
>  	struct iavf_tx_entry *txe_ring = txq->sw_ring;
>  	struct iavf_tx_entry *txe, *txn;
> +	volatile struct iavf_tx_desc *txd;
>  	struct rte_mbuf *mb, *mb_seg;
>  	uint16_t desc_idx, desc_idx_last;
>  	uint16_t idx;
> @@ -2781,6 +2823,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			ddesc = (volatile struct iavf_tx_desc *)
>  					&txr[desc_idx];
> 
> +			txd = &txr[desc_idx];
>  			txn = &txe_ring[txe->next_id];
>  			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> 
> @@ -2788,10 +2831,16 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  				rte_pktmbuf_free_seg(txe->mbuf);
> 
>  			txe->mbuf = mb_seg;
> -			iavf_fill_data_desc(ddesc, mb_seg,
> -					ddesc_template, tlen, ipseclen);
> 
> -			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +			if (nb_desc_ipsec) {
> +				iavf_fill_data_desc(ddesc, mb_seg,
> +					ddesc_template, tlen, ipseclen);
> +				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +			} else {
> +				iavf_fill_unencrypt_desc(txd, mb_seg,
> +					ddesc_template, txe, txr, txe_ring, desc_idx_last);
> +				IAVF_DUMP_TX_DESC(txq, txd, desc_idx);
> +			}
> 
>  			txe->last_id = desc_idx_last;
>  			desc_idx = txe->next_id;
> @@ -2816,10 +2865,15 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			txq->nb_used = 0;
>  		}
> 
> -		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
> +		if (nb_desc_ipsec) {
> +			ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
>  				IAVF_TXD_DATA_QW1_CMD_SHIFT);
> -
> -		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
> +			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
> +		} else {
> +			txd->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
> +				IAVF_TXD_DATA_QW1_CMD_SHIFT);
> +			IAVF_DUMP_TX_DESC(txq, txd, desc_idx - 1);
> +		}
>  	}
> 
>  end_of_tx:
> --
> 2.25.1


