[PATCH v3 07/36] net/ice: refactor context descriptor handling

Loftus, Ciara ciara.loftus at intel.com
Fri Feb 6 11:47:11 CET 2026


> Subject: [PATCH v3 07/36] net/ice: refactor context descriptor handling
> 
> Create a single function to manage all context descriptor handling,
> which returns either 0 or 1 depending on whether a descriptor is needed
> or not, as well as returning directly the descriptor contents if
> relevant.
> 
> Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
> ---
>  drivers/net/intel/ice/ice_rxtx.c | 104 +++++++++++++++++--------------
>  1 file changed, 57 insertions(+), 47 deletions(-)
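Just to check my understanding of the flow described in the commit message: the datapath
now looks roughly like the below (simplified sketch, names taken from this patch), i.e.
get_context_desc() both decides whether a context descriptor slot is consumed and
pre-computes its two quadwords, which are only written to the ring later if nb_ctx is
non-zero.

	uint64_t cd_qw0, cd_qw1;

	nb_ctx = get_context_desc(ol_flags, tx_pkt, &tx_offload, txq, &cd_qw0, &cd_qw1);
	/* ... descriptor ring space checks ... */
	if (nb_ctx) {
		uint64_t *ctx_txd = RTE_CAST_PTR(uint64_t *, &ci_tx_ring[tx_id]);

		ctx_txd[0] = cd_qw0;	/* tunneling params + outer VLAN tag */
		ctx_txd[1] = cd_qw1;	/* type/cmd/TSO/MSS quadword */
	}

If that matches the intent, I have one comment below on the doc comment of the new
function.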
> 
> diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
> index 2a53b614b2..cc442fed75 100644
> --- a/drivers/net/intel/ice/ice_rxtx.c
> +++ b/drivers/net/intel/ice/ice_rxtx.c
> @@ -2966,10 +2966,6 @@ ice_txd_enable_checksum(uint64_t ol_flags,
>  			uint32_t *td_offset,
>  			union ci_tx_offload tx_offload)
>  {
> -	/* Set MACLEN */
> -	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
> -		*td_offset |= (tx_offload.l2_len >> 1)
> -			<< CI_TX_DESC_LEN_MACLEN_S;
> 
>  	/* Enable L3 checksum offloads */
>  	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
> @@ -3052,7 +3048,7 @@ ice_calc_context_desc(uint64_t flags)
> 
>  /* set ice TSO context descriptor */
>  static inline uint64_t
> -ice_set_tso_ctx(struct rte_mbuf *mbuf, union ci_tx_offload tx_offload)
> +ice_set_tso_ctx(uint64_t ol_flags, const struct rte_mbuf *mbuf, union ci_tx_offload tx_offload)
>  {
>  	uint64_t ctx_desc = 0;
>  	uint32_t cd_cmd, hdr_len, cd_tso_len;
> @@ -3063,7 +3059,7 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ci_tx_offload tx_offload)
>  	}
> 
>  	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
> -	hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
> +	hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
>  		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
> 
>  	cd_cmd = CI_TX_CTX_DESC_TSO;
> @@ -3075,6 +3071,49 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ci_tx_offload tx_offload)
>  	return ctx_desc;
>  }
> 
> +/* compute a context descriptor if one is necessary based on the ol_flags
> + *
> + * Returns 0 if no descriptor is necessary.
> + * Returns 1 if one is necessary and the contents of the descriptor are returned
> + *   in the values pointed to by qw0 and qw1. td_offset may also be modified.

Regarding the comment above: "td_offset" is not a parameter of this function, so I
assume that part of the comment is obsolete.
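
Perhaps the last sentence could simply be dropped, e.g. something like:

/* compute a context descriptor if one is necessary based on the ol_flags
 *
 * Returns 0 if no descriptor is necessary.
 * Returns 1 if one is necessary and the contents of the descriptor are returned
 *   in the values pointed to by qw0 and qw1.
 */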

> + */
> +static __rte_always_inline uint16_t
> +get_context_desc(uint64_t ol_flags, const struct rte_mbuf *tx_pkt,
> +	const union ci_tx_offload *tx_offload, const struct ci_tx_queue *txq,
> +	uint64_t *qw0, uint64_t *qw1)
> +{
> +	uint16_t cd_l2tag2 = 0;
> +	uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
> +	uint32_t cd_tunneling_params = 0;
> +	uint64_t ptp_tx_index = txq->ice_vsi->adapter->ptp_tx_index;
> +
> +	if (ice_calc_context_desc(ol_flags) == 0)
> +		return 0;
> +
> +	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +		ice_parse_tunneling_params(ol_flags, *tx_offload, &cd_tunneling_params);
> +
> +	if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
> +		cd_type_cmd_tso_mss |= ice_set_tso_ctx(ol_flags, tx_pkt, *tx_offload);
> +	else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
> +		cd_type_cmd_tso_mss |=
> +			((uint64_t)CI_TX_CTX_DESC_TSYN << CI_TXD_QW1_CMD_S) |
> +			((ptp_tx_index << ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
> +
> +
> +	/* TX context descriptor based double VLAN insert */
> +	if (ol_flags & RTE_MBUF_F_TX_QINQ) {
> +		cd_l2tag2 = tx_pkt->vlan_tci_outer;
> +		cd_type_cmd_tso_mss |= ((uint64_t)CI_TX_CTX_DESC_IL2TAG2 << CI_TXD_QW1_CMD_S);
> +	}
> +
> +	*qw0 = rte_cpu_to_le_32(cd_tunneling_params) |
> +		((uint64_t)rte_cpu_to_le_16(cd_l2tag2) << 32);
> +	*qw1 = rte_cpu_to_le_64(cd_type_cmd_tso_mss);
> +
> +	return 1;
> +}
> +
>  uint16_t
>  ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  {
> @@ -3085,7 +3124,6 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	struct ci_tx_entry *txe, *txn;
>  	struct rte_mbuf *tx_pkt;
>  	struct rte_mbuf *m_seg;
> -	uint32_t cd_tunneling_params;
>  	uint16_t tx_id;
>  	uint16_t ts_id = -1;
>  	uint16_t nb_tx;
> @@ -3096,6 +3134,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	uint32_t td_tag = 0;
>  	uint16_t tx_last;
>  	uint16_t slen;
> +	uint16_t l2_len;
>  	uint64_t buf_dma_addr;
>  	uint64_t ol_flags;
>  	union ci_tx_offload tx_offload = {0};
> @@ -3114,20 +3153,25 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		(void)ci_tx_xmit_cleanup(txq);
> 
>  	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> +		uint64_t cd_qw0, cd_qw1;
>  		tx_pkt = *tx_pkts++;
> 
> +		ol_flags = tx_pkt->ol_flags;
>  		td_cmd = 0;
>  		td_tag = 0;
> -		td_offset = 0;
> -		ol_flags = tx_pkt->ol_flags;
> +		l2_len = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ?
> +				tx_pkt->outer_l2_len : tx_pkt->l2_len;
> +		td_offset = (l2_len >> 1) << CI_TX_DESC_LEN_MACLEN_S;
> +
>  		tx_offload.l2_len = tx_pkt->l2_len;
>  		tx_offload.l3_len = tx_pkt->l3_len;
>  		tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
>  		tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
>  		tx_offload.l4_len = tx_pkt->l4_len;
>  		tx_offload.tso_segsz = tx_pkt->tso_segsz;
> +
>  		/* Calculate the number of context descriptors needed. */
> -		nb_ctx = ice_calc_context_desc(ol_flags);
> +		nb_ctx = get_context_desc(ol_flags, tx_pkt, &tx_offload, txq, &cd_qw0, &cd_qw1);
> 
>  		/* The number of descriptors that must be allocated for
>  		 * a packet equals to the number of the segments of that
> @@ -3169,15 +3213,6 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			td_tag = tx_pkt->vlan_tci;
>  		}
> 
> -		/* Fill in tunneling parameters if necessary */
> -		cd_tunneling_params = 0;
> -		if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
> -			td_offset |= (tx_offload.outer_l2_len >> 1)
> -				<< CI_TX_DESC_LEN_MACLEN_S;
> -			ice_parse_tunneling_params(ol_flags, tx_offload,
> -						   &cd_tunneling_params);
> -		}
> -
>  		/* Enable checksum offloading */
>  		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
>  			ice_txd_enable_checksum(ol_flags, &td_cmd,
> @@ -3185,11 +3220,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>  		if (nb_ctx) {
>  			/* Setup TX context descriptor if required */
> -			volatile struct ice_tx_ctx_desc *ctx_txd =
> -				(volatile struct ice_tx_ctx_desc *)
> -					&ci_tx_ring[tx_id];
> -			uint16_t cd_l2tag2 = 0;
> -			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
> +			uint64_t *ctx_txd = RTE_CAST_PTR(uint64_t *, &ci_tx_ring[tx_id]);
> 
>  			txn = &sw_ring[txe->next_id];
>  			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> @@ -3198,29 +3229,8 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  				txe->mbuf = NULL;
>  			}
> 
> -			if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
> -				cd_type_cmd_tso_mss |=
> -					ice_set_tso_ctx(tx_pkt, tx_offload);
> -			else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
> -				cd_type_cmd_tso_mss |=
> -					((uint64_t)CI_TX_CTX_DESC_TSYN <<
> -					CI_TXD_QW1_CMD_S) |
> -					 (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index <<
> -					 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
> -
> -			ctx_txd->tunneling_params =
> -				rte_cpu_to_le_32(cd_tunneling_params);
> -
> -			/* TX context descriptor based double VLAN insert */
> -			if (ol_flags & RTE_MBUF_F_TX_QINQ) {
> -				cd_l2tag2 = tx_pkt->vlan_tci_outer;
> -				cd_type_cmd_tso_mss |=
> -					((uint64_t)CI_TX_CTX_DESC_IL2TAG2 <<
> -					 CI_TXD_QW1_CMD_S);
> -			}
> -			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
> -			ctx_txd->qw1 =
> -				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
> +			ctx_txd[0] = cd_qw0;
> +			ctx_txd[1] = cd_qw1;
> 
>  			txe->last_id = tx_last;
>  			tx_id = txe->next_id;
> --
> 2.51.0


