[dpdk-dev] [PATCH v3] net/ice: refactor the Rx FlexiMD handling

Guo, Jia jia.guo at intel.com
Tue Sep 22 07:35:12 CEST 2020


Hi Haiyue,

> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang at intel.com>
> Sent: Friday, September 18, 2020 9:06 AM
> To: dev at dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang at intel.com>; Jiang, JunyuX
> <junyux.jiang at intel.com>; Rong, Leyi <leyi.rong at intel.com>; Yang, Qiming
> <qiming.yang at intel.com>; Sun, GuinanX <guinanx.sun at intel.com>; Guo,
> Junfeng <junfeng.guo at intel.com>; Guo, Jia <jia.guo at intel.com>; Wang,
> Haiyue <haiyue.wang at intel.com>
> Subject: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> The hardware supports many kinds of FlexiMDs set in the Rx descriptor, and
> the FlexiMDs can have different offsets in the descriptor according to the
> DDP package setting.
> 
> The FlexiMD type and offset are identified by the RXDID, which is used to
> set up the queue.
> 
> To support different RXDIDs in the future, refactor the Rx FlexiMD
> handling into functions mapped to the related RXDIDs.
> 
> Signed-off-by: Haiyue Wang <haiyue.wang at intel.com>
> ---
> v3: remove the typedef's __rte_unused, and rewrite the commit title and
>     message.
> v2: assign the handle for ICE_RXDID_COMMS_OVS directly, not use
>     fall-through.
> ---
>  drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
>  drivers/net/ice/ice_rxtx.h |   5 +
>  2 files changed, 163 insertions(+), 105 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index fef6ad454..93a0ac691 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -25,40 +25,6 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
>  uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
>  uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> 
> -static inline uint64_t
> -ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
> -{
> -	static struct {
> -		uint64_t *ol_flag;
> -		bool chk_valid;
> -	} ol_flag_map[] = {
> -		[ICE_RXDID_COMMS_AUX_VLAN] = {
> -			&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV4] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV6] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
> -		[ICE_RXDID_COMMS_AUX_TCP] = {
> -			&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
> -			&rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
> -	};
> -	uint64_t *ol_flag;
> -
> -	if (rxdid < RTE_DIM(ol_flag_map)) {
> -		ol_flag = ol_flag_map[rxdid].ol_flag;
> -		if (!ol_flag)
> -			return 0ULL;
> -
> -		*chk_valid = ol_flag_map[rxdid].chk_valid;
> -		return *ol_flag;
> -	}
> -
> -	return 0ULL;
> -}
> -
>  static inline uint8_t
>  ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
>  {
> @@ -76,6 +42,159 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
>  				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
>  }
> 
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
> +				   struct rte_mbuf *mb,
> +				   volatile union ice_rx_flex_desc *rxdp)
> +{
> +	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	uint16_t stat_err;
> +#endif

This #ifndef could be better combined with the #ifndef below.
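
For example, something like this (just a sketch, hoisting the RSS check
above the FDIR check as the old ice_rxd_to_pkt_fields did):

static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
				   struct rte_mbuf *mb,
				   volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	/* declaration and use of stat_err now share one #ifndef block */
	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);

	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= PKT_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}
#endif

	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}
}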

> +
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
> +				      struct rte_mbuf *mb,
> +				      volatile union ice_rx_flex_desc *rxdp)
> +{
> +	volatile struct ice_32b_rx_flex_desc_comms *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> +	uint16_t stat_err;
> +
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +	if (rxq->xtr_ol_flag) {
> +		uint32_t metadata = 0;
> +
> +		stat_err = rte_le_to_cpu_16(desc->status_error1);
> +
> +		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> +			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> +
> +		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> +			metadata |=
> +				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
> +
> +		if (metadata) {
> +			mb->ol_flags |= rxq->xtr_ol_flag;
> +
> +			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> +		}
> +	}
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> +				      struct rte_mbuf *mb,
> +				      volatile union ice_rx_flex_desc *rxdp)
> +{
> +	volatile struct ice_32b_rx_flex_desc_comms *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> +	uint16_t stat_err;
> +
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +	if (rxq->xtr_ol_flag) {
> +		uint32_t metadata = 0;
> +
> +		if (desc->flex_ts.flex.aux0 != 0xFFFF)
> +			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> +		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
> +			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);

So you mean that ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S cannot be used to identify the IPv4 header offset and IPv6 header offset here in RXDID #25?
And if they can, and they will never be set at the same time, is separating this v2 from v1 necessary?
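
In other words, could the 0xFFFF sentinel checks in v2 be replaced by
valid-bit checks like this (a sketch only, assuming XTRMD4/XTRMD5 are
reported for RXDID 25 and never set together, with stat_err read from
desc->status_error1 as in v1):

		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
		else if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);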

> +
> +		if (metadata) {
> +			mb->ol_flags |= rxq->xtr_ol_flag;
> +
> +			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> +		}
> +	}
> +#endif
> +}
> +
> +static void
> +ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
> +{
> +	switch (rxdid) {
> +	case ICE_RXDID_COMMS_AUX_VLAN:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV4:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV6:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_TCP:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
> +		break;
> +
> +	case ICE_RXDID_COMMS_OVS:
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> +		break;
> +
> +	default:
> +		/* update this according to the RXDID for PROTO_XTR_NONE */
> +		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> +		break;
> +	}
> +
> +	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
> +		rxq->xtr_ol_flag = 0;
> +}
> +
>  static enum ice_status
>  ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  {
> @@ -158,6 +277,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  		return -EINVAL;
>  	}
> 
> +	ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
> +
>  	/* Enable Flexible Descriptors in the queue context which
>  	 * allows this driver to select a specific receive descriptor format
>  	 */
> @@ -1338,74 +1459,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
>  		   mb->vlan_tci, mb->vlan_tci_outer);
>  }
> 
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -#define ICE_RX_PROTO_XTR_VALID \
> -	((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
> -	 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> -
> -static void
> -ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
> -		     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
> -{
> -	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
> -	uint32_t metadata = 0;
> -	uint64_t ol_flag;
> -	bool chk_valid;
> -
> -	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
> -	if (unlikely(!ol_flag))
> -		return;
> -
> -	if (chk_valid) {
> -		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> -			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> -
> -		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> -			metadata |=
> -				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
> -	} else {
> -		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
> -			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
> -		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
> -			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
> -	}
> -
> -	if (!metadata)
> -		return;
> -
> -	mb->ol_flags |= ol_flag;
> -
> -	*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> -}
> -#endif
> -
> -static inline void
> -ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> -		      volatile union ice_rx_flex_desc *rxdp)
> -{
> -	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> -			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -	uint16_t stat_err;
> -
> -	stat_err = rte_le_to_cpu_16(desc->status_error0);
> -	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> -		mb->ol_flags |= PKT_RX_RSS_HASH;
> -		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> -	}
> -#endif
> -
> -	if (desc->flow_id != 0xFFFFFFFF) {
> -		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> -		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> -	}
> -
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -	if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
> -		ice_rxd_to_proto_xtr(mb, desc);
> -#endif
> -}
> -
>  #define ICE_LOOK_AHEAD 8
>  #if (ICE_LOOK_AHEAD != 8)
>  #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
> @@ -1463,7 +1516,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>  			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
>  				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
>  			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> -			ice_rxd_to_pkt_fields(mb, &rxdp[j]);
> +			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> 
>  			mb->ol_flags |= pkt_flags;
>  		}
> @@ -1760,7 +1813,7 @@ ice_recv_scattered_pkts(void *rx_queue,
>  		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		ice_rxd_to_vlan_tci(first_seg, &rxd);
> -		ice_rxd_to_pkt_fields(first_seg, &rxd);
> +		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
>  		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  		first_seg->ol_flags |= pkt_flags;
>  		/* Prefetch data of first segment, if configured to do so. */
> @@ -2160,7 +2213,7 @@ ice_recv_pkts(void *rx_queue,
>  		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		ice_rxd_to_vlan_tci(rxm, &rxd);
> -		ice_rxd_to_pkt_fields(rxm, &rxd);
> +		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
>  		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  		rxm->ol_flags |= pkt_flags;
>  		/* copy old mbuf to rx_pkts */
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index 9fa57b3b2..de2291788 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -42,6 +42,9 @@
> 
>  typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
>  typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> +typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
> +					struct rte_mbuf *mb,
> +					volatile union ice_rx_flex_desc *rxdp);
> 
>  struct ice_rx_entry {
>  	struct rte_mbuf *mbuf;
> @@ -82,6 +85,8 @@ struct ice_rx_queue {
>  	bool q_set; /* indicate if rx queue has been configured */
>  	bool rx_deferred_start; /* don't start this queue in dev start */
>  	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> +	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> +	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;

If you create a function pointer type here in the .h file, it would be better to add some documentation for it.
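
For example (wording is only a suggestion):

/** Translate the fields of a flex Rx descriptor (selected by RXDID)
 *  into mbuf fields such as the RSS hash, FDIR ID and protocol
 *  extraction metadata.
 */
typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
					struct rte_mbuf *mb,
					volatile union ice_rx_flex_desc *rxdp);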

>  	ice_rx_release_mbufs_t rx_rel_mbufs;
>  };
> 
> --
> 2.28.0


