[dpdk-dev] [PATCH] net/iavf: fix the VLAN tag extraction handling
Xie, WeiX
weix.xie at intel.com
Tue Feb 23 08:18:24 CET 2021
Tested-by: Xie, WeiX <weix.xie at intel.com>
Regards,
Xie Wei
> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Leyi Rong
> Sent: Tuesday, February 23, 2021 11:11 AM
> To: Zhang, Qi Z <qi.z.zhang at intel.com>; Lu, Wenzhuo
> <wenzhuo.lu at intel.com>; Xing, Beilei <beilei.xing at intel.com>
> Cc: dev at dpdk.org; Wang, Haiyue <haiyue.wang at intel.com>; Rong, Leyi
> <leyi.rong at intel.com>
> Subject: [dpdk-dev] [PATCH] net/iavf: fix the VLAN tag extraction handling
>
> From: Haiyue Wang <haiyue.wang at intel.com>
>
> The new VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability added support that
> allows the PF to set the location of the RX VLAN tag for stripping offloads.
>
> So the VF needs to extract the VLAN tag according to the location flags.
>
> Fixes: 1c301e8c3cff ("net/iavf: support new VLAN capabilities")
>
> Signed-off-by: Haiyue Wang <haiyue.wang at intel.com>
> Signed-off-by: Leyi Rong <leyi.rong at intel.com>
> ---
> drivers/net/iavf/iavf_rxtx.c | 62 +++++----
> drivers/net/iavf/iavf_rxtx.h | 3 +
> drivers/net/iavf/iavf_rxtx_vec_avx2.c | 179 ++++++++++++++++++++------
> 3 files changed, 182 insertions(+), 62 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> af5a28d84d..41409e1525 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -543,6 +543,24 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
> }
>
> + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
> +		struct virtchnl_vlan_supported_caps *stripping_support =
> +			&vf->vlan_v2_caps.offloads.stripping_support;
> + uint32_t stripping_cap;
> +
> + if (stripping_support->outer)
> + stripping_cap = stripping_support->outer;
> + else
> + stripping_cap = stripping_support->inner;
> +
> +		if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
> +			rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
> +		else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
> +			rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
> +	} else {
> +		rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
> +	}
> +
> iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
>
> rxq->mp = mp;
> @@ -972,31 +990,27 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile
> union iavf_rx_desc *rxdp)
>
> static inline void
> iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
> - volatile union iavf_rx_flex_desc *rxdp)
> + volatile union iavf_rx_flex_desc *rxdp,
> + uint8_t rx_flags)
> {
> - if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
> - (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
> - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
> - mb->vlan_tci =
> - rte_le_to_cpu_16(rxdp->wb.l2tag1);
> - } else {
> - mb->vlan_tci = 0;
> - }
> + uint16_t vlan_tci = 0;
> +
> + if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
> + rte_le_to_cpu_64(rxdp->wb.status_error0) &
> + (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
> + vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
>
> #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
> - if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
> - (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
> - mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
> - PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
> - mb->vlan_tci_outer = mb->vlan_tci;
> - mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
> -		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
> -			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
> -			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
> - } else {
> - mb->vlan_tci_outer = 0;
> - }
> + if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
> + rte_le_to_cpu_16(rxdp->wb.status_error1) &
> + (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
> + vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
> #endif
> +
> + if (vlan_tci) {
> + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
> + mb->vlan_tci = vlan_tci;
> + }
> }
>
> /* Translate the rx descriptor status and error fields to pkt flags */ @@ -
> 1314,7 +1328,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
> rxm->ol_flags = 0;
> rxm->packet_type =
> ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> - iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
> + iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
> rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
> rxm->ol_flags |= pkt_flags;
> @@ -1455,7 +1469,7 @@ iavf_recv_scattered_pkts_flex_rxd(void
> *rx_queue, struct rte_mbuf **rx_pkts,
> first_seg->ol_flags = 0;
> first_seg->packet_type =
> ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> - iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
> + iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
> rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
>
> @@ -1692,7 +1706,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct
> iavf_rx_queue *rxq)
>
> mb->packet_type =
> ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>
> rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> - iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
> +			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
> rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> stat_err0 =
> rte_le_to_cpu_16(rxdp[j].wb.status_error0);
> pkt_flags =
> iavf_flex_rxd_error_to_pkt_flags(stat_err0);
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index
> d583badd98..922ddadad1 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -190,6 +190,9 @@ struct iavf_rx_queue {
> bool q_set; /* if rx queue has been configured */
> bool rx_deferred_start; /* don't start this queue in dev start */
> const struct iavf_rxq_ops *ops;
> + uint8_t rx_flags;
> +#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
> +#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(1)
> uint8_t proto_xtr; /* protocol extraction type */
> uint64_t xtr_ol_flag;
> /* flexible descriptor metadata extraction offload flag */ diff
> --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
> b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
> index 8f28afc8c5..f443300f54 100644
> --- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
> +++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
> @@ -777,20 +777,32 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct
> iavf_rx_queue *rxq,
> * If RSS(bit12)/VLAN(bit13) are set,
> * shuffle moves appropriate flags in place.
> */
> - const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
> + const __m256i rss_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
> 0, 0, 0, 0,
> 0, 0, 0, 0,
> - PKT_RX_RSS_HASH | PKT_RX_VLAN |
> PKT_RX_VLAN_STRIPPED,
> - PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + PKT_RX_RSS_HASH, 0,
> PKT_RX_RSS_HASH, 0,
> /* end up 128-bits */
> 0, 0, 0, 0,
> 0, 0, 0, 0,
> 0, 0, 0, 0,
> - PKT_RX_RSS_HASH | PKT_RX_VLAN |
> PKT_RX_VLAN_STRIPPED,
> - PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + PKT_RX_RSS_HASH, 0,
> PKT_RX_RSS_HASH, 0);
>
> + const __m256i vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + 0, 0,
> + /* end up 128-bits */
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
> + 0, 0);
> +
> uint16_t i, received;
>
> for (i = 0, received = 0; i < nb_pkts; @@ -938,13 +950,24 @@
> _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
> _mm256_srli_epi32(flag_bits, 4));
> l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
> l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
> +
> /* set rss and vlan flags */
> const __m256i rss_vlan_flag_bits =
> _mm256_srli_epi32(flag_bits, 12);
> - const __m256i rss_vlan_flags =
> - _mm256_shuffle_epi8(rss_vlan_flags_shuf,
> + const __m256i rss_flags =
> + _mm256_shuffle_epi8(rss_flags_shuf,
> rss_vlan_flag_bits);
>
> + __m256i vlan_flags = _mm256_setzero_si256();
> +
> +			if (rxq->rx_flags == IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1)
> +				vlan_flags =
> +					_mm256_shuffle_epi8(vlan_flags_shuf,
> +							    rss_vlan_flag_bits);
> +
> + const __m256i rss_vlan_flags =
> + _mm256_or_si256(rss_flags, vlan_flags);
> +
> /* merge flags */
> __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
> rss_vlan_flags);
> @@ -997,7 +1020,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct
> iavf_rx_queue *rxq,
> * will cause performance drop to get into this context.
> */
> 		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
> -				DEV_RX_OFFLOAD_RSS_HASH) {
> +				DEV_RX_OFFLOAD_RSS_HASH ||
> +		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
> /* load bottom half of every 32B desc */
> const __m128i raw_desc_bh7 =
> _mm_load_si128
> @@ -1048,36 +1072,115 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct
> iavf_rx_queue *rxq,
>
> (_mm256_castsi128_si256(raw_desc_bh0),
> raw_desc_bh1, 1);
>
> - /**
> - * to shift the 32b RSS hash value to the
> - * highest 32b of each 128b before mask
> - */
> - __m256i rss_hash6_7 =
> - _mm256_slli_epi64(raw_desc_bh6_7, 32);
> - __m256i rss_hash4_5 =
> - _mm256_slli_epi64(raw_desc_bh4_5, 32);
> - __m256i rss_hash2_3 =
> - _mm256_slli_epi64(raw_desc_bh2_3, 32);
> - __m256i rss_hash0_1 =
> - _mm256_slli_epi64(raw_desc_bh0_1, 32);
> -
> - __m256i rss_hash_msk =
> - _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
> - 0xFFFFFFFF, 0, 0, 0);
> -
> - rss_hash6_7 = _mm256_and_si256
> - (rss_hash6_7, rss_hash_msk);
> - rss_hash4_5 = _mm256_and_si256
> - (rss_hash4_5, rss_hash_msk);
> - rss_hash2_3 = _mm256_and_si256
> - (rss_hash2_3, rss_hash_msk);
> - rss_hash0_1 = _mm256_and_si256
> - (rss_hash0_1, rss_hash_msk);
> -
> - mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
> - mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
> - mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
> - mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
> +			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
> +					DEV_RX_OFFLOAD_RSS_HASH) {
> + /**
> + * to shift the 32b RSS hash value to the
> + * highest 32b of each 128b before mask
> + */
> + __m256i rss_hash6_7 =
> + _mm256_slli_epi64(raw_desc_bh6_7,
> 32);
> + __m256i rss_hash4_5 =
> + _mm256_slli_epi64(raw_desc_bh4_5,
> 32);
> + __m256i rss_hash2_3 =
> + _mm256_slli_epi64(raw_desc_bh2_3,
> 32);
> + __m256i rss_hash0_1 =
> + _mm256_slli_epi64(raw_desc_bh0_1,
> 32);
> +
> + const __m256i rss_hash_msk =
> + _mm256_set_epi32(0xFFFFFFFF, 0, 0,
> 0,
> + 0xFFFFFFFF, 0, 0, 0);
> +
> + rss_hash6_7 = _mm256_and_si256
> + (rss_hash6_7, rss_hash_msk);
> + rss_hash4_5 = _mm256_and_si256
> + (rss_hash4_5, rss_hash_msk);
> + rss_hash2_3 = _mm256_and_si256
> + (rss_hash2_3, rss_hash_msk);
> + rss_hash0_1 = _mm256_and_si256
> + (rss_hash0_1, rss_hash_msk);
> +
> + mb6_7 = _mm256_or_si256(mb6_7,
> rss_hash6_7);
> + mb4_5 = _mm256_or_si256(mb4_5,
> rss_hash4_5);
> + mb2_3 = _mm256_or_si256(mb2_3,
> rss_hash2_3);
> + mb0_1 = _mm256_or_si256(mb0_1,
> rss_hash0_1);
> + }
> +
> +			if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
> + /* merge the status/error-1 bits into one
> register */
> + const __m256i status1_4_7 =
> +
> _mm256_unpacklo_epi32(raw_desc_bh6_7,
> + raw_desc_bh4_5);
> + const __m256i status1_0_3 =
> +
> _mm256_unpacklo_epi32(raw_desc_bh2_3,
> + raw_desc_bh0_1);
> +
> + const __m256i status1_0_7 =
> +
> _mm256_unpacklo_epi64(status1_4_7,
> + status1_0_3);
> +
> + const __m256i l2tag2p_flag_mask =
> + _mm256_set1_epi32
> + (1 <<
> IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
> +
> + __m256i l2tag2p_flag_bits =
> + _mm256_and_si256
> + (status1_0_7, l2tag2p_flag_mask);
> +
> + l2tag2p_flag_bits =
> + _mm256_srli_epi32(l2tag2p_flag_bits,
> +
> IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
> +
> + const __m256i l2tag2_flags_shuf =
> + _mm256_set_epi8(0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + /* end up 128-bits */
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0, 0, 0,
> + 0, 0,
> + PKT_RX_VLAN |
> +
> PKT_RX_VLAN_STRIPPED,
> + 0);
> +
> + vlan_flags =
> +
> _mm256_shuffle_epi8(l2tag2_flags_shuf,
> + l2tag2p_flag_bits);
> +
> + /* merge with vlan_flags */
> + mbuf_flags = _mm256_or_si256
> + (mbuf_flags, vlan_flags);
> +
> + /* L2TAG2_2 */
> + __m256i vlan_tci6_7 =
> + _mm256_slli_si256(raw_desc_bh6_7,
> 4);
> + __m256i vlan_tci4_5 =
> + _mm256_slli_si256(raw_desc_bh4_5,
> 4);
> + __m256i vlan_tci2_3 =
> + _mm256_slli_si256(raw_desc_bh2_3,
> 4);
> + __m256i vlan_tci0_1 =
> + _mm256_slli_si256(raw_desc_bh0_1,
> 4);
> +
> + const __m256i vlan_tci_msk =
> + _mm256_set_epi32(0, 0xFFFF0000, 0,
> 0,
> + 0, 0xFFFF0000, 0, 0);
> +
> + vlan_tci6_7 = _mm256_and_si256
> + (vlan_tci6_7, vlan_tci_msk);
> + vlan_tci4_5 = _mm256_and_si256
> + (vlan_tci4_5, vlan_tci_msk);
> + vlan_tci2_3 = _mm256_and_si256
> + (vlan_tci2_3, vlan_tci_msk);
> + vlan_tci0_1 = _mm256_and_si256
> + (vlan_tci0_1, vlan_tci_msk);
> +
> + mb6_7 = _mm256_or_si256(mb6_7,
> vlan_tci6_7);
> + mb4_5 = _mm256_or_si256(mb4_5,
> vlan_tci4_5);
> + mb2_3 = _mm256_or_si256(mb2_3,
> vlan_tci2_3);
> + mb0_1 = _mm256_or_si256(mb0_1,
> vlan_tci0_1);
> + }
> } /* if() on RSS hash parsing */
> #endif
>
> --
> 2.17.1
More information about the dev
mailing list