[dpdk-dev] [PATCH v9 6/9] net/virtio: reuse packed ring xmit functions

Marvin Liu <yong.liu@intel.com>
Fri Apr 24 11:24:42 CEST 2020


Move the xmit offload and packed ring xmit enqueue functions to the
header file. These functions will be reused by the packed ring
vectorized Tx function.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
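
As an illustration of the intended reuse (not part of this patch): a
vectorized Tx routine built in a separate compilation unit can fall
back to the relocated scalar helpers for packets it cannot batch. The
sketch below is hypothetical -- virtio_xmit_pkts_packed_vec_sketch()
is an invented name, and descriptor-availability checks and used-ring
cleanup are omitted -- while virtqueue_enqueue_xmit_packed() is the
helper this patch moves into virtqueue.h.

#include "virtqueue.h"

static inline uint16_t
virtio_xmit_pkts_packed_vec_sketch(struct virtnet_tx *txvq,
				   struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts)
{
	uint16_t nb_tx;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];

		if (txm->nb_segs > 1) {
			/* One slot per segment plus one for the separate
			 * virtio-net header, since can_push == 0 here.
			 */
			uint16_t slots = txm->nb_segs + 1;

			/* Chained mbuf: reuse the scalar enqueue moved
			 * to virtqueue.h by this patch.
			 */
			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
						      0 /* can_push */,
						      1 /* in_order */);
			continue;
		}

		/* Single-segment packets would be handled here with
		 * batched SIMD descriptor stores (omitted).
		 */
	}

	return nb_tx;
}

Because the helpers become static inline in virtqueue.h, any Tx path
that includes the header gets them without additional linkage, which
is what the vectorized datapath introduced later in this series
relies on.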

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index c9b6e7844..cf18fe564 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -264,10 +264,6 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
 	return i;
 }
 
-#ifndef DEFAULT_TX_FREE_THRESH
-#define DEFAULT_TX_FREE_THRESH 32
-#endif
-
 static void
 virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 {
@@ -562,68 +558,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 }
 
 
-/* avoid write operation when necessary, to lessen cache issues */
-#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
-	if ((var) != (val))			\
-		(var) = (val);			\
-} while (0)
-
-#define virtqueue_clear_net_hdr(_hdr) do {		\
-	ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);	\
-	ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);	\
-	ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);		\
-	ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);	\
-	ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);	\
-	ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);	\
-} while (0)
-
-static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-			struct rte_mbuf *cookie,
-			bool offload)
-{
-	if (offload) {
-		if (cookie->ol_flags & PKT_TX_TCP_SEG)
-			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
-		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_UDP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_udp_hdr,
-				dgram_cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		case PKT_TX_TCP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		default:
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			break;
-		}
 
-		/* TCP Segmentation Offload */
-		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-				VIRTIO_NET_HDR_GSO_TCPV6 :
-				VIRTIO_NET_HDR_GSO_TCPV4;
-			hdr->gso_size = cookie->tso_segsz;
-			hdr->hdr_len =
-				cookie->l2_len +
-				cookie->l3_len +
-				cookie->l4_len;
-		} else {
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
-	}
-}
 
 static inline void
 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
@@ -725,102 +660,6 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
 }
 
-static inline void
-virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-			      uint16_t needed, int can_push, int in_order)
-{
-	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
-	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
-	struct vring_packed_desc *start_dp, *head_dp;
-	uint16_t idx, id, head_idx, head_flags;
-	int16_t head_size = vq->hw->vtnet_hdr_size;
-	struct virtio_net_hdr *hdr;
-	uint16_t prev;
-	bool prepend_header = false;
-
-	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
-
-	dxp = &vq->vq_descx[id];
-	dxp->ndescs = needed;
-	dxp->cookie = cookie;
-
-	head_idx = vq->vq_avail_idx;
-	idx = head_idx;
-	prev = head_idx;
-	start_dp = vq->vq_packed.ring.desc;
-
-	head_dp = &vq->vq_packed.ring.desc[idx];
-	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-	head_flags |= vq->vq_packed.cached_flags;
-
-	if (can_push) {
-		/* prepend cannot fail, checked by caller */
-		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
-					      -head_size);
-		prepend_header = true;
-
-		/* if offload disabled, it is not zeroed below, do it now */
-		if (!vq->hw->has_tx_offload)
-			virtqueue_clear_net_hdr(hdr);
-	} else {
-		/* setup first tx ring slot to point to header
-		 * stored in reserved region.
-		 */
-		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
-			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
-		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
-		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
-		idx++;
-		if (idx >= vq->vq_nentries) {
-			idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-		}
-	}
-
-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
-
-	do {
-		uint16_t flags;
-
-		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
-		start_dp[idx].len  = cookie->data_len;
-		if (prepend_header) {
-			start_dp[idx].addr -= head_size;
-			start_dp[idx].len += head_size;
-			prepend_header = false;
-		}
-
-		if (likely(idx != head_idx)) {
-			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-			flags |= vq->vq_packed.cached_flags;
-			start_dp[idx].flags = flags;
-		}
-		prev = idx;
-		idx++;
-		if (idx >= vq->vq_nentries) {
-			idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-		}
-	} while ((cookie = cookie->next) != NULL);
-
-	start_dp[prev].id = id;
-
-	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-	vq->vq_avail_idx = idx;
-
-	if (!in_order) {
-		vq->vq_desc_head_idx = dxp->next;
-		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
-	}
-
-	virtqueue_store_flags_packed(head_dp, head_flags,
-				     vq->hw->weak_barriers);
-}
-
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 			uint16_t needed, int use_indirect, int can_push,
@@ -1246,7 +1085,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 	return 0;
 }
 
-#define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d293a3189..18ae34789 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -563,4 +563,165 @@ virtqueue_notify(struct virtqueue *vq)
 #define VIRTQUEUE_DUMP(vq) do { } while (0)
 #endif
 
+/* avoid unnecessary write operations, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	typeof(var) *const var_ = &(var);	\
+	typeof(val)  const val_ = (val);	\
+	if (*var_ != val_)			\
+		*var_ = val_;			\
+} while (0)
+
+#define virtqueue_clear_net_hdr(hdr) do {		\
+	typeof(hdr) hdr_ = (hdr);			\
+	ASSIGN_UNLESS_EQUAL((hdr_)->csum_start, 0);	\
+	ASSIGN_UNLESS_EQUAL((hdr_)->csum_offset, 0);	\
+	ASSIGN_UNLESS_EQUAL((hdr_)->flags, 0);		\
+	ASSIGN_UNLESS_EQUAL((hdr_)->gso_type, 0);	\
+	ASSIGN_UNLESS_EQUAL((hdr_)->gso_size, 0);	\
+	ASSIGN_UNLESS_EQUAL((hdr_)->hdr_len, 0);	\
+} while (0)
+
+static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+			struct rte_mbuf *cookie,
+			bool offload)
+{
+	if (offload) {
+		if (cookie->ol_flags & PKT_TX_TCP_SEG)
+			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+		case PKT_TX_UDP_CKSUM:
+			hdr->csum_start = cookie->l2_len + cookie->l3_len;
+			hdr->csum_offset = offsetof(struct rte_udp_hdr,
+				dgram_cksum);
+			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+			break;
+
+		case PKT_TX_TCP_CKSUM:
+			hdr->csum_start = cookie->l2_len + cookie->l3_len;
+			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+			break;
+
+		default:
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			break;
+		}
+
+		/* TCP Segmentation Offload */
+		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+				VIRTIO_NET_HDR_GSO_TCPV6 :
+				VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->gso_size = cookie->tso_segsz;
+			hdr->hdr_len =
+				cookie->l2_len +
+				cookie->l3_len +
+				cookie->l4_len;
+		} else {
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
+	}
+}
+
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+			      uint16_t needed, int can_push, int in_order)
+{
+	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+	struct vq_desc_extra *dxp;
+	struct virtqueue *vq = txvq->vq;
+	struct vring_packed_desc *start_dp, *head_dp;
+	uint16_t idx, id, head_idx, head_flags;
+	int16_t head_size = vq->hw->vtnet_hdr_size;
+	struct virtio_net_hdr *hdr;
+	uint16_t prev;
+	bool prepend_header = false;
+
+	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+
+	dxp = &vq->vq_descx[id];
+	dxp->ndescs = needed;
+	dxp->cookie = cookie;
+
+	head_idx = vq->vq_avail_idx;
+	idx = head_idx;
+	prev = head_idx;
+	start_dp = vq->vq_packed.ring.desc;
+
+	head_dp = &vq->vq_packed.ring.desc[idx];
+	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+	head_flags |= vq->vq_packed.cached_flags;
+
+	if (can_push) {
+		/* prepend cannot fail, checked by caller */
+		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+					      -head_size);
+		prepend_header = true;
+
+		/* if offload disabled, it is not zeroed below, do it now */
+		if (!vq->hw->has_tx_offload)
+			virtqueue_clear_net_hdr(hdr);
+	} else {
+		/* setup first tx ring slot to point to header
+		 * stored in reserved region.
+		 */
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+		idx++;
+		if (idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
+	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+	do {
+		uint16_t flags;
+
+		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+		start_dp[idx].len  = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].addr -= head_size;
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
+
+		if (likely(idx != head_idx)) {
+			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+			flags |= vq->vq_packed.cached_flags;
+			start_dp[idx].flags = flags;
+		}
+		prev = idx;
+		idx++;
+		if (idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	} while ((cookie = cookie->next) != NULL);
+
+	start_dp[prev].id = id;
+
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+	vq->vq_avail_idx = idx;
+
+	if (!in_order) {
+		vq->vq_desc_head_idx = dxp->next;
+		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+	}
+
+	virtqueue_store_flags_packed(head_dp, head_flags,
+				     vq->hw->weak_barriers);
+}
 #endif /* _VIRTQUEUE_H_ */
-- 
2.17.1


