[dpdk-dev] [PATCH v2 10/12] virtio: add Tx checksum offload support

Olivier Matz olivier.matz at 6wind.com
Mon Oct 3 11:00:21 CEST 2016


Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
---
 drivers/net/virtio/virtio_ethdev.c |  7 +++++
 drivers/net/virtio/virtio_ethdev.h |  1 +
 drivers/net/virtio/virtio_rxtx.c   | 57 +++++++++++++++++++++++++-------------
 3 files changed, 45 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 43cb096..55024cd 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1578,6 +1578,13 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_TCP_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM;
+	dev_info->tx_offload_capa = 0;
+
+	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+		dev_info->tx_offload_capa |=
+			DEV_TX_OFFLOAD_UDP_CKSUM |
+			DEV_TX_OFFLOAD_TCP_CKSUM;
+	}
 }
 
 /*
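
Once the PMD advertises DEV_TX_OFFLOAD_UDP_CKSUM/DEV_TX_OFFLOAD_TCP_CKSUM as above,
an application requests the offload per mbuf, following the usual mbuf API
contract: set l2_len/l3_len, leave the pseudo-header checksum in the L4 checksum
field, and set the PKT_TX_*_CKSUM flag. A minimal sketch of the application side
(the helper name, port_id and m are illustrative, not part of this patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Sketch only: request TCP checksum offload on an already-built IPv4/TCP
 * mbuf 'm' to be sent on 'port_id'; m->l2_len/m->l3_len are assumed valid. */
static void
request_tx_tcp_cksum(uint8_t port_id, struct rte_mbuf *m)
{
	struct rte_eth_dev_info dev_info;
	struct ipv4_hdr *ip;
	struct tcp_hdr *tcp;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM))
		return; /* offload not negotiated, checksum in software instead */

	ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
	tcp = (struct tcp_hdr *)((char *)ip + m->l3_len);

	/* the device finishes the checksum; it expects the pseudo-header
	 * checksum to already be present in the TCP checksum field */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
}
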
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 2fc9218..202aa2e 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -62,6 +62,7 @@
 	 1u << VIRTIO_NET_F_CTRL_VQ	  |	\
 	 1u << VIRTIO_NET_F_CTRL_RX	  |	\
 	 1u << VIRTIO_NET_F_CTRL_VLAN	  |	\
+	 1u << VIRTIO_NET_F_CSUM	  |	\
 	 1u << VIRTIO_NET_F_MRG_RXBUF	  |	\
 	 1ULL << VIRTIO_F_VERSION_1)
 
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eda678a..4ae11e7 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -213,13 +213,14 @@ static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push)
 {
+	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
 	struct virtqueue *vq = txvq->vq;
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
-	unsigned long offs;
+	struct virtio_net_hdr *hdr;
 
 	head_idx = vq->vq_desc_head_idx;
 	idx = head_idx;
@@ -230,10 +231,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	start_dp = vq->vq_ring.desc;
 
 	if (can_push) {
-		/* put on zero'd transmit header (no offloads) */
-		void *hdr = rte_pktmbuf_prepend(cookie, head_size);
-
-		memset(hdr, 0, head_size);
+		/* prepend cannot fail, checked by caller */
+		hdr = (struct virtio_net_hdr *)
+			rte_pktmbuf_prepend(cookie, head_size);
 	} else if (use_indirect) {
 		/* setup tx ring slot to point to indirect
 		 * descriptor list stored in reserved region.
@@ -241,14 +241,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		 * the first slot in indirect ring is already preset
 		 * to point to the header in reserved region
 		 */
-		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
-
-		offs = idx * sizeof(struct virtio_tx_region)
-			+ offsetof(struct virtio_tx_region, tx_indir);
-
-		start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
 
 		/* loop below will fill in rest of the indirect elements */
 		start_dp = txr[idx].tx_indir;
@@ -257,15 +254,40 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
 		 */
-		offs = idx * sizeof(struct virtio_tx_region)
-			+ offsetof(struct virtio_tx_region, tx_hdr);
-
-		start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
 		start_dp[idx].flags = VRING_DESC_F_NEXT;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
 		idx = start_dp[idx].next;
 	}
 
+	/* Checksum Offload */
+	switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_UDP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = 6;
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	case PKT_TX_TCP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = 16;
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	default:
+		hdr->csum_start = 0;
+		hdr->csum_offset = 0;
+		hdr->flags = 0;
+		break;
+	}
+
+	hdr->gso_type = 0;
+	hdr->gso_size = 0;
+	hdr->hdr_len = 0;
+
 	do {
 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len   = cookie->data_len;
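
The two csum_offset constants above are the offsets of the checksum field inside
the L4 header: 6 for UDP and 16 for TCP. An equivalent, self-documenting way to
derive them (a sketch using the rte_udp.h/rte_tcp.h structures of this DPDK
version, not code from this patch):

#include <stddef.h>
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_udp.h>
#include <rte_tcp.h>

/* Sketch: csum_offset the device expects, relative to csum_start
 * (csum_start points at the beginning of the L4 header). */
static inline uint16_t
virtio_l4_csum_offset(uint64_t ol_flags)
{
	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		return offsetof(struct udp_hdr, dgram_cksum);	/* 6 */
	case PKT_TX_TCP_CKSUM:
		return offsetof(struct tcp_hdr, cksum);		/* 16 */
	default:
		return 0;
	}
}

Per the virtio spec, VIRTIO_NET_HDR_F_NEEDS_CSUM tells the device to compute the
checksum starting at csum_start and to store it at csum_start + csum_offset; the
driver is expected to have left the pseudo-header checksum in that field.
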
@@ -512,11 +534,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
-	    != ETH_TXQ_FLAGS_NOXSUMS) {
-		PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
-		return -EINVAL;
-	}
 
 #ifdef RTE_MACHINE_CPUFLAG_SSSE3
 	/* Use simple rx/tx func if single segment and no offloads */
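
With the blanket rejection removed, whether checksum offload is usable on a Tx
queue is now up to the application: it simply must not declare
ETH_TXQ_FLAGS_NOXSUMS at queue setup time. A sketch of such a queue setup
(port_id, nb_txd and the helper name are placeholders):

#include <rte_ethdev.h>

/* Sketch: set up Tx queue 0 so that checksum offloads may be used on it. */
static int
setup_txq_with_cksum(uint8_t port_id, uint16_t nb_txd)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;

	/* do not declare "no checksum offloads" for this queue */
	txconf.txq_flags &= ~ETH_TXQ_FLAGS_NOXSUMS;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			rte_eth_dev_socket_id(port_id), &txconf);
}
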
-- 
2.8.1
