[dpdk-dev] [PATCH v1 07/14] vhost: add flush function for burst enqueue

Marvin Liu yong.liu at intel.com
Thu Sep 5 18:14:14 CEST 2019


Flush used flags when the burst enqueue function is finished. Descriptors'
flags are pre-calculated, as they will be reset by vhost.

Signed-off-by: Marvin Liu <yong.liu at intel.com>
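
For readers unfamiliar with the packed ring layout, here is a minimal
standalone sketch of what the pre-calculated flag macros below encode. The
flag bit positions are those of the VIRTIO 1.1 packed ring; the helper
rx_used_flags() is hypothetical and not part of this series:

#include <stdint.h>

#define VRING_DESC_F_WRITE (1 << 1)   /* buffer is written by the device */
#define VRING_DESC_F_AVAIL (1 << 7)   /* avail bit, packed ring */
#define VRING_DESC_F_USED  (1 << 15)  /* used bit, packed ring */

/* A used descriptor carries AVAIL and USED both equal to the device's
 * current wrap counter; WRITE is set because on RX the device (vhost)
 * writes the packet into the guest buffer. */
static inline uint16_t
rx_used_flags(int used_wrap_counter)
{
	if (used_wrap_counter)
		return VRING_DESC_F_AVAIL | VRING_DESC_F_USED |
			VRING_DESC_F_WRITE;
	return VRING_DESC_F_WRITE;
}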

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 86552cbeb..5471acaf7 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -39,6 +39,9 @@
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define VIRTIO_RX_USED_FLAG  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \
+				| VRING_DESC_F_WRITE)
+#define VIRTIO_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE)
 #define PACKED_DESCS_BURST 4
 #define PACKED_BURST_MASK (PACKED_DESCS_BURST - 1)
 #define DESC_SINGLE_DEQUEUE (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT)
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index dffd466d5..ce255dd82 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -157,6 +157,60 @@ flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
+	uint16_t id1, uint16_t id2, uint16_t id3, uint16_t flags)
+{
+	vq->desc_packed[vq->last_used_idx].id = id;
+	vq->desc_packed[vq->last_used_idx].len = len;
+	vq->desc_packed[vq->last_used_idx + 1].id = id1;
+	vq->desc_packed[vq->last_used_idx + 1].len = len1;
+
+	vq->desc_packed[vq->last_used_idx + 2].id = id2;
+	vq->desc_packed[vq->last_used_idx + 2].len = len2;
+
+	vq->desc_packed[vq->last_used_idx + 3].id = id3;
+	vq->desc_packed[vq->last_used_idx + 3].len = len3;
+
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 1].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 2].flags = flags;
+	rte_smp_wmb();
+	vq->desc_packed[vq->last_used_idx + 3].flags = flags;
+
+	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc) *
+				   PACKED_DESCS_BURST);
+	vhost_log_cache_sync(dev, vq);
+
+	vq->last_used_idx += PACKED_DESCS_BURST;
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
+static __rte_always_inline void
+flush_enqueue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
+	uint16_t id1, uint16_t id2, uint16_t id3)
+{
+	uint16_t flags = 0;
+
+	if (vq->used_wrap_counter)
+		flags = VIRTIO_RX_USED_FLAG;
+	else
+		flags = VIRTIO_RX_USED_WRAP_FLAG;
+
+	flush_burst_packed(dev, vq, len, len1, len2, len3, id, id1, id2, id3,
+			   flags);
+}
+
 static __rte_always_inline void
 update_enqueue_shadow_packed(struct vhost_virtqueue *vq, uint16_t desc_idx,
 	uint32_t len, uint16_t count)
@@ -950,6 +1004,7 @@ virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t len, len1, len2, len3;
 	struct virtio_net_hdr_mrg_rxbuf *hdr, *hdr1, *hdr2, *hdr3;
 	uint32_t buf_offset = dev->vhost_hlen;
+	uint16_t id, id1, id2, id3;
 
 	if (unlikely(avail_idx & PACKED_BURST_MASK))
 		return -1;
@@ -1036,6 +1091,14 @@ virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		    rte_pktmbuf_mtod_offset(pkts[3], void *, 0),
 		    pkts[3]->pkt_len);
 
+	id = descs[avail_idx].id;
+	id1 = descs[avail_idx + 1].id;
+	id2 = descs[avail_idx + 2].id;
+	id3 = descs[avail_idx + 3].id;
+
+	flush_enqueue_burst_packed(dev, vq, len, len1, len2, len3, id, id1,
+				   id2, id3);
+
 	return 0;
 }
 
-- 
2.17.1
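
A note on the barrier placement in flush_burst_packed(): each descriptor's
id and len must be globally visible before its flags store, since the guest
polls flags to detect a completed entry. A single-descriptor sketch of that
ordering (flush_used_one() is hypothetical; the struct is redeclared here
only to keep the snippet self-contained):

#include <stdint.h>
#include <rte_atomic.h>		/* rte_smp_wmb() */

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

static inline void
flush_used_one(struct vring_packed_desc *d, uint16_t id, uint32_t len,
		uint16_t flags)
{
	d->id = id;
	d->len = len;
	rte_smp_wmb();		/* order id/len before the flags store */
	d->flags = flags;	/* publish: guest may now consume the entry */
}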


