[dpdk-dev] [PATCH v1 08/14] vhost: buffer vhost dequeue shadow ring

Marvin Liu <yong.liu at intel.com>
Thu Sep 5 18:14:15 CEST 2019


Buffer as many used ring updates as possible in the vhost dequeue
function in order to coordinate with the virtio driver. To support
this buffering, each shadow used ring element must contain the
descriptor index and its wrap counter. The index of the first
shadowed ring entry is recorded so that the number of buffered
updates can be calculated.

Signed-off-by: Marvin Liu <yong.liu at intel.com>
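
The scheme above defers the used-ring write-back; the count of
pending entries is simply the distance from the recorded head to the
current used index, modulo the ring size. Below is a self-contained
sketch of that calculation (illustrative struct and helper names
only, not the vhost code itself):

#include <stdint.h>
#include <stdio.h>

struct shadow_state {
	uint16_t size;                /* ring size */
	uint16_t dequeue_shadow_head; /* first shadowed used index */
	uint16_t last_used_idx;       /* next used slot to fill */
};

/* Number of used-ring updates buffered since the head was recorded. */
static uint16_t
shadow_buffered_count(const struct shadow_state *s)
{
	if (s->last_used_idx >= s->dequeue_shadow_head)
		return s->last_used_idx - s->dequeue_shadow_head;
	/* last_used_idx has already wrapped past the ring end */
	return s->size - s->dequeue_shadow_head + s->last_used_idx;
}

int
main(void)
{
	/* Head at 250 on a 256-entry ring, used index wrapped to 4:
	 * 256 - 250 + 4 = 10 entries are buffered. */
	struct shadow_state s = { 256, 250, 4 };
	printf("buffered: %u\n", shadow_buffered_count(&s));
	return 0;
}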

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5471acaf7..b161082ca 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -42,6 +42,8 @@
 #define VIRTIO_RX_USED_FLAG  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \
 				| VRING_DESC_F_WRITE)
 #define VIRTIO_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE)
+#define VIRTIO_TX_USED_FLAG  (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED)
+#define VIRTIO_TX_USED_WRAP_FLAG (0x0)
 #define PACKED_DESCS_BURST 4
 #define PACKED_BURST_MASK (PACKED_DESCS_BURST - 1)
 #define DESC_SINGLE_DEQUEUE (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT)
@@ -90,9 +92,11 @@ struct log_cache_entry {
 };
 
 struct vring_used_elem_packed {
+	uint16_t used_idx;
 	uint16_t id;
 	uint32_t len;
 	uint32_t count;
+	uint16_t used_wrap_counter;
 };
 
 /**
@@ -147,6 +151,7 @@ struct vhost_virtqueue {
 	};
 	uint16_t                shadow_used_idx;
 	uint16_t                enqueue_shadow_count;
+	uint16_t                dequeue_shadow_head;
 	struct vhost_vring_addr ring_addrs;
 
 	struct batch_copy_elem	*batch_copy_elems;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ce255dd82..f8ad54e18 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -240,6 +240,42 @@ update_shadow_packed(struct vhost_virtqueue *vq,
 	vq->shadow_used_packed[i].count = count;
 }
 
+static __rte_always_inline void
+update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
+	uint16_t count)
+{
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].id  = buf_id;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = count;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->shadow_used_idx = 1;
+	} else {
+		vq->desc_packed[vq->last_used_idx].id = buf_id;
+		vq->desc_packed[vq->last_used_idx].len = 0;
+
+		if (vq->used_wrap_counter)
+			vq->desc_packed[vq->last_used_idx].flags =
+				VIRTIO_TX_USED_FLAG;
+		else
+			vq->desc_packed[vq->last_used_idx].flags =
+				VIRTIO_TX_USED_WRAP_FLAG;
+
+	}
+
+	vq->last_used_idx += count;
+
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -1898,6 +1934,8 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 					&desc_count))
 		return -1;
 
+	update_dequeue_shadow_packed(vq, buf_id, desc_count);
+
 	vq->last_avail_idx += desc_count;
 	if (vq->last_avail_idx >= vq->size) {
 		vq->last_avail_idx -= vq->size;
-- 
2.17.1
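
Note that update_dequeue_shadow_packed() stores final descriptor
flags for every entry except the first shadowed one; the head entry
keeps its used_idx and wrap counter in the shadow ring so that the
eventual flush can publish the whole batch by writing that single
flags word last. The flush itself arrives later in this series; the
following is only a minimal sketch of such a publish step, with a
hypothetical helper name and a release fence standing in for DPDK's
rte_smp_wmb():

#include <stdint.h>

#define VRING_DESC_F_AVAIL (1ULL << 7)
#define VRING_DESC_F_USED  (1ULL << 15)

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Fill in the deferred head entry and store its flags last, behind a
 * release fence, so the driver never sees a half-written batch. */
static void
publish_shadow_head(struct vring_packed_desc *desc, uint16_t head_idx,
		    uint16_t head_id, uint16_t head_wrap_counter)
{
	uint16_t flags = head_wrap_counter ?
		(VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0;

	desc[head_idx].id = head_id;
	desc[head_idx].len = 0;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	desc[head_idx].flags = flags;
}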


