[dpdk-dev] [PATCH v1 04/14] vhost: add burst dequeue function
Marvin Liu
yong.liu at intel.com
Thu Sep 5 18:14:11 CEST 2019
Add a burst dequeue function for the packed ring, matching the burst
enqueue function. The burst dequeue function does not support chained
descriptors; those packets are handled by the single-packet dequeue
function.
Signed-off-by: Marvin Liu <yong.liu at intel.com>
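The intended interplay between the two paths can be sketched as
below. This loop is illustrative only and not part of the patch;
virtio_dev_tx_single_packed stands in here for the single-packet
counterpart used by this series:

    /* Try the aligned 4-descriptor burst first; fall back to the
     * single-packet path, which handles chained and indirect
     * descriptors.
     */
    static uint16_t
    example_dequeue_packed(struct virtio_net *dev,
                    struct vhost_virtqueue *vq,
                    struct rte_mempool *mbuf_pool,
                    struct rte_mbuf **pkts, uint16_t count)
    {
            uint16_t pkt_idx = 0;

            while (pkt_idx < count) {
                    if ((count - pkt_idx) >= PACKED_DESCS_BURST &&
                        !virtio_dev_tx_burst_packed(dev, vq, mbuf_pool,
                                                    &pkts[pkt_idx])) {
                            pkt_idx += PACKED_DESCS_BURST;
                            continue;
                    }
                    if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
                                                    &pkts[pkt_idx]))
                            break;
                    pkt_idx++;
            }

            return pkt_idx;
    }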
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index ed8b4aabf..b33f29ba0 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -41,6 +41,8 @@
#define PACKED_DESCS_BURST 4
#define PACKED_BURST_MASK (PACKED_DESCS_BURST - 1)
+#define DESC_SINGLE_DEQUEUE (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT)
+
/**
* Structure contains buffer address, length and descriptor index
* from vring to do scatter RX.
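The two new macros drive the fast-path checks in virtio_net.c below:
PACKED_BURST_MASK tests whether an avail index is aligned to the
burst size, and DESC_SINGLE_DEQUEUE collects the flags (chained or
indirect descriptor) that force a packet onto the single-dequeue
path. The mask arithmetic can be checked in isolation; the snippet
below is a standalone sanity test, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    #define PACKED_DESCS_BURST 4
    #define PACKED_BURST_MASK (PACKED_DESCS_BURST - 1)

    int main(void)
    {
            uint16_t idx;

            /* The burst may only start at indexes 0, 4, 8, ... */
            for (idx = 0; idx < 32; idx++)
                    assert(((idx & PACKED_BURST_MASK) == 0) ==
                           (idx % PACKED_DESCS_BURST == 0));
            return 0;
    }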
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 454e8b33e..f34df3733 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1603,6 +1603,150 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return i;
}
+static __rte_always_inline int
+vhost_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+ uint16_t avail_idx, uintptr_t *desc_addr, uint16_t *ids)
+{
+ bool wrap_counter = vq->avail_wrap_counter;
+ struct vring_packed_desc *descs = vq->desc_packed;
+ uint64_t len, len1, len2, len3;
+ uint64_t buf_len, buf_len1, buf_len2, buf_len3;
+ uint32_t buf_offset = dev->vhost_hlen;
+
+	/* The burst path requires a PACKED_DESCS_BURST-aligned start index. */
+ if (unlikely(avail_idx & PACKED_BURST_MASK))
+ return -1;
+
+ if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)) |
+ unlikely(!desc_is_avail(&descs[avail_idx + 1], wrap_counter)) |
+ unlikely(!desc_is_avail(&descs[avail_idx + 2], wrap_counter)) |
+ unlikely(!desc_is_avail(&descs[avail_idx + 3], wrap_counter)))
+ return 1;
+
+ if (unlikely((descs[avail_idx].flags & DESC_SINGLE_DEQUEUE) |
+ (descs[avail_idx + 1].flags & DESC_SINGLE_DEQUEUE) |
+ (descs[avail_idx + 2].flags & DESC_SINGLE_DEQUEUE) |
+ (descs[avail_idx + 3].flags & DESC_SINGLE_DEQUEUE)))
+ return -1;
+
+ rte_smp_rmb();
+
+ len = descs[avail_idx].len;
+ len1 = descs[avail_idx + 1].len;
+ len2 = descs[avail_idx + 2].len;
+ len3 = descs[avail_idx + 3].len;
+
+ desc_addr[0] = vhost_iova_to_vva(dev, vq, descs[avail_idx].addr, &len,
+ VHOST_ACCESS_RW);
+
+ desc_addr[1] = vhost_iova_to_vva(dev, vq, descs[avail_idx + 1].addr,
+ &len1, VHOST_ACCESS_RW);
+
+ desc_addr[2] = vhost_iova_to_vva(dev, vq, descs[avail_idx + 2].addr,
+ &len2, VHOST_ACCESS_RW);
+
+ desc_addr[3] = vhost_iova_to_vva(dev, vq, descs[avail_idx + 3].addr,
+ &len3, VHOST_ACCESS_RW);
+
+ if (unlikely((len != descs[avail_idx].len) |
+ (len1 != descs[avail_idx + 1].len) |
+ (len2 != descs[avail_idx + 2].len) |
+ (len3 != descs[avail_idx + 3].len))) {
+ return -1;
+ }
+
+ if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, PACKED_DESCS_BURST))
+ return -1;
+
+ buf_len = pkts[0]->buf_len - pkts[0]->data_off;
+ buf_len1 = pkts[1]->buf_len - pkts[1]->data_off;
+ buf_len2 = pkts[2]->buf_len - pkts[2]->data_off;
+ buf_len3 = pkts[3]->buf_len - pkts[3]->data_off;
+
+ if (unlikely((buf_len < (len - buf_offset)) |
+ (buf_len1 < (len1 - buf_offset)) |
+ (buf_len2 < (len2 - buf_offset)) |
+ (buf_len3 < (len3 - buf_offset)))) {
+ rte_pktmbuf_free(pkts[0]);
+ rte_pktmbuf_free(pkts[1]);
+ rte_pktmbuf_free(pkts[2]);
+ rte_pktmbuf_free(pkts[3]);
+ return -1;
+ }
+
+	pkts[0]->pkt_len = len - buf_offset;
+	pkts[1]->pkt_len = len1 - buf_offset;
+	pkts[2]->pkt_len = len2 - buf_offset;
+	pkts[3]->pkt_len = len3 - buf_offset;
+
+ pkts[0]->data_len = pkts[0]->pkt_len;
+ pkts[1]->data_len = pkts[1]->pkt_len;
+ pkts[2]->data_len = pkts[2]->pkt_len;
+ pkts[3]->data_len = pkts[3]->pkt_len;
+
+ ids[0] = descs[avail_idx].id;
+ ids[1] = descs[avail_idx + 1].id;
+ ids[2] = descs[avail_idx + 2].id;
+ ids[3] = descs[avail_idx + 3].id;
+
+ return 0;
+}
+
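To summarize the contract of vhost_dequeue_burst_packed(): it returns
0 on success with pkts, desc_addr and ids filled in, 1 when the four
descriptors are not all available yet, and -1 when the burst path
cannot be used at all (unaligned index, chained or indirect
descriptor, failed address translation, or mbuf allocation failure).
The rte_smp_rmb() orders the availability checks before the loads of
the descriptor fields. The per-descriptor size check can also be
exercised standalone; the helper below is a sketch assuming desc_len
includes the virtio-net header of vhost_hlen bytes:

    #include <stdbool.h>
    #include <stdint.h>

    /* The mbuf data room must hold the descriptor payload minus the
     * virtio-net header, mirroring the buf_len checks above.
     */
    static bool
    burst_mbuf_fits(uint64_t desc_len, uint32_t vhost_hlen,
                    uint16_t mbuf_buf_len, uint16_t mbuf_data_off)
    {
            uint64_t buf_len = mbuf_buf_len - mbuf_data_off;

            return buf_len >= desc_len - vhost_hlen;
    }

For example, a 1526-byte descriptor (1514-byte frame plus a 12-byte
header) fits a default 2048-byte data room with 128 bytes of
headroom: burst_mbuf_fits(1526, 12, 2048, 128) is true.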
+static __rte_unused int
+virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts)
+{
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint32_t buf_offset = dev->vhost_hlen;
+	uintptr_t desc_addr[PACKED_DESCS_BURST];
+	uint16_t ids[PACKED_DESCS_BURST];
+ int ret;
+ struct virtio_net_hdr *hdr, *hdr1, *hdr2, *hdr3;
+
+ ret = vhost_dequeue_burst_packed(dev, vq, mbuf_pool, pkts, avail_idx,
+ desc_addr, ids);
+
+ if (ret)
+ return ret;
+
+ rte_prefetch0((void *)(uintptr_t)desc_addr[0]);
+ rte_prefetch0((void *)(uintptr_t)desc_addr[1]);
+ rte_prefetch0((void *)(uintptr_t)desc_addr[2]);
+ rte_prefetch0((void *)(uintptr_t)desc_addr[3]);
+
+	rte_memcpy(rte_pktmbuf_mtod(pkts[0], void *),
+		(void *)(uintptr_t)(desc_addr[0] + buf_offset),
+		pkts[0]->pkt_len);
+	rte_memcpy(rte_pktmbuf_mtod(pkts[1], void *),
+		(void *)(uintptr_t)(desc_addr[1] + buf_offset),
+		pkts[1]->pkt_len);
+	rte_memcpy(rte_pktmbuf_mtod(pkts[2], void *),
+		(void *)(uintptr_t)(desc_addr[2] + buf_offset),
+		pkts[2]->pkt_len);
+	rte_memcpy(rte_pktmbuf_mtod(pkts[3], void *),
+		(void *)(uintptr_t)(desc_addr[3] + buf_offset),
+		pkts[3]->pkt_len);
+
+ if (virtio_net_with_host_offload(dev)) {
+ hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr[0]);
+ hdr1 = (struct virtio_net_hdr *)((uintptr_t)desc_addr[1]);
+ hdr2 = (struct virtio_net_hdr *)((uintptr_t)desc_addr[2]);
+ hdr3 = (struct virtio_net_hdr *)((uintptr_t)desc_addr[3]);
+ vhost_dequeue_offload(hdr, pkts[0]);
+ vhost_dequeue_offload(hdr1, pkts[1]);
+ vhost_dequeue_offload(hdr2, pkts[2]);
+ vhost_dequeue_offload(hdr3, pkts[3]);
+ }
+
+ vq->last_avail_idx += PACKED_DESCS_BURST;
+ if (vq->last_avail_idx >= vq->size) {
+ vq->last_avail_idx -= vq->size;
+ vq->avail_wrap_counter ^= 1;
+ }
+ return 0;
+}
+
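The tail of virtio_dev_tx_burst_packed() advances last_avail_idx by
a whole burst and toggles avail_wrap_counter on wrap-around. A
standalone rendering of that update, assuming the ring size is a
multiple of PACKED_DESCS_BURST so the index lands exactly on
vq->size at the wrap point:

    #include <assert.h>
    #include <stdint.h>

    #define PACKED_DESCS_BURST 4

    /* Advance the avail index by one burst, toggling the wrap
     * counter when the index wraps past the end of the ring.
     */
    static void
    advance_burst(uint16_t *last_avail_idx, uint8_t *wrap_counter,
                    uint16_t ring_size)
    {
            *last_avail_idx += PACKED_DESCS_BURST;
            if (*last_avail_idx >= ring_size) {
                    *last_avail_idx -= ring_size;
                    *wrap_counter ^= 1;
            }
    }

    int main(void)
    {
            uint16_t idx = 252;
            uint8_t wrap = 1;

            advance_burst(&idx, &wrap, 256);
            assert(idx == 0 && wrap == 0);
            return 0;
    }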
static __rte_always_inline int
vhost_dequeue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t *buf_id,
--
2.17.1