[dpdk-dev] [PATCH 2/3] vhost: optimize dequeue for small packets

Xie, Huawei huawei.xie at intel.com
Wed Jun 1 08:24:18 CEST 2016


On 5/3/2016 8:42 AM, Yuanhan Liu wrote:
> Both the current kernel virtio driver and the DPDK virtio driver use
> at least 2 desc buffers for Tx: the first for storing the header, and
> the others for storing the data.

Tx could prepend some space for the virtio net header whenever
possible, so that it could use only one descriptor.
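
For example, something like the sketch below (a rough sketch only;
tx_try_prepend_hdr is a hypothetical helper, not an existing PMD
function): if the mbuf has enough headroom, write the header right in
front of the packet data and post a single descriptor, falling back
to a separate header descriptor otherwise.

#include <string.h>
#include <rte_mbuf.h>

/*
 * Hypothetical helper: try to fold the virtio net header into the
 * mbuf headroom so that header + data share one descriptor.
 * hdr_len is the negotiated header size (what vhost calls
 * dev->vhost_hlen).  Returns 0 on success, -1 if the caller must
 * fall back to a separate header descriptor.
 */
static inline int
tx_try_prepend_hdr(struct rte_mbuf *m, uint16_t hdr_len)
{
	char *hdr;

	/* Not enough headroom: keep the usual 2-desc layout. */
	if (rte_pktmbuf_headroom(m) < hdr_len)
		return -1;

	/* Grow the data area backwards and zero-fill the header. */
	hdr = rte_pktmbuf_prepend(m, hdr_len);
	memset(hdr, 0, hdr_len);	/* plain header, no offloads here */
	return 0;
}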

Another thing: this doesn't actually reduce the number of checks,
because you also add one before the loop.
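
To make that concrete (quoting the diff below): the old per-iteration
loop-head check

	while (desc_avail != 0 || (desc->flags & VRING_DESC_F_NEXT) != 0)

becomes the in-loop

	if (desc_avail == 0) {
		if ((desc->flags & VRING_DESC_F_NEXT) == 0)
			break;
		...
	}

plus the new "desc->len == dev->vhost_hlen" branch before the loop.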


>
> Therefore, we could fetch the first data desc buf before the main loop,
> and do the copy first before the check of "are we done yet?". This
> could save one check for small packets, that just have one data desc
> buffer and need one mbuf to store it.
>
> Signed-off-by: Yuanhan Liu <yuanhan.liu at linux.intel.com>
> ---
>  lib/librte_vhost/vhost_rxtx.c | 52 ++++++++++++++++++++++++++++++-------------
>  1 file changed, 36 insertions(+), 16 deletions(-)
>
> diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> index 2c3b810..34d6ed1 100644
> --- a/lib/librte_vhost/vhost_rxtx.c
> +++ b/lib/librte_vhost/vhost_rxtx.c
> @@ -753,18 +753,48 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  		return -1;
>  
>  	desc_addr = gpa_to_vva(dev, desc->addr);
> -	rte_prefetch0((void *)(uintptr_t)desc_addr);
> -
> -	/* Retrieve virtio net header */
>  	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
> -	desc_avail  = desc->len - dev->vhost_hlen;
> -	desc_offset = dev->vhost_hlen;
> +	rte_prefetch0(hdr);
> +
> +	/*
> +	 * Both current kernel virtio driver and DPDK virtio driver
> +	 * use at least 2 desc buffers for Tx: the first for storing
> +	 * the header, and the others for storing the data.
> +	 */
> +	if (likely(desc->len == dev->vhost_hlen)) {
> +		desc = &vq->desc[desc->next];
> +
> +		desc_addr = gpa_to_vva(dev, desc->addr);
> +		rte_prefetch0((void *)(uintptr_t)desc_addr);
> +
> +		desc_offset = 0;
> +		desc_avail  = desc->len;
> +		nr_desc    += 1;
> +
> +		PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
> +	} else {
> +		desc_avail  = desc->len - dev->vhost_hlen;
> +		desc_offset = dev->vhost_hlen;
> +	}
>  
>  	mbuf_offset = 0;
>  	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
> -	while (desc_avail != 0 || (desc->flags & VRING_DESC_F_NEXT) != 0) {
> +	while (1) {
> +		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
> +		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
> +			(void *)((uintptr_t)(desc_addr + desc_offset)),
> +			cpy_len);
> +
> +		mbuf_avail  -= cpy_len;
> +		mbuf_offset += cpy_len;
> +		desc_avail  -= cpy_len;
> +		desc_offset += cpy_len;
> +
>  		/* This desc reaches to its end, get the next one */
>  		if (desc_avail == 0) {
> +			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
> +				break;
> +
>  			if (unlikely(desc->next >= vq->size ||
>  				     ++nr_desc >= vq->size))
>  				return -1;
> @@ -800,16 +830,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			mbuf_offset = 0;
>  			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
>  		}
> -
> -		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
> -		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
> -			(void *)((uintptr_t)(desc_addr + desc_offset)),
> -			cpy_len);
> -
> -		mbuf_avail  -= cpy_len;
> -		mbuf_offset += cpy_len;
> -		desc_avail  -= cpy_len;
> -		desc_offset += cpy_len;
>  	}
>  
>  	prev->data_len = mbuf_offset;


