[dpdk-dev] [PATCH v6 11/15] vhost: add vector filling support for packed ring

Tiwei Bie tiwei.bie at intel.com
Wed Jul 4 07:53:07 CEST 2018


On Mon, Jul 02, 2018 at 10:16:25AM +0200, Maxime Coquelin wrote:
[...]
> +static __rte_always_inline int
> +fill_vec_buf_packed_indirect(struct virtio_net *dev,
> +			struct vhost_virtqueue *vq,
> +			struct vring_desc_packed *desc, uint16_t *vec_idx,
> +			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
> +{
> +	uint16_t i;
> +	uint32_t nr_decs;
> +	uint16_t vec_id = *vec_idx;
> +	uint64_t dlen;
> +	struct vring_desc_packed *descs, *idescs = NULL;
> +
> +	dlen = desc->len;
> +	descs = (struct vring_desc_packed *)(uintptr_t)
> +		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
> +	if (unlikely(!descs))
> +		return -1;
> +
> +	if (unlikely(dlen < desc->len)) {
> +		/*
> +		 * The indirect desc table is not contiguous
> +		 * in process VA space, we have to copy it.
> +		 */
> +		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
> +		if (unlikely(!idescs))
> +			return -1;
> +
> +		descs = idescs;
> +	}
> +
> +	nr_decs =  desc->len / sizeof(struct vring_desc_packed);

s/nr_decs/nr_desc/ (the declaration and the vq->size check below need
the same rename), and there is a double space after the '=' here.


> +	if (unlikely(nr_decs >= vq->size)) {
> +		free_ind_table(idescs);
> +		return -1;
> +	}
[...]
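
The copy fallback above is needed because an indirect table that is
contiguous in guest IOVA space may map to several host-VA chunks once
translated. A minimal sketch of such a chunked copy, assuming the
semantics of the alloc_copy_ind_table() helper used here (the series'
own implementation may differ):

	/* Sketch only: the series' alloc_copy_ind_table() may differ. */
	static void *
	copy_ind_table_sketch(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint64_t desc_addr, uint64_t desc_len)
	{
		/* Bounce buffer holding a contiguous copy of the table. */
		void *idesc = rte_malloc(__func__, desc_len, 0);
		uint64_t src, dst, len, remain = desc_len;

		if (unlikely(!idesc))
			return NULL;

		dst = (uint64_t)(uintptr_t)idesc;

		while (remain) {
			len = remain;
			src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				rte_free(idesc);
				return NULL;
			}

			/* Copy one contiguous chunk, then advance. */
			rte_memcpy((void *)(uintptr_t)dst,
					(void *)(uintptr_t)src, len);

			remain -= len;
			dst += len;
			desc_addr += len;
		}

		return idesc;
	}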
> +
> +static inline int
> +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +				uint16_t avail_idx, uint16_t *desc_count,
> +				struct buf_vector *buf_vec, uint16_t *vec_idx,
> +				uint16_t *buf_id, uint16_t *len, uint8_t perm)
> +{
> +	bool wrap_counter = vq->avail_wrap_counter;
> +	struct vring_desc_packed *descs = vq->desc_packed;
> +	uint16_t vec_id = *vec_idx;
> +
> +	if (avail_idx < vq->last_avail_idx)
> +		wrap_counter ^= 1;

In which case will avail_idx be less than vq->last_avail_idx,
so that the wrap counter needs to be flipped?
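
Is it for the case where the caller probes descriptors ahead of
vq->last_avail_idx, e.g. when reserving several chains for a single
mergeable Rx packet, and the probed index wraps modulo vq->size?
A hypothetical caller-side fragment of that situation (nr_reserved
is illustrative, not from the patch):

	/* Hypothetical: once the index wraps, avail_idx becomes less
	 * than vq->last_avail_idx, and the wrap counter must be
	 * inverted for desc_is_avail() to judge the descriptor
	 * correctly. */
	uint16_t avail_idx = vq->last_avail_idx + nr_reserved;

	if (avail_idx >= vq->size)
		avail_idx -= vq->size;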

> +
> +	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
> +		return -1;
> +
> +	*desc_count = 0;
> +
> +	while (1) {
> +		if (unlikely(vec_id >= BUF_VECTOR_MAX))
> +			return -1;
> +
> +		*desc_count += 1;
> +		*buf_id = descs[avail_idx].index;
> +
> +		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
> +			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
> +							&descs[avail_idx],
> +							&vec_id, buf_vec,
> +							len, perm) < 0))
> +				return -1;
> +		} else {
> +			*len += descs[avail_idx].len;
> +
> +			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
> +							descs[avail_idx].addr,
> +							descs[avail_idx].len,
> +							perm)))
> +				return -1;
> +		}
> +
> +		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
> +			break;
> +
> +		if (++avail_idx >= vq->size) {
> +			avail_idx -= vq->size;
> +			wrap_counter ^= 1;
> +		}
> +	}
> +
> +	*vec_idx = vec_id;
> +
> +	return 0;
> +}
[...]
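
For completeness, a hypothetical caller sketch showing how the chains
filled above might be reserved until enough buffer space for a packet
is collected. Only fill_vec_buf_packed() and the vhost types come from
this patch; reserve_bufs_sketch() and its policy are illustrative:

	/* Illustrative only: name and reservation policy are not
	 * taken from the patch. */
	static inline int
	reserve_bufs_sketch(struct virtio_net *dev,
			struct vhost_virtqueue *vq, uint32_t size,
			struct buf_vector *buf_vec,
			uint16_t *nr_vec, uint16_t *nr_descs)
	{
		uint16_t avail_idx = vq->last_avail_idx;
		uint16_t max_tries = vq->size;
		uint16_t vec_idx = 0;
		uint16_t buf_id, desc_count, len;

		*nr_descs = 0;

		while (size > 0) {
			/* Give up if even the whole ring cannot hold
			 * the packet. */
			if (unlikely(!max_tries--))
				return -1;

			len = 0;
			if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx,
							&desc_count, buf_vec,
							&vec_idx, &buf_id,
							&len,
							VHOST_ACCESS_RW) < 0))
				return -1;

			size -= RTE_MIN(size, (uint32_t)len);

			/* Advance past the chain, wrapping as in the
			 * loop above. */
			avail_idx += desc_count;
			if (avail_idx >= vq->size)
				avail_idx -= vq->size;

			*nr_descs += desc_count;
		}

		*nr_vec = vec_idx;

		return 0;
	}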

