[dpdk-dev] [PATCH v8 2/4] vhost: add support for packed ring in async vhost
Hu, Jiayu
jiayu.hu at intel.com
Tue Apr 27 07:16:23 CEST 2021
Hi Cheng,
Some comments are inline.
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang at intel.com>
> Sent: Monday, April 19, 2021 4:51 PM
> To: maxime.coquelin at redhat.com; Xia, Chenbo <chenbo.xia at intel.com>
> Cc: dev at dpdk.org; Hu, Jiayu <jiayu.hu at intel.com>; Yang, YvonneX
> <yvonnex.yang at intel.com>; Wang, Yinan <yinan.wang at intel.com>; Liu,
> Yong <yong.liu at intel.com>; Jiang, Cheng1 <cheng1.jiang at intel.com>
> Subject: [PATCH v8 2/4] vhost: add support for packed ring in async vhost
>
> For now async vhost data path only supports split ring. This patch
> enables packed ring in async vhost data path to make async vhost
> compatible with virtio 1.1 spec.
>
> Signed-off-by: Cheng Jiang <Cheng1.jiang at intel.com>
> ---
> lib/librte_vhost/rte_vhost_async.h | 1 +
> lib/librte_vhost/vhost.c | 79 ++++--
> lib/librte_vhost/vhost.h | 15 +-
> lib/librte_vhost/virtio_net.c | 441 +++++++++++++++++++++++++++--
> 4 files changed, 488 insertions(+), 48 deletions(-)
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 438bdafd1..ce88ad3c0 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -363,14 +363,14 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
> }
>
> static __rte_always_inline void
> -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> - struct vhost_virtqueue *vq,
> - uint32_t len[],
> - uint16_t id[],
> - uint16_t count[],
> +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
> + uint32_t *len,
> + uint16_t *id,
> + uint16_t *count,
> uint16_t num_buffers)
> {
> uint16_t i;
> +
> for (i = 0; i < num_buffers; i++) {
> /* enqueue shadow flush action aligned with batch num */
> if (!vq->shadow_used_idx)
> @@ -382,6 +382,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> vq->shadow_aligned_idx += count[i];
> vq->shadow_used_idx++;
> }
> +}
> +
> +static __rte_always_inline void
> +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq,
> + uint32_t *len,
> + uint16_t *id,
> + uint16_t *count,
> + uint16_t num_buffers)
> +{
> + vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
>
> if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
> do_data_copy_enqueue(dev, vq);
> @@ -1474,6 +1485,23 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
> }
> }
>
> +static __rte_always_inline void
> +store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
> + struct vring_used_elem_packed *d_ring,
> + uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
> +{
> + uint16_t elem_size = sizeof(struct vring_used_elem_packed);
> +
> + if (d_idx + count <= ring_size) {
> + rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
> + } else {
> + uint16_t size = ring_size - d_idx;
> +
> + rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
> + rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
> + }
> +}
> +
> static __rte_noinline uint32_t
> virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> struct vhost_virtqueue *vq, uint16_t queue_id,
> @@ -1556,12 +1584,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> * descriptors.
> */
> from = vq->shadow_used_idx - num_buffers;
> - to = vq->async_desc_idx & (vq->size - 1);
> + to = vq->async_desc_idx_split & (vq->size - 1);
>
> store_dma_desc_info_split(vq->shadow_used_split,
> vq->async_descs_split, vq->size, from, to, num_buffers);
>
> - vq->async_desc_idx += num_buffers;
> + vq->async_desc_idx_split += num_buffers;
> vq->shadow_used_idx -= num_buffers;
> } else
> comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> @@ -1619,7 +1647,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
> slot_idx--;
> }
> - vq->async_desc_idx -= num_descs;
> + vq->async_desc_idx_split -= num_descs;
> /* recover shadow used ring and available ring */
> vq->shadow_used_idx -= (vq->last_avail_idx -
> async_pkts_log[num_async_pkts].last_avail_idx -
> @@ -1641,6 +1669,329 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> return pkt_idx;
> }
>
> +static __rte_always_inline void
> +vhost_update_used_packed(struct vhost_virtqueue *vq,
> + struct vring_used_elem_packed *shadow_ring,
> + uint16_t count)
> +{
> + int i;
> + uint16_t used_idx = vq->last_used_idx;
> + uint16_t head_idx = vq->last_used_idx;
> + uint16_t head_flags = 0;
> +
> + if (count == 0)
> + return;
> +
> + /* Split loop in two to save memory barriers */
> + for (i = 0; i < count; i++) {
> + vq->desc_packed[used_idx].id = shadow_ring[i].id;
> + vq->desc_packed[used_idx].len = shadow_ring[i].len;
> +
> + used_idx += shadow_ring[i].count;
> + if (used_idx >= vq->size)
> + used_idx -= vq->size;
> + }
> +
> + /* The ordering for storing desc flags needs to be enforced. */
> + rte_atomic_thread_fence(__ATOMIC_RELEASE);
> +
> + for (i = 0; i < count; i++) {
> + uint16_t flags;
> +
> + if (vq->shadow_used_packed[i].len)
> + flags = VRING_DESC_F_WRITE;
> + else
> + flags = 0;
> +
> + if (vq->used_wrap_counter) {
> + flags |= VRING_DESC_F_USED;
> + flags |= VRING_DESC_F_AVAIL;
> + } else {
> + flags &= ~VRING_DESC_F_USED;
> + flags &= ~VRING_DESC_F_AVAIL;
> + }
> +
> + if (i > 0) {
> + vq->desc_packed[vq->last_used_idx].flags = flags;
> + } else {
> + head_idx = vq->last_used_idx;
> + head_flags = flags;
> + }
> +
> + vq_inc_last_used_packed(vq, shadow_ring[i].count);
> + }
> +
> + vq->desc_packed[head_idx].flags = head_flags;
> +}
> +
> +static __rte_always_inline int
> +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq,
> + struct rte_mbuf *pkt,
> + struct buf_vector *buf_vec,
> + uint16_t *nr_descs,
> + uint16_t *nr_buffers,
> + struct vring_packed_desc *async_descs,
> + struct iovec *src_iovec, struct iovec *dst_iovec,
> + struct rte_vhost_iov_iter *src_it,
> + struct rte_vhost_iov_iter *dst_it)
> +{
> + uint16_t nr_vec = 0;
> + uint16_t avail_idx = vq->last_avail_idx;
> + uint16_t max_tries, tries = 0;
> + uint16_t buf_id = 0;
> + uint32_t len = 0;
> + uint16_t desc_count = 0;
> + uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
> + uint32_t buffer_len[vq->size];
> + uint16_t buffer_buf_id[vq->size];
> + uint16_t buffer_desc_count[vq->size];
> + *nr_buffers = 0;
nr_buffers and nr_descs are pointers to num_buffers and num_desc in
virtio_dev_rx_async_submit_packed(), and num_buffers and num_desc have no
initial values. I think you need to initialize them before passing their
pointers to another function, since that function reads and updates the
values they point to.
In addition, *nr_buffers is set to 0 here but *nr_descs is not, while both are set to
0 in virtio_dev_rx_async_single_packed(). That looks inconsistent; see the sketch below.
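One way to address both points, just a sketch of the idea with the call site
abbreviated ("..." stands for the remaining arguments, which I have not copied
from the patch), is to zero both counters where they are declared in
virtio_dev_rx_async_submit_packed() and let the callees only accumulate:

    uint16_t num_buffers = 0;
    uint16_t num_descs = 0;
    ...
    /* per-packet loop: the callee only adds to the counters */
    if (unlikely(virtio_dev_rx_async_single_packed(dev, vq, pkts[pkt_idx],
                &num_descs, &num_buffers, ...) < 0))
        break;

Then the *nr_descs = 0 / *nr_buffers = 0 pair in
virtio_dev_rx_async_single_packed() and the lone *nr_buffers = 0 in
vhost_enqueue_async_single_packed() become redundant and can be dropped, so
each counter is zeroed in exactly one place.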
> +
> + if (rxvq_is_mergeable(dev))
> + max_tries = vq->size - 1;
> + else
> + max_tries = 1;
> +
> + while (size > 0) {
> + /*
> + * if we tried all available ring items, and still
> + * can't get enough buf, it means something abnormal
> + * happened.
> + */
> + if (unlikely(++tries > max_tries))
> + return -1;
> +
> + if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
> + &buf_id, &len, VHOST_ACCESS_RW) < 0))
> + return -1;
> +
> + len = RTE_MIN(len, size);
> + size -= len;
> +
> + buffer_len[*nr_buffers] = len;
> + buffer_buf_id[*nr_buffers] = buf_id;
> + buffer_desc_count[*nr_buffers] = desc_count;
> + *nr_buffers += 1;
> +
> + *nr_descs += desc_count;
> + avail_idx += desc_count;
> + if (avail_idx >= vq->size)
> + avail_idx -= vq->size;
> + }
> +
> + if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, src_iovec, dst_iovec,
> + src_it, dst_it) < 0)
> + return -1;
> + /* store descriptors for DMA */
> + if (avail_idx >= *nr_descs) {
> + rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
> + *nr_descs * sizeof(struct vring_packed_desc));
> + } else {
> + uint16_t nr_copy = vq->size - vq->last_avail_idx;
A blank line is needed here, after the declaration.
> + rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
> + nr_copy * sizeof(struct vring_packed_desc));
> + rte_memcpy(async_descs + nr_copy, vq->desc_packed,
> + (*nr_descs - nr_copy) * sizeof(struct vring_packed_desc));
> + }
> +
> + vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
> +
> + return 0;
> +}
> +
> +static __rte_always_inline int16_t
> +virtio_dev_rx_async_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> + struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
> + struct vring_packed_desc *async_descs,
> + struct iovec *src_iovec, struct iovec *dst_iovec,
> + struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
> +{
> + struct buf_vector buf_vec[BUF_VECTOR_MAX];
> + *nr_descs = 0;
> + *nr_buffers = 0;
> +
> + if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
> + async_descs, src_iovec, dst_iovec,
> + src_it, dst_it) < 0)) {
> + VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
> + return -1;
> + }
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
> + dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
> +
> + return 0;
> +}
> +
> +static __rte_always_inline void
> +dma_error_handler_packed(struct vhost_virtqueue *vq, struct vring_packed_desc *async_descs,
> + uint16_t async_descs_idx, uint16_t slot_idx, uint32_t nr_err,
> + uint32_t *pkt_idx, uint32_t *num_async_pkts, uint32_t *num_done_pkts)
> +{
> + uint16_t descs_err = 0;
> + uint16_t buffers_err = 0;
> + struct async_inflight_info *pkts_info = vq->async_pkts_info;
> +
> + *num_async_pkts -= nr_err;
> + *pkt_idx -= nr_err;
> + /* calculate the sum of buffers and descs of DMA-error packets. */
> + while (nr_err-- > 0) {
> + descs_err += pkts_info[slot_idx % vq->size].descs;
> + buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
> + slot_idx--;
> + }
> +
> + vq->async_buffer_idx_packed -= buffers_err;
> +
> + if (vq->last_avail_idx >= descs_err) {
> + vq->last_avail_idx -= descs_err;
> +
> + rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
> + &async_descs[async_descs_idx - descs_err],
> + descs_err * sizeof(struct vring_packed_desc));
> + } else {
> + uint16_t nr_copy;
> +
> + vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
> + nr_copy = vq->size - vq->last_avail_idx;
> + rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
> + &async_descs[async_descs_idx - descs_err],
> + nr_copy * sizeof(struct vring_packed_desc));
> + descs_err -= nr_copy;
> + rte_memcpy(&vq->desc_packed[0], &async_descs[async_descs_idx - descs_err],
> + descs_err * sizeof(struct vring_packed_desc));
> + vq->avail_wrap_counter ^= 1;
> + }
> +
> + *num_done_pkts = *pkt_idx - *num_async_pkts;
> +}
> +
> +static __rte_noinline uint32_t
> +virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> + struct vhost_virtqueue *vq, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint32_t count,
> + struct rte_mbuf **comp_pkts, uint32_t *comp_count)
> +{
> + uint32_t pkt_idx = 0, pkt_burst_idx = 0;
> + uint16_t async_descs_idx = 0;
> + uint16_t num_buffers;
> + uint16_t num_desc;
How about using "num_descs" to make the naming consistent with "num_buffers"?
Thanks,
Jiayu