[PATCH v4 1/2] vhost: support clear in-flight packets for async dequeue
Hu, Jiayu
jiayu.hu at intel.com
Thu Jun 9 09:06:26 CEST 2022
Hi Yuan,
> -----Original Message-----
> From: Wang, YuanX <yuanx.wang at intel.com>
> Sent: Tuesday, June 7, 2022 1:45 AM
> To: maxime.coquelin at redhat.com; Xia, Chenbo <chenbo.xia at intel.com>;
> dev at dpdk.org
> Cc: Hu, Jiayu <jiayu.hu at intel.com>; Ding, Xuan <xuan.ding at intel.com>; Pai
> G, Sunil <sunil.pai.g at intel.com>; Wang, YuanX <yuanx.wang at intel.com>
> Subject: [PATCH v4 1/2] vhost: support clear in-flight packets for async
> dequeue
>
> rte_vhost_clear_queue_thread_unsafe() supports clearing in-flight packets
> for the async enqueue path only. Now that async dequeue is supported, this
> API should cover async dequeue as well.
>
> This patch also adds a thread-safe version of this API; the difference
> between the two is that the thread-safe version takes the virtqueue lock.
>
> These APIs may be used to clean up packets in the async channel to prevent
> packet loss when the device state changes or when the device is destroyed.
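It might also help users to have a short usage example in the programmer's guide,
e.g. draining a virtqueue before the device is destroyed. Only a sketch; CLEAR_BURST
and the draining loop are illustrative, and dma_id/vchan_id are assumed to have been
configured beforehand with rte_vhost_async_dma_configure():

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define CLEAR_BURST 32

static void
drain_async_vq(int vid, uint16_t queue_id, int16_t dma_id, uint16_t vchan_id)
{
    struct rte_mbuf *pkts[CLEAR_BURST];
    uint16_t n;

    /* Keep polling until no completed in-flight packets are returned. */
    while ((n = rte_vhost_clear_queue(vid, queue_id, pkts,
                    CLEAR_BURST, dma_id, vchan_id)) > 0)
        rte_pktmbuf_free_bulk(pkts, n);
}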
>
> Signed-off-by: Yuan Wang <yuanx.wang at intel.com>
> ---
>  doc/guides/prog_guide/vhost_lib.rst    |  8 ++-
>  doc/guides/rel_notes/release_22_07.rst |  5 ++
>  lib/vhost/rte_vhost_async.h            | 25 ++++++++
>  lib/vhost/version.map                  |  1 +
>  lib/vhost/virtio_net.c                 | 82 +++++++++++++++++++++++++-
>  5 files changed, 118 insertions(+), 3 deletions(-)
>
> diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
> index cd3f6caa9a..b9545770d0 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -288,7 +288,13 @@ The following is an overview of some key Vhost API functions:
>
>  * ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count, dma_id, vchan_id)``
>
> -  Clear inflight packets which are submitted to DMA engine in vhost async data
> +  Clear in-flight packets which are submitted to async channel in vhost
> +  async data path without performing any locking. Completed packets are
Better to specify "without performing locking on virtqueue".
> +  returned to applications through ``pkts``.
> +
> +* ``rte_vhost_clear_queue(vid, queue_id, **pkts, count, dma_id, vchan_id)``
> +
> +  Clear in-flight packets which are submitted to async channel in vhost
> +  async data
>    path. Completed packets are returned to applications through ``pkts``.
>
>  * ``rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id, struct rte_vhost_stat_name *names, unsigned int size)``
>
> diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
> index c81383f4a3..2ca06b543c 100644
> --- a/doc/guides/rel_notes/release_22_07.rst
> +++ b/doc/guides/rel_notes/release_22_07.rst
> @@ -147,6 +147,11 @@ New Features
>    Added vhost async dequeue API which can leverage DMA devices to
>    accelerate receiving pkts from guest.
>
> +* **Added thread-safe version of inflight packet clear API in vhost library.**
> +
> +  Added an API which can clear the inflight packets submitted to the
> +  async channel in a thread-safe manner in the vhost async data path.
> +
> Removed Items
> -------------
>
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index a1e7f674ed..1db2a10124 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -183,6 +183,31 @@ uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
>      struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
>      uint16_t vchan_id);
>
> +/**
> + * This function checks async completion status and clears packets for
> + * a specific vhost device queue. Packets which are in-flight will be
> + * returned in an array.
> + *
> + * @param vid
> + * ID of vhost device to clear data
> + * @param queue_id
> + * Queue id to clear data
> + * @param pkts
> + * Blank array to get return packet pointer
> + * @param count
> + * Size of the packet array
> + * @param dma_id
> + * The identifier of the DMA device
> + * @param vchan_id
> + * The identifier of virtual DMA channel
> + * @return
> + * Number of packets returned
> + */
> +__rte_experimental
> +uint16_t rte_vhost_clear_queue(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
> + uint16_t vchan_id);
> +
>  /**
>   * The DMA vChannels used in asynchronous data path must be configured
>   * first. So this function needs to be called before enabling DMA
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 4880b9a422..9329f88e79 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -95,6 +95,7 @@ EXPERIMENTAL {
> rte_vhost_vring_stats_reset;
> rte_vhost_async_try_dequeue_burst;
> rte_vhost_driver_get_vdpa_dev_type;
> + rte_vhost_clear_queue;
> };
>
> INTERNAL {
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 68a26eb17d..a90ae3cb96 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -26,6 +26,11 @@
>
> #define MAX_BATCH_LEN 256
>
> +static __rte_always_inline uint16_t
> +async_poll_dequeue_completed_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +    struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
> +    uint16_t vchan_id, bool legacy_ol_flags);
> +
>  /* DMA device copy operation tracking array. */
>  struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
>
> @@ -2155,7 +2160,7 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
>          return 0;
>
>      VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
> -    if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> +    if (unlikely(queue_id >= dev->nr_vring)) {
>          VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
>              dev->ifname, __func__, queue_id);
>          return 0;
> @@ -2182,7 +2187,18 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
>          return 0;
>      }
>
> -    n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
> + if (queue_id % 2 == 0)
Replacing "%" with "&" should help the performance a bit.
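Since queue_id is unsigned, the bitwise test is equivalent and avoids the division,
i.e. something along the lines of:

    /* Even virtqueue index takes the enqueue completion path. */
    if ((queue_id & 0x1) == 0)
        n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
                    pkts, count, dma_id, vchan_id);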
> +        n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id,
> +                    pkts, count, dma_id, vchan_id);
> +    else {
> +        if (unlikely(vq_is_packed(dev)))
> +            VHOST_LOG_DATA(ERR,
> +                "(%d) %s: async dequeue does not support packed ring.\n",
> +                dev->vid, __func__);
> +        else
> +            n_pkts_cpl = async_poll_dequeue_completed_split(dev, vq, pkts, count,
> +                    dma_id, vchan_id, dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS);
> +    }
>
>      vhost_queue_stats_update(dev, vq, pkts, n_pkts_cpl);
>      vq->stats.inflight_completed += n_pkts_cpl;
> @@ -2190,6 +2206,68 @@ rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
>      return n_pkts_cpl;
>  }
>
> +uint16_t
> +rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
> +    uint16_t count, int16_t dma_id, uint16_t vchan_id)
> +{
> +    struct virtio_net *dev = get_device(vid);
> +    struct vhost_virtqueue *vq;
> +    uint16_t n_pkts_cpl = 0;
> +
> +    if (!dev)
> +        return 0;
> +
> +    VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
> +    if (unlikely(queue_id >= dev->nr_vring)) {
> +        VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
> +            dev->ifname, __func__, queue_id);
> +        return 0;
> +    }
> +
> +    vq = dev->virtqueue[queue_id];
> +
> +    if (!rte_spinlock_trylock(&vq->access_lock)) {
> +        VHOST_LOG_DATA(ERR,
> +            "(%d) %s: failed to clear async queue id %d, virtqueue busy.\n",
> +            dev->vid, __func__, queue_id);
> +        return 0;
> +    }
Failing to acquire the lock shouldn't be treated as an error.
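A DEBUG-level message (or no message at all) would avoid flooding the log when the
data path simply holds the lock; just a sketch of what is meant:

    if (!rte_spinlock_trylock(&vq->access_lock)) {
        VHOST_LOG_DATA(DEBUG,
            "(%s) %s: virtqueue %u is busy, clearing skipped.\n",
            dev->ifname, __func__, queue_id);
        return 0;
    }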
> +
> +    if (unlikely(!vq->async)) {
> +        VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
> +            dev->ifname, __func__, queue_id);
> +        goto out_access_unlock;
> +    }
> +
> +    if (unlikely(!dma_copy_track[dma_id].vchans ||
> +        !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
> +        VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n",
> +            dev->ifname, __func__, dma_id, vchan_id);
> +        goto out_access_unlock;
> +    }
Also need to check if dma_id is valid.
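Otherwise dma_copy_track[dma_id] can be indexed with an out-of-range id. A sketch
of the kind of check meant:

    if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
        VHOST_LOG_DATA(ERR, "(%s) %s: invalid dma id %d.\n",
            dev->ifname, __func__, dma_id);
        goto out_access_unlock;
    }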
> +
> + if (queue_id % 2 == 0)
Ditto.
Thanks,
Jiayu