[dpdk-dev] [PATCH v2 2/3] vhost: move dirty logging cache out of the virtqueue

David Marchand david.marchand at redhat.com
Tue Mar 16 14:13:18 CET 2021


On Tue, Mar 16, 2021 at 1:42 PM Maxime Coquelin
<maxime.coquelin at redhat.com> wrote:
>
> This patch moves the per-virtqueue dirty logging cache
> out of the virtqueue struct, by allocating it dynamically
> only when live migration is enabled.
>
> It saves 8 cachelines in the vhost_virtqueue struct.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
> ---
>  lib/librte_vhost/vhost.c      | 14 ++++++++++++++
>  lib/librte_vhost/vhost.h      |  2 +-
>  lib/librte_vhost/vhost_user.c | 25 +++++++++++++++++++++++++
>  3 files changed, 40 insertions(+), 1 deletion(-)
>
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 5a7c0c6cff..c3490ce897 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -145,6 +145,10 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
>         if (unlikely(!dev->log_base))
>                 return;
>
> +       /* No cache, nothing to sync */
> +       if (unlikely(!vq->log_cache))
> +               return;
> +
>         rte_atomic_thread_fence(__ATOMIC_RELEASE);
>
>         log_base = (unsigned long *)(uintptr_t)dev->log_base;
> @@ -177,6 +181,14 @@ vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
>         uint32_t offset = page / (sizeof(unsigned long) << 3);
>         int i;
>
> +       if (unlikely(!vq->log_cache)) {
> +               /* No logging cache allocated, write dirty log map directly */
> +               rte_smp_wmb();

We try not to reintroduce full barriers (checkpatch caught this).
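Something like this instead, reusing the release fence already used in
__vhost_log_cache_sync() a few lines above (untested sketch):

        if (unlikely(!vq->log_cache)) {
                /* No logging cache allocated, write dirty log map directly */
                rte_atomic_thread_fence(__ATOMIC_RELEASE);
                vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
                return;
        }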


> +               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
> +
> +               return;
> +       }
> +
>         for (i = 0; i < vq->log_cache_nb_elem; i++) {
>                 struct log_cache_entry *elem = vq->log_cache + i;
>
> @@ -354,6 +366,8 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>         }
>         rte_free(vq->batch_copy_elems);
>         rte_mempool_free(vq->iotlb_pool);
> +       if (vq->log_cache)
> +               rte_free(vq->log_cache);

No if() needed.
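rte_free() is a no-op on a NULL pointer, like free(), so this reduces to
a plain:

        rte_free(vq->log_cache);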


>         rte_free(vq);
>  }
>
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 717f410548..3a71dfeed9 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -183,7 +183,7 @@ struct vhost_virtqueue {
>         bool                    used_wrap_counter;
>         bool                    avail_wrap_counter;
>
> -       struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
> +       struct log_cache_entry *log_cache;
>         uint16_t log_cache_nb_elem;
>
>         rte_rwlock_t    iotlb_lock;
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index a60bb945ad..0f452d6ff3 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -2022,6 +2022,11 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
>         rte_free(vq->batch_copy_elems);
>         vq->batch_copy_elems = NULL;
>
> +       if (vq->log_cache) {
> +               rte_free(vq->log_cache);
> +               vq->log_cache = NULL;
> +       }

Idem.
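Which here would be:

        rte_free(vq->log_cache);
        vq->log_cache = NULL;

The NULL assignment is still worth keeping here, since the virtqueue can
be restarted after GET_VRING_BASE.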


> +
>         msg->size = sizeof(msg->payload.state);
>         msg->fd_num = 0;
>
> @@ -2121,6 +2126,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
>         int fd = msg->fds[0];
>         uint64_t size, off;
>         void *addr;
> +       uint32_t i;
>
>         if (validate_msg_fds(msg, 1) != 0)
>                 return RTE_VHOST_MSG_RESULT_ERR;
> @@ -2174,6 +2180,25 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
>         dev->log_base = dev->log_addr + off;
>         dev->log_size = size;
>
> +       for (i = 0; i < dev->nr_vring; i++) {
> +               struct vhost_virtqueue *vq = dev->virtqueue[i];
> +
> +               if (vq->log_cache) {
> +                       rte_free(vq->log_cache);
> +                       vq->log_cache = NULL;
> +               }

Idem.
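And in this case the NULL assignment can go away too, since vq->log_cache
is overwritten by the rte_zmalloc() result a few lines below:

        rte_free(vq->log_cache);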


> +               vq->log_cache_nb_elem = 0;
> +               vq->log_cache = rte_zmalloc("vq log cache",
> +                               sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
> +                               0);
> +               /*
> +                * If the log cache allocation fails, don't fail migration;
> +                * caching is simply disabled, which will impact performance.
> +                */
> +               if (!vq->log_cache)
> +                       VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
> +       }
> +
>         /*
>          * The spec is not clear about it (yet), but QEMU doesn't expect
>          * any payload in the reply.
> --
> 2.29.2
>


-- 
David Marchand


