[PATCH v6 4/5] vhost_user: Function defs for add/rem mem regions
Bathija, Pravin
Pravin.Bathija at dell.com
Wed Feb 11 11:20:23 CET 2026
Dear Maxime,
Answers inline.
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin at redhat.com>
> Sent: Monday, January 26, 2026 11:58 AM
> To: Bathija, Pravin <Pravin.Bathija at dell.com>
> Cc: dev at dpdk.org; fengchengwen at huawei.com
> Subject: Re: [PATCH v6 4/5] vhost_user: Function defs for add/rem mem
> regions
>
> On Thu, Jan 22, 2026 at 8:49 AM <pravin.bathija at dell.com> wrote:
> >
> > From: Pravin M Bathija <pravin.bathija at dell.com>
> >
> > These changes add the function definitions for the add/remove memory
> > region calls, which are invoked on receiving the corresponding
> > vhost-user messages from the vhost-user front-end (e.g. QEMU). In
> > addition to testing with the QEMU front-end, testing has also been
> > performed with a libblkio front-end and an SPDK/DPDK back-end: we did
> > I/O through a libblkio-based device driver to SPDK-based drives. There
> > are also changes to set_mem_table and a new definition for get memory
> > slots; our changes rework the set memory table call to use common
> > support functions. The get memory slots message is how the vhost-user
> > front-end queries the vhost-user back-end for the number of memory
> > slots available to be registered by the back-end. In addition, a
> > support function to invalidate vrings is defined, which is used in the
> > add/remove memory region functions.
> >
> > Signed-off-by: Pravin M Bathija <pravin.bathija at dell.com>
> > ---
> >  lib/vhost/vhost_user.c | 234 +++++++++++++++++++++++++++++++++++------
> >  1 file changed, 202 insertions(+), 32 deletions(-)
> >
> > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> > index e2831b7625..1a2dcfbbca 100644
> > --- a/lib/vhost/vhost_user.c
> > +++ b/lib/vhost/vhost_user.c
> > @@ -71,6 +71,9 @@ VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, t
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
> > +VHOST_MESSAGE_HANDLER(VHOST_USER_GET_MAX_MEM_SLOTS, vhost_user_get_max_mem_slots, false, false) \
> > +VHOST_MESSAGE_HANDLER(VHOST_USER_ADD_MEM_REG, vhost_user_add_mem_reg, true, true) \
> > +VHOST_MESSAGE_HANDLER(VHOST_USER_REM_MEM_REG, vhost_user_rem_mem_reg, false, true) \
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
> >  VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
> > @@ -1520,7 +1523,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> >          struct virtio_net *dev = *pdev;
> >          struct VhostUserMemory *memory = &ctx->msg.payload.memory;
> >          struct rte_vhost_mem_region *reg;
> > -        int numa_node = SOCKET_ID_ANY;
> >          uint64_t mmap_offset;
> >          uint32_t i;
> >          bool async_notify = false;
> > @@ -1565,39 +1567,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> >                  if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
> >                          vhost_user_iotlb_flush_all(dev);
> >
> > -                free_mem_region(dev);
> > +                free_all_mem_regions(dev);
> >                  rte_free(dev->mem);
> >                  dev->mem = NULL;
> >          }
> >
> > -        /*
> > -         * If VQ 0 has already been allocated, try to allocate on the same
> > -         * NUMA node. It can be reallocated later in numa_realloc().
> > -         */
> > -        if (dev->nr_vring > 0)
> > -                numa_node = dev->virtqueue[0]->numa_node;
> > -
> > -        dev->nr_guest_pages = 0;
> > -        if (dev->guest_pages == NULL) {
> > -                dev->max_guest_pages = 8;
> > -                dev->guest_pages = rte_zmalloc_socket(NULL,
> > -                                        dev->max_guest_pages *
> > -                                        sizeof(struct guest_page),
> > -                                        RTE_CACHE_LINE_SIZE,
> > -                                        numa_node);
> > -                if (dev->guest_pages == NULL) {
> > -                        VHOST_CONFIG_LOG(dev->ifname, ERR,
> > -                                "failed to allocate memory for dev->guest_pages");
> > -                        goto close_msg_fds;
> > -                }
> > -        }
> > -
> > -        dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
> > -                sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
> > -        if (dev->mem == NULL) {
> > -                VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
> > -                goto free_guest_pages;
> > -        }
> > +        if (vhost_user_initialize_memory(pdev) < 0)
> > +                goto close_msg_fds;
> >
> >          for (i = 0; i < memory->nregions; i++) {
> >                  reg = &dev->mem->regions[i];
> > @@ -1661,11 +1637,167 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> >          return RTE_VHOST_MSG_RESULT_OK;
> >
> >  free_mem_table:
> > -        free_mem_region(dev);
> > +        free_all_mem_regions(dev);
> >          rte_free(dev->mem);
> >          dev->mem = NULL;
> > +        rte_free(dev->guest_pages);
> > +        dev->guest_pages = NULL;
> > +close_msg_fds:
> > +        close_msg_fds(ctx);
> > +        return RTE_VHOST_MSG_RESULT_ERR;
> > +}
> > +
> > +
> > +static int
> > +vhost_user_get_max_mem_slots(struct virtio_net **pdev __rte_unused,
> > +                struct vhu_msg_context *ctx,
> > +                int main_fd __rte_unused)
> > +{
> > +        uint32_t max_mem_slots = VHOST_MEMORY_MAX_NREGIONS;
> > +
> > +        ctx->msg.payload.u64 = (uint64_t)max_mem_slots;
> > +        ctx->msg.size = sizeof(ctx->msg.payload.u64);
> > +        ctx->fd_num = 0;
> > +
> > +        return RTE_VHOST_MSG_RESULT_REPLY;
> > +}
> > +
> > +static void
> > +dev_invalidate_vrings(struct virtio_net *dev)
> > +{
> > +        uint32_t i;
> > +
> > +        for (i = 0; i < dev->nr_vring; i++) {
> > +                struct vhost_virtqueue *vq = dev->virtqueue[i];
> > +
> > +                if (!vq)
> > +                        continue;
> >
> > -free_guest_pages:
> > +                if (vq->desc || vq->avail || vq->used) {
> > +                        /* vhost_user_lock_all_queue_pairs locked all qps */
> > +                        VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_ADD_MEM_REG);
> > +
> > +                        /*
> > +                         * If the memory table got updated, the ring addresses
> > +                         * need to be translated again as virtual addresses have
> > +                         * changed.
> > +                         */
> > +                        vring_invalidate(dev, vq);
> > +
> > +                        translate_ring_addresses(&dev, &vq);
> > +                }
> > +        }
> > +}
> > +
> > +static int
> > +vhost_user_add_mem_reg(struct virtio_net **pdev,
> > +                struct vhu_msg_context *ctx,
> > +                int main_fd __rte_unused)
>
> The parameter is marked as unused but is actually used.
I have made the corrections in the amended patch-set, which I will submit following this e-mail.
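
For reference, the corrected prototype in the amended patch-set will look roughly like this (a sketch; only the attribute changes):

    static int
    vhost_user_add_mem_reg(struct virtio_net **pdev,
                    struct vhu_msg_context *ctx,
                    int main_fd)    /* __rte_unused dropped: main_fd is passed to
                                     * vhost_user_postcopy_register() below */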
>
> > +{
> > +        uint32_t i;
> > +        struct virtio_net *dev = *pdev;
> > +        struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
> > +
> > +        /* convert first region add to normal memory table set */
> > +        if (dev->mem == NULL) {
> > +                if (vhost_user_initialize_memory(pdev) < 0)
> > +                        goto close_msg_fds;
> > +        }
> > +
> > +        /* make sure new region will fit */
> > +        if (dev->mem->nregions >= VHOST_MEMORY_MAX_NREGIONS) {
> > +                VHOST_CONFIG_LOG(dev->ifname, ERR, "too many memory regions already (%u)",
> > +                        dev->mem->nregions);
> > +                goto close_msg_fds;
> > +        }
> > +
> > +        /* make sure supplied memory fd present */
> > +        if (ctx->fd_num != 1) {
> > +                VHOST_CONFIG_LOG(dev->ifname, ERR, "fd count makes no sense (%u)", ctx->fd_num);
> > +                goto close_msg_fds;
> > +        }
> > +
> > +        /* Make sure no overlap in guest virtual address space */
> > +        if (dev->mem != NULL && dev->mem->nregions > 0) {
> > +                for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> > +                        struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
> > +
> > +                        if (current_region->mmap_size == 0)
> > +                                continue;
> > +
> > +                        uint64_t current_region_guest_start = current_region->guest_user_addr;
> > +                        uint64_t current_region_guest_end = current_region_guest_start
> > +                                + current_region->mmap_size - 1;
> > +                        uint64_t proposed_region_guest_start = region->userspace_addr;
> > +                        uint64_t proposed_region_guest_end = proposed_region_guest_start
> > +                                + region->memory_size - 1;
> > +                        bool overlap = false;
> > +
> > +                        overlap = !((proposed_region_guest_end < current_region_guest_start) ||
> > +                                (proposed_region_guest_start > current_region_guest_end));
> > +
> > +                        if (overlap) {
> > +                                VHOST_CONFIG_LOG(dev->ifname, ERR,
> > +                                        "requested memory region overlaps with another region");
> > +                                VHOST_CONFIG_LOG(dev->ifname, ERR,
> > +                                        "\tRequested region address:0x%" PRIx64,
> > +                                        region->userspace_addr);
> > +                                VHOST_CONFIG_LOG(dev->ifname, ERR,
> > +                                        "\tRequested region size:0x%" PRIx64,
> > +                                        region->memory_size);
> > +                                VHOST_CONFIG_LOG(dev->ifname, ERR,
> > +                                        "\tOverlapping region address:0x%" PRIx64,
> > +                                        current_region->guest_user_addr);
> > +                                VHOST_CONFIG_LOG(dev->ifname, ERR,
> > +                                        "\tOverlapping region size:0x%" PRIx64,
> > +                                        current_region->mmap_size);
> > +                                goto close_msg_fds;
> > +                        }
> > +
> > +                }
> > +        }
> > +
> > +        /* find a new region and set it like memory table set does */
> > +        struct rte_vhost_mem_region *reg = NULL;
> > +
> > +        for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> > +                if (dev->mem->regions[i].guest_user_addr == 0) {
> > +                        reg = &dev->mem->regions[i];
> > +                        break;
> > +                }
> > +        }
> > +        if (reg == NULL) {
> > +                VHOST_CONFIG_LOG(dev->ifname, ERR, "no free memory region");
> > +                goto close_msg_fds;
> > +        }
> > +
> > +        reg->guest_phys_addr = region->guest_phys_addr;
> > +        reg->guest_user_addr = region->userspace_addr;
> > +        reg->size = region->memory_size;
> > +        reg->fd = ctx->fds[0];
> > +
> > +        if (vhost_user_mmap_region(dev, reg, region->mmap_offset) < 0) {
> > +                VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region");
> > +                goto close_msg_fds;
> > +        }
> > +
> > +        dev->mem->nregions++;
> > +
> > +        if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> > +                async_dma_map_region(dev, reg, true);
> > +
> > +        if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
>
> Post-copy live-migration is broken.
> It assumes the vhost-user message is of type SET_MEM_TABLE, and so
> assumes ctx contains a VhostUserMemory payload.
> But here ctx contains a VhostUserMemoryRegion payload.
>
> Given the complexity of the patch, it is worrying that such issues are
> overlooked.
> What other features could be broken?
Although I tested this before, I have again tested post-copy live migration successfully with QEMU, using the following steps:
- Ran dpdk-testpmd with the postcopy migration option enabled.
- Ran two instances of QEMU, source and destination. The source was running Alpine Linux, while the destination was started with the option "-incoming defer".
- Performed an add memory region on the source VM, which invoked the vhost_user_postcopy_register function. After this, I did the same add memory region for the destination VM.
- The "post_copy_register" debug messages, which I added in this upcoming patch-set, were correctly seen on the testpmd console.
- Following this, I turned on the migrate capabilities on the source and destination VMs at the QEMU prompt. For the destination VM, I also set the incoming migration to TCP port 4444.
- Set the "migrate" destination to host 127.0.0.1, TCP port 4444, and invoked the command "migrate_start_postcopy".
After the above steps, the Alpine Linux VM memory pages were correctly migrated to the destination VM. I would be happy to share console logs and step-by-step commands for the above test.
Source code clarification: in the function vhost_user_add_mem_reg, we receive a single region of type "VhostUserMemoryRegion" in the message payload. This single memory region is converted to type rte_vhost_mem_region (variable reg, line#1761) and added to the list of memory regions via the call to vhost_user_mmap_region (line#1779). By the time vhost_user_postcopy_register (line#1794) is called, the newly added region is already part of the "dev" structure.
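
To make that flow concrete, here is a condensed sketch of the add-region path (identifiers are taken from the hunks quoted above, except find_free_region_slot(), a hypothetical stand-in for the free-slot lookup loop; error handling omitted):

    /* Sketch: how VHOST_USER_ADD_MEM_REG turns the wire payload into a
     * registered back-end region. */
    struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
    struct rte_vhost_mem_region *reg = find_free_region_slot(dev); /* hypothetical helper */

    /* convert the wire-format region to the back-end representation */
    reg->guest_phys_addr = region->guest_phys_addr;
    reg->guest_user_addr = region->userspace_addr;
    reg->size = region->memory_size;
    reg->fd = ctx->fds[0];

    vhost_user_mmap_region(dev, reg, region->mmap_offset); /* mmaps the fd, fills in reg */
    dev->mem->nregions++;

    /* at this point the new region is visible through dev->mem, so the
     * postcopy registration below operates on the fully populated region */
    vhost_user_postcopy_register(dev, main_fd, ctx);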
>
> > +                goto free_mem_table;
> > +
> > +        dev_invalidate_vrings(dev);
> > +        dump_guest_pages(dev);
> > +
> > +        return RTE_VHOST_MSG_RESULT_OK;
> > +
> > +free_mem_table:
> > +        free_all_mem_regions(dev);
> > +        rte_free(dev->mem);
> > +        dev->mem = NULL;
> >          rte_free(dev->guest_pages);
> >          dev->guest_pages = NULL;
> >  close_msg_fds:
> > @@ -1673,6 +1805,44 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> >          return RTE_VHOST_MSG_RESULT_ERR;
> >  }
> >
> > +static int
> > +vhost_user_rem_mem_reg(struct virtio_net **pdev __rte_unused,
> > +                struct vhu_msg_context *ctx __rte_unused,
>
> pdev and ctx are actually used.
I have made the corrections in the amended patch-set, which I will submit following this e-mail.
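
Concretely, the amended prototype will drop the attributes from pdev and ctx, while main_fd genuinely remains unused in this handler (a sketch):

    static int
    vhost_user_rem_mem_reg(struct virtio_net **pdev,
                    struct vhu_msg_context *ctx,
                    int main_fd __rte_unused)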
>
>
> > +                int main_fd __rte_unused)
> > +{
> > +        uint32_t i;
> > +        struct virtio_net *dev = *pdev;
> > +        struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
> > +
> > +        if (dev->mem != NULL && dev->mem->nregions > 0) {
> > +                for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> > +                        struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
> > +
> > +                        if (current_region->guest_user_addr == 0)
> > +                                continue;
> > +
> > +                        /*
> > +                         * According to the vhost-user specification:
> > +                         * The memory region to be removed is identified by its guest
> > +                         * address, user address and size. The mmap offset is ignored.
> > +                         */
> > +                        if (region->userspace_addr == current_region->guest_user_addr
> > +                                        && region->guest_phys_addr == current_region->guest_phys_addr
> > +                                        && region->memory_size == current_region->size) {
> > +                                if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> > +                                        async_dma_map_region(dev, current_region, false);
> > +                                dev_invalidate_vrings(dev);
> > +                                free_mem_region(current_region);
> > +                                dev->mem->nregions--;
> > +                                return RTE_VHOST_MSG_RESULT_OK;
> > +                        }
> > +                }
> > +        }
> > +
> > +        VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to find region");
> > +        return RTE_VHOST_MSG_RESULT_ERR;
> > +}
> > +
> >  static bool
> >  vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
> >  {
> > --
> > 2.43.0
> >