|SUCCESS| [5/5] vhost_user: enable configure memory slots
dpdklab at iol.unh.edu
Thu Apr 2 17:52:02 CEST 2026
Test-Label: ai-code-review
Test-Status: SUCCESS
https://dpdk.org/patch/162832
_AI Code Review_
=== Patch Review: series.patch (via Claude) ===
Review date: 2026-04-02
commit f54a8b12e89c3a2f0d1234567890abcdef123456
Author: Pravin M Bathija <pravin.bathija at dell.com>
vhost: add user to mailmap and define to vhost hdr
The patches in this series add support for dynamically adding and
removing memory regions in the vhost-user back-end in DPDK.
---
> diff --git a/.mailmap b/.mailmap
> index 0e0d83e1c6..cc44e27036 100644
> --- a/.mailmap
> +++ b/.mailmap
> @@ -1295,6 +1295,7 @@ Prateek Agarwal <prateekag at cse.iitb.ac.in>
> Prathisna Padmasanan <prathisna.padmasanan at intel.com>
> Praveen Kaligineedi <pkaligineedi at google.com>
> Praveen Shetty <praveen.shetty at intel.com>
> +Pravin M Bathija <pravin.bathija at dell.com>
> Pravin Pathak <pravin.pathak.dev at gmail.com> <pravin.pathak at intel.com>
> Prince Takkar <ptakkar at marvell.com>
> Priyalee Kushwaha <priyalee.kushwaha at intel.com>
[ ... ]
> diff --git a/lib/vhost/vhost_user.h b/lib/vhost/vhost_user.h
> index ef486545ba..f8d921f7f1 100644
> --- a/lib/vhost/vhost_user.h
> +++ b/lib/vhost/vhost_user.h
> @@ -67,6 +67,9 @@ typedef enum VhostUserRequest {
> VHOST_USER_POSTCOPY_END = 30,
> VHOST_USER_GET_INFLIGHT_FD = 31,
> VHOST_USER_SET_INFLIGHT_FD = 32,
> + VHOST_USER_GET_MAX_MEM_SLOTS = 36,
> + VHOST_USER_ADD_MEM_REG = 37,
> + VHOST_USER_REM_MEM_REG = 38,
These values match the current vhost-user specification:
VHOST_USER_GET_MAX_MEM_SLOTS is 36, VHOST_USER_ADD_MEM_REG is 37 and
VHOST_USER_REM_MEM_REG is 38. Note the enum jumps from 32 to 36: requests
33-35 (GPU_SET_SOCKET, RESET_DEVICE and VRING_KICK in the spec numbering)
are not defined in this header, so the explicit initializers are what keep
the new entries aligned. Could you add a comment documenting the
intentional gap so future additions do not collide?
[ ... ]
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 4bfb13fb98..e2eb1c8f4a 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> +static void
> +free_mem_region(struct rte_vhost_mem_region *reg)
> +{
> + if (reg != NULL && reg->mmap_addr) {
> + munmap(reg->mmap_addr, reg->mmap_size);
> + close(reg->fd);
> + memset(reg, 0, sizeof(struct rte_vhost_mem_region));
> + }
> +}
munmap() and close() failures are silently ignored here. Neither can be
meaningfully recovered from, but should they at least be logged to help
diagnose fd or address-space leaks?
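A minimal logging sketch, assuming the helper gained a dev parameter so
VHOST_CONFIG_LOG has an ifname (errno.h/string.h would also be needed):

    static void
    free_mem_region(struct virtio_net *dev, struct rte_vhost_mem_region *reg)
    {
        if (reg == NULL || reg->mmap_addr == NULL)
            return;
        if (munmap(reg->mmap_addr, reg->mmap_size) < 0)
            VHOST_CONFIG_LOG(dev->ifname, ERR,
                "munmap of region failed: %s", strerror(errno));
        if (close(reg->fd) < 0)
            VHOST_CONFIG_LOG(dev->ifname, ERR,
                "close of region fd %d failed: %s", reg->fd, strerror(errno));
        memset(reg, 0, sizeof(*reg));
    }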
> +static void
> +free_all_mem_regions(struct virtio_net *dev)
> {
> uint32_t i;
> struct rte_vhost_mem_region *reg;
> @@ -236,12 +292,10 @@ free_mem_region(struct virtio_net *dev)
> if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> async_dma_map(dev, false);
>
> - for (i = 0; i < dev->mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> reg = &dev->mem->regions[i];
> - if (reg->host_user_addr) {
> - munmap(reg->mmap_addr, reg->mmap_size);
> - close(reg->fd);
> - }
> + if (reg->mmap_addr)
> + free_mem_region(reg);
> }
> }
This loop now scans all VHOST_MEMORY_MAX_NREGIONS slots instead of
dev->mem->nregions. Since vhost_user_rem_mem_reg() compacts the array
after every removal, the first nregions slots should stay dense; is the
full-array scan deliberate defensiveness? Unused slots do appear safe to
visit, as the table is zero-allocated and free_mem_region() memsets freed
entries, so mmap_addr is NULL for them.
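If the compaction invariant is relied upon, a tighter sketch (assuming
holes can never exist) would be:

    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        if (reg->mmap_addr)  /* defensive; a dense array makes this true */
            free_mem_region(reg);
    }
    dev->mem->nregions = 0;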
> @@ -808,8 +862,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
> uint32_t i;
> uintptr_t hua = (uintptr_t)ptr;
>
> - for (i = 0; i < mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> r = &mem->regions[i];
> + if (r->host_user_addr == 0)
> + continue;
Can a legitimate region have host_user_addr == 0? mmap() will not return
address 0 for these mappings in practice, so the sentinel works, but
checking r->size == 0 (a zero-length region can never be valid) would be
a more self-evidently safe emptiness test.
[ ... ]
> +static int
> +vhost_user_initialize_memory(struct virtio_net **pdev)
> +{
> + struct virtio_net *dev = *pdev;
> + int numa_node = SOCKET_ID_ANY;
> +
> + if (dev->mem != NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR,
> + "memory already initialized, free it first");
> + return -1;
> + }
The function already returns an error when dev->mem is non-NULL instead
of freeing and reinitializing, so nothing leaks here. Is rejecting the
call the intended behavior, or should a stale table (e.g. left over
after a reconnect) be torn down and rebuilt?
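If tearing down first were preferred, a sketch using this series'
free_all_mem_regions() helper (leaving aside whether a live guest could
still reference the old table):

    if (dev->mem != NULL) {
        free_all_mem_regions(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
    }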
> + if (dev->nr_vring > 0)
> + numa_node = dev->virtqueue[0]->numa_node;
> +
> + dev->nr_guest_pages = 0;
> + if (dev->guest_pages == NULL) {
> + dev->max_guest_pages = VHOST_MEMORY_MAX_NREGIONS;
> + dev->guest_pages = rte_zmalloc_socket(NULL,
> + dev->max_guest_pages *
> + sizeof(struct guest_page),
> + RTE_CACHE_LINE_SIZE,
> + numa_node);
> + if (dev->guest_pages == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR,
> + "failed to allocate memory for dev->guest_pages");
> + return -1;
> + }
> + }
The code allocates dev->guest_pages only when it is NULL and reuses the
existing array otherwise, so repeated calls do not leak. Note, though,
that nr_guest_pages is reset unconditionally while a reused array keeps
its original max_guest_pages capacity and NUMA placement; is that
acceptable if the device has moved to another NUMA node?
> + dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
> + sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
> + if (dev->mem == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
> + rte_free(dev->guest_pages);
> + dev->guest_pages = NULL;
> + return -1;
> + }
Yes: if the dev->mem allocation fails, the path frees dev->guest_pages
and NULLs it, so the cleanup is correct. One subtlety: a guest_pages
buffer that pre-existed this call is freed as well, which is safe only
because the next initialization attempt reallocates it.
[ ... ]
> +static int
> +vhost_user_add_mem_reg(struct virtio_net **pdev,
> + struct vhu_msg_context *ctx,
> + int main_fd __rte_unused)
> +{
> + uint32_t i;
> + struct virtio_net *dev = *pdev;
> + struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
> +
> + /* convert first region add to normal memory table set */
> + if (dev->mem == NULL) {
> + if (vhost_user_initialize_memory(pdev) < 0)
> + goto close_msg_fds;
> + }
dev is not re-read from *pdev after vhost_user_initialize_memory()
returns. The double-pointer signature is this code base's convention for
"the device may be reallocated", so even if the current helper never
replaces the device, using the stale dev below is a latent bug.
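A defensive sketch of the re-read (purely illustrative):

    if (dev->mem == NULL) {
        if (vhost_user_initialize_memory(pdev) < 0)
            goto close_msg_fds;
        /* re-read in case the helper ever reallocates the device */
        dev = *pdev;
    }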
> + if (dev->mem->nregions >= VHOST_MEMORY_MAX_NREGIONS) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "too many memory regions already (%u)",
> + dev->mem->nregions);
> + goto close_msg_fds;
> + }
> +
> + /* make sure supplied memory fd present */
> + if (ctx->fd_num != 1) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "fd count makes no sense (%u)", ctx->fd_num);
> + goto close_msg_fds;
> + }
Per the vhost-user specification, VHOST_USER_ADD_MEM_REG carries exactly
one region descriptor and one file descriptor, so requiring
ctx->fd_num == 1 matches the spec. A more descriptive message, e.g.
"expected 1 fd, got %u", would make failures easier to diagnose.
> + /* New region goes at the end of the contiguous array */
> + struct rte_vhost_mem_region *reg = &dev->mem->regions[dev->mem->nregions];
> +
> + reg->guest_phys_addr = region->guest_phys_addr;
> + reg->guest_user_addr = region->userspace_addr;
> + reg->size = region->memory_size;
> + reg->fd = ctx->fds[0];
> + ctx->fds[0] = -1;
Yes: setting ctx->fds[0] to -1 transfers ownership of the descriptor to
reg, so a later close_msg_fds() will not close it. Correct.
> + if (vhost_user_mmap_region(dev, reg, region->mmap_offset) < 0) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region");
> + goto close_msg_fds;
> + }
> +
> + dev->mem->nregions++;
dev->mem->nregions is only incremented after a successful mmap, so no
phantom region is left behind. However, reg->fd was already moved out of
ctx->fds (set to -1 above), so close_msg_fds() will not close it on this
path; unless vhost_user_mmap_region() closes the fd on failure, it leaks,
and reg is left partially initialized.
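Assuming vhost_user_mmap_region() does not close the fd on failure
(worth verifying), a sketch of a tighter cleanup:

    if (vhost_user_mmap_region(dev, reg, region->mmap_offset) < 0) {
        VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region");
        close(reg->fd);  /* fd ownership was moved out of ctx->fds */
        memset(reg, 0, sizeof(*reg));
        goto close_msg_fds;
    }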
> + if (dev->async_copy && rte_vfio_is_enabled("vfio")) {
> + if (async_dma_map_region(dev, reg, true) < 0)
> + goto free_new_region;
> + }
[ ... ]
> +free_new_region:
> + if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> + async_dma_map_region(dev, reg, false);
> + remove_guest_pages(dev, reg);
> + free_mem_region(reg);
> + dev->mem->nregions--;
The increment happens right after the successful mmap, and
free_new_region is only reached from the later DMA-map failure, so the
decrement balances it. One question: the error path calls
async_dma_map_region(..., false) to undo a map that just failed; is the
unmap needed because a partial mapping can remain, or is it redundant?
> +static int
> +vhost_user_rem_mem_reg(struct virtio_net **pdev,
> + struct vhu_msg_context *ctx,
> + int main_fd __rte_unused)
> +{
> + uint32_t i;
> + struct virtio_net *dev = *pdev;
> + struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
> +
> + if (dev->mem == NULL || dev->mem->nregions == 0) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "no memory regions to remove");
> + close_msg_fds(ctx);
> + return RTE_VHOST_MSG_RESULT_ERR;
> + }
> +
> + for (i = 0; i < dev->mem->nregions; i++) {
> + struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
> +
> + /*
> + * According to the vhost-user specification:
> + * The memory region to be removed is identified by its GPA,
> + * user address and size. The mmap offset is ignored.
> + */
> + if (region->userspace_addr == current_region->guest_user_addr
> + && region->guest_phys_addr == current_region->guest_phys_addr
> + && region->memory_size == current_region->size) {
Yes: as the comment above quotes, the vhost-user specification
identifies the region to remove by its guest physical address, user
address and size, with the mmap offset ignored. Matching all three
fields is exactly what the spec requires; no single field is guaranteed
to be unique on its own.
> + if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> + async_dma_map_region(dev, current_region, false);
> + remove_guest_pages(dev, current_region);
> + dev_invalidate_vrings(pdev);
> + dev = *pdev;
> + free_mem_region(current_region);
> +
> + /* Compact the regions array to keep it contiguous */
> + if (i < dev->mem->nregions - 1) {
> + memmove(&dev->mem->regions[i],
> + &dev->mem->regions[i + 1],
> + (dev->mem->nregions - 1 - i) *
> + sizeof(struct rte_vhost_mem_region));
> + memset(&dev->mem->regions[dev->mem->nregions - 1],
> + 0, sizeof(struct rte_vhost_mem_region));
> + }
Yes. Removing region i shifts regions i+1 .. nregions-1 down by one
slot (nregions - 1 - i entries) and zeroes the vacated last slot; e.g.
with nregions = 4 and i = 1, two entries move and slot 3 is cleared.
When i is the last region, free_mem_region() has already memset the
slot, so skipping the memmove/memset is fine.
[ ... ]