|SUCCESS| [v8,5/5] vhost_user: enable configure memory slots
dpdklab at iol.unh.edu
Fri Apr 3 14:47:30 CEST 2026
Test-Label: ai-code-review
Test-Status: SUCCESS
https://dpdk.org/patch/162849
_AI Code Review_
=== Patch Review: series.patch (via Claude) ===
Review date: 2026-04-03
# Review of DPDK vhost-user add/remove memory regions patch series
git sha: series.patch
Author: Pravin M Bathija <pravin.bathija at dell.com>
Subject: [PATCH v8 1/5] vhost: add user to mailmap and define to vhost hdr
This patch series adds support for dynamically adding and removing memory
regions in the vhost-user back-end implementation. The changes include new
vhost-user protocol messages, supporting functions, and enabling the
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS feature.
> diff --git a/.mailmap b/.mailmap
> index 0e0d83e1c6..cc44e27036 100644
> --- a/.mailmap
> +++ b/.mailmap
[ ... ]
> diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h
> index 2f7c4c0080..a7f9700538 100644
> --- a/lib/vhost/rte_vhost.h
> +++ b/lib/vhost/rte_vhost.h
> @@ -109,6 +109,10 @@ extern "C" {
> #define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
> #endif
>
> +#ifndef VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
> +#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
> +#endif
> +
These #ifndef guards follow the pattern of the neighboring protocol
feature defines in this file (note the #endif closing the previous
guard in the context above), so the style is consistent. Is the guard
needed, though, i.e. can this macro be defined elsewhere before this
header is included?
> diff --git a/lib/vhost/vhost_user.h b/lib/vhost/vhost_user.h
> index ef486545ba..f8d921f7f1 100644
> --- a/lib/vhost/vhost_user.h
> +++ b/lib/vhost/vhost_user.h
> @@ -67,6 +67,9 @@ typedef enum VhostUserRequest {
> VHOST_USER_POSTCOPY_END = 30,
> VHOST_USER_GET_INFLIGHT_FD = 31,
> VHOST_USER_SET_INFLIGHT_FD = 32,
> + VHOST_USER_GET_MAX_MEM_SLOTS = 36,
> + VHOST_USER_ADD_MEM_REG = 37,
> + VHOST_USER_REM_MEM_REG = 38,
> VHOST_USER_SET_STATUS = 39,
> VHOST_USER_GET_STATUS = 40,
> } VhostUserRequest;
The message IDs 36, 37, and 38 appear to match the vhost-user
specification, with the gap at 33-35 belonging to messages not
implemented here. Please confirm these values against the current spec.
> @@ -91,6 +94,11 @@ typedef struct VhostUserMemory {
> VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
> } VhostUserMemory;
>
> +typedef struct VhostUserSingleMemReg {
> + uint64_t padding;
> + VhostUserMemoryRegion region;
> +} VhostUserSingleMemReg;
> +
What is the purpose of the padding field? Does it align with the protocol
specification for these messages?
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 4bfb13fb98..e2eb1c8f4a 100644
[ ... ]
> +static int
> +async_dma_map_region(struct virtio_net *dev, struct rte_vhost_mem_region *reg, bool do_map)
> +{
> + uint32_t i;
> + int ret;
> + uint64_t reg_start = reg->host_user_addr;
> + uint64_t reg_end = reg_start + reg->size;
> +
> + for (i = 0; i < dev->nr_guest_pages; i++) {
> + struct guest_page *page = &dev->guest_pages[i];
> +
> + /* Only process pages belonging to this region */
> + if (page->host_user_addr < reg_start ||
> + page->host_user_addr >= reg_end)
> + continue;
In async_dma_map_region(), does this code correctly handle the case where
page->host_user_addr + page->size extends beyond reg_end? The check only
verifies the page start address falls within the region bounds.
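A minimal sketch of the stricter check (hypothetical, not the author's
code), requiring the whole page to fit inside the region:

	/* Skip pages that do not lie wholly within this region; a page
	 * straddling reg_end would otherwise be mapped past the
	 * region boundary. */
	if (page->host_user_addr < reg_start ||
	    page->host_user_addr + page->size > reg_end)
		continue;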
> +
> + if (do_map) {
> + ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
> + page->host_user_addr,
> + page->host_iova,
> + page->size);
> + if (ret) {
> + if (rte_errno == ENODEV)
> + return 0;
> +
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map failed");
> + return -1;
> + }
When rte_vfio_container_dma_map() fails partway through the loop, does
this code unmap the pages that were successfully mapped before the error?
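One possible recovery, assuming unmapping the already-mapped pages of
this region is the desired behavior (a sketch, not a definitive fix):

	/* Hypothetical rollback: undo the mappings made so far in this
	 * region, then propagate the error. Unmap failures during
	 * rollback are deliberately ignored here. */
	while (i-- > 0) {
		struct guest_page *p = &dev->guest_pages[i];

		if (p->host_user_addr < reg_start ||
		    p->host_user_addr >= reg_end)
			continue;
		rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
				p->host_user_addr, p->host_iova, p->size);
	}
	return -1;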
> + } else {
> + ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
> + page->host_user_addr,
> + page->host_iova,
> + page->size);
> + if (ret) {
> + if (rte_errno == EINVAL)
> + return 0;
> +
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine unmap failed");
> + return -1;
> + }
Is it correct to return 0 on EINVAL during unmap? Does this indicate the
region was already unmapped, or does it hide a real error?
> static void
> -free_mem_region(struct virtio_net *dev)
> +free_mem_region(struct rte_vhost_mem_region *reg)
> +{
> + if (reg != NULL && reg->mmap_addr) {
This condition mixes an explicit NULL comparison (reg != NULL) with an
implicit boolean test on reg->mmap_addr. The DPDK style guide asks for
explicit NULL comparisons on pointers; two separate if statements would
also read more clearly. See the combined sketch after the next comment.
> + munmap(reg->mmap_addr, reg->mmap_size);
> + close(reg->fd);
> + memset(reg, 0, sizeof(struct rte_vhost_mem_region));
Should munmap() and close() return values be checked here? What happens
if these system calls fail?
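A minimal sketch of both suggestions combined (hypothetical rewrite;
perror() stands in for whatever vhost log macro fits a function that
has no dev pointer for VHOST_CONFIG_LOG()):

	static void
	free_mem_region(struct rte_vhost_mem_region *reg)
	{
		if (reg == NULL)
			return;
		if (reg->mmap_addr == NULL)
			return;
		/* Little can be done on teardown failures beyond logging. */
		if (munmap(reg->mmap_addr, reg->mmap_size) < 0)
			perror("vhost: munmap of memory region failed");
		if (close(reg->fd) < 0)
			perror("vhost: close of memory region fd failed");
		memset(reg, 0, sizeof(struct rte_vhost_mem_region));
	}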
> +static void
> +free_all_mem_regions(struct virtio_net *dev)
> {
> uint32_t i;
> struct rte_vhost_mem_region *reg;
> @@ -236,12 +292,10 @@ free_mem_region(struct virtio_net *dev)
> if (dev->async_copy && rte_vfio_is_enabled("vfio"))
> async_dma_map(dev, false);
>
> - for (i = 0; i < dev->mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> reg = &dev->mem->regions[i];
> - if (reg->host_user_addr) {
> - munmap(reg->mmap_addr, reg->mmap_size);
> - close(reg->fd);
> - }
> + if (reg->mmap_addr)
> + free_mem_region(reg);
Does it make sense to iterate over VHOST_MEMORY_MAX_NREGIONS instead of
dev->mem->nregions? The original code only walked the regions that were
actually populated. If the array can now be sparse after dynamic
add/remove, a comment saying so would make the full-array walk easier
to follow.
> @@ -808,8 +862,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
> uint32_t i;
> uintptr_t hua = (uintptr_t)ptr;
>
> - for (i = 0; i < mem->nregions; i++) {
> + for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
> r = &mem->regions[i];
> + if (r->host_user_addr == 0)
> + continue;
In hua_to_alignment(), is host_user_addr == 0 a reliable way to check if
a region is valid? Could a legitimate memory region start at address 0?
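If a region starting at host address 0 is a concern, the mapping
pointer is a less ambiguous validity test (a hypothetical sketch,
consistent with the mmap_addr suggestion made for
vhost_user_postcopy_register() below):

	/* mmap_addr is only set for populated slots. */
	if (r->mmap_addr == NULL)
		continue;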
> +static void
> +remove_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg)
> +{
> + uint64_t reg_start = reg->host_user_addr;
> + uint64_t reg_end = reg_start + reg->size;
> + uint32_t i, j = 0;
> +
> + for (i = 0; i < dev->nr_guest_pages; i++) {
> + if (dev->guest_pages[i].host_user_addr >= reg_start &&
> + dev->guest_pages[i].host_user_addr < reg_end)
> + continue;
> + if (j != i)
> + dev->guest_pages[j] = dev->guest_pages[i];
> + j++;
> + }
> + dev->nr_guest_pages = j;
> +}
In remove_guest_pages(), does the page boundary check correctly handle
pages that partially overlap the region? Should it also verify
host_user_addr + page size is within the region?
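A sketch of a whole-page containment check (hypothetical; a page that
only partially overlaps the region would need splitting, which this
sketch does not attempt):

	uint64_t page_start = dev->guest_pages[i].host_user_addr;
	uint64_t page_end = page_start + dev->guest_pages[i].size;

	/* Drop the page only if it lies entirely inside the region. */
	if (page_start >= reg_start && page_end <= reg_end)
		continue;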
> +static int
> +vhost_user_initialize_memory(struct virtio_net **pdev)
> +{
> + struct virtio_net *dev = *pdev;
> + int numa_node = SOCKET_ID_ANY;
> +
> + if (dev->mem != NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR,
> + "memory already initialized, free it first");
> + return -1;
> + }
Does vhost_user_initialize_memory() need to verify dev is not NULL before
dereferencing it?
> + dev->nr_guest_pages = 0;
> + if (dev->guest_pages == NULL) {
> + dev->max_guest_pages = VHOST_MEMORY_MAX_NREGIONS;
> + dev->guest_pages = rte_zmalloc_socket(NULL,
> + dev->max_guest_pages *
> + sizeof(struct guest_page),
> + RTE_CACHE_LINE_SIZE,
> + numa_node);
> + if (dev->guest_pages == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR,
> + "failed to allocate memory for dev->guest_pages");
> + return -1;
> + }
> + }
In the original code, max_guest_pages was initialized to 8. Why is it now
set to VHOST_MEMORY_MAX_NREGIONS? Does this change the memory allocation
strategy?
> + dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
> + sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
> + if (dev->mem == NULL) {
> + VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
> + rte_free(dev->guest_pages);
> + dev->guest_pages = NULL;
> + return -1;
> + }
When rte_zmalloc_socket() fails for dev->mem, should the code also reset
dev->max_guest_pages and dev->nr_guest_pages?
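A sketch of a fuller unwind, on the assumption that no guest pages have
been recorded yet at this point:

	if (dev->mem == NULL) {
		VHOST_CONFIG_LOG(dev->ifname, ERR,
			"failed to allocate memory for dev->mem");
		rte_free(dev->guest_pages);
		dev->guest_pages = NULL;
		/* Hypothetical addition: reset the counters too, so a
		 * later retry starts from a clean state. */
		dev->max_guest_pages = 0;
		dev->nr_guest_pages = 0;
		return -1;
	}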
> @@ -1278,6 +1356,8 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
> /* Now userfault register and we can use the memory */
> for (i = 0; i < memory->nregions; i++) {
> reg = &dev->mem->regions[i];
> + if (reg->host_user_addr == 0)
> + continue;
> if (vhost_user_postcopy_region_register(dev, reg) < 0)
> return -1;
> }
In vhost_user_postcopy_register(), is the check for host_user_addr == 0
consistent with how regions are validated elsewhere? Should it use
mmap_addr instead?
> @@ -1246,10 +1320,14 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
> * DPDK's virtual address with Qemu, so that Qemu can
> * retrieve the region offset when handling userfaults.
> */
> + int reg_msg_index = 0;
> memory = &ctx->msg.payload.memory;
> for (i = 0; i < memory->nregions; i++) {
> reg = &dev->mem->regions[i];
> - memory->regions[i].userspace_addr = reg->host_user_addr;
> + if (reg->host_user_addr == 0)
> + continue;
> + memory->regions[reg_msg_index].userspace_addr = reg->host_user_addr;
> + reg_msg_index++;
Does this code correctly update memory->nregions after skipping invalid
regions? The front-end expects nregions to match the number of regions
sent in the message.
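If skipping empty slots is intended, updating the header after the loop
would keep the message self-consistent (a one-line sketch):

	/* Hypothetical: advertise only the entries actually copied. */
	memory->nregions = reg_msg_index;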
> +static void
> +dev_invalidate_vrings(struct virtio_net **pdev)
> +{
> + struct virtio_net *dev = *pdev;
> + uint32_t i;
> +
> + for (i = 0; i < dev->nr_vring; i++) {
> + struct vhost_virtqueue *vq = dev->virtqueue[i];
> +
> + if (!vq)
> + continue;
> +
> + if (vq->desc || vq->avail || vq->used) {
> + /* vhost_user_lock_all_queue_pairs locked all qps */
> + VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_ADD_MEM_REG);
Does the assert macro provide useful information when the lock is not
held? The comment indicates the caller should have locked all queue pairs.
> + vring_invalidate(dev, vq);
> +
> + translate_ring_addresses(&dev, &vq);
> + }
> + }
> +
> + *pdev = dev;
> +}
In dev_invalidate_vrings(), why does the function take **pdev and
update *pdev at the end? Presumably translate_ring_addresses() can
reallocate dev (and vq) via NUMA reallocation; if so, a short comment
noting that would make the double pointer less surprising.
> +static int
> +vhost_user_add_mem_reg(struct virtio_net **pdev,
> + struct vhu_msg_context *ctx,
> + int main_fd __rte_unused)
> +{
> + uint32_t i;
> + struct virtio_net *dev = *pdev;
> + struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
> +
> + /* convert