[dpdk-stable] [PATCH 17.11 2/2] vhost: convert buffer addresses to GPA for logging
Adrian Moreno
amorenoz at redhat.com
Thu Jan 9 18:58:42 CET 2020
[ upstream commit 1fc3b3f06aa9c79c749e8587859d75d237ba9161 ]
Add IOVA versions of dirty page logging functions.
Note that the API facing rte_vhost_log_write is not modified.
So, make explicit that it expects the address in GPA space.
Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: stable at dpdk.org
Signed-off-by: Adrian Moreno <amorenoz at redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
lib/librte_vhost/rte_vhost.h | 2 +-
lib/librte_vhost/vhost.c | 40 +++++++++++++++++++++++++++++++++++
lib/librte_vhost/vhost.h | 31 +++++++++++++++++++++++++++
lib/librte_vhost/virtio_net.c | 18 +++++++++-------
4 files changed, 82 insertions(+), 9 deletions(-)
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 3fc6034de..eccaa3ed5 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -204,7 +204,7 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
* @param vid
* vhost device ID
* @param addr
- * the starting address for write
+ * the starting address for write (in guest physical address space)
* @param len
* the length to write
*/
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 4b4ef56e1..6a8f54fba 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -156,6 +156,26 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
+void
+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_write(dev, gpa, len);
+}
+
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
@@ -244,6 +264,26 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_cache_write(dev, vq, gpa, len);
+}
+
static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 34e2ecc4c..aa4c4c941 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -274,9 +274,14 @@ struct virtio_net {
void __vhost_log_cache_write(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -314,6 +319,32 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_cache_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_write(dev, iova, len);
+}
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index b302c384d..b8f43900d 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -161,7 +161,8 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
for (i = 0; i < count; i++) {
rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
- vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
+ vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
+ elem[i].len);
PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
}
}
@@ -278,7 +279,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
virtio_enqueue_offload(m,
(struct virtio_net_hdr *)(uintptr_t)desc_addr);
PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
- vhost_log_cache_write(dev, vq, desc_gaddr, dev->vhost_hlen);
+ vhost_log_cache_write_iova(dev, vq, desc_gaddr, dev->vhost_hlen);
} else {
struct virtio_net_hdr vnet_hdr;
uint64_t remain = dev->vhost_hlen;
@@ -301,7 +302,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
(void *)(uintptr_t)src, len);
PRINT_PACKET(dev, (uintptr_t)dst, (uint32_t)len, 0);
- vhost_log_cache_write(dev, vq, guest_addr, len);
+ vhost_log_cache_write_iova(dev, vq, guest_addr, len);
remain -= len;
guest_addr += len;
dst += len;
@@ -382,8 +383,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
desc_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset,
- cpy_len);
+ vhost_log_cache_write_iova(dev, vq,
+ desc_gaddr + desc_offset,
+ cpy_len);
PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
cpy_len, 0);
} else {
@@ -808,7 +810,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
PRINT_PACKET(dev, (uintptr_t)dst,
(uint32_t)len, 0);
- vhost_log_cache_write(dev, vq,
+ vhost_log_cache_write_iova(dev, vq,
guest_addr, len);
remain -= len;
@@ -818,7 +820,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
} else {
PRINT_PACKET(dev, (uintptr_t)hdr_addr,
dev->vhost_hlen, 0);
- vhost_log_cache_write(dev, vq, hdr_phys_addr,
+ vhost_log_cache_write_iova(dev, vq, hdr_phys_addr,
dev->vhost_hlen);
}
@@ -832,7 +834,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
desc_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset,
+ vhost_log_cache_write_iova(dev, vq, desc_gaddr + desc_offset,
cpy_len);
PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
cpy_len, 0);
--
2.21.1
More information about the stable mailing list