[dpdk-dev] [PATCH v2 2/3] vhost: convert buffer addresses to GPA for logging

Adrian Moreno amorenoz at redhat.com
Fri Sep 27 20:14:45 CEST 2019


Add IOVA variants of the dirty page logging functions so that, when the
device uses an IOMMU (VIRTIO_F_IOMMU_PLATFORM), buffer addresses are
translated from IOVA to guest physical address (GPA) before being
logged.

Note that the API-facing rte_vhost_log_write is not modified, so make
it explicit in its documentation that it expects the address in GPA
space.

Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: maxime.coquelin at redhat.com
Cc: stable at dpdk.org

Signed-off-by: Adrian Moreno <amorenoz at redhat.com>
---
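A note for reviewers, not part of the commit message: since
rte_vhost_log_write keeps taking a GPA, a caller that only holds a host
virtual address has to translate it back through the memory table
before logging. The sketch below illustrates that translation using the
existing rte_vhost_get_mem_table and rte_vhost_log_write API; the
helper name log_write_from_hva and its error handling are hypothetical,
given only as an example and not proposed by this patch.

#include <stdint.h>
#include <stdlib.h>
#include <rte_vhost.h>

/* Hypothetical helper: translate a host virtual address back to a
 * guest physical address and log the write, mirroring what the
 * internal hva_to_gpa() does for the IOVA path added by this patch.
 */
static int
log_write_from_hva(int vid, uint64_t hva, uint64_t len)
{
	struct rte_vhost_memory *mem = NULL;
	uint32_t i;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		return -1;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
		    hva + len <= reg->host_user_addr + reg->size) {
			uint64_t gpa = reg->guest_phys_addr +
				       (hva - reg->host_user_addr);

			/* rte_vhost_log_write() expects a GPA. */
			rte_vhost_log_write(vid, gpa, len);
			free(mem);
			return 0;
		}
	}

	free(mem);
	return -1;
}
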
 lib/librte_vhost/rte_vhost.h  |  2 +-
 lib/librte_vhost/vdpa.c       |  4 +++-
 lib/librte_vhost/vhost.c      | 21 +++++++++++++++++++++
 lib/librte_vhost/vhost.h      | 16 ++++++++++++++++
 lib/librte_vhost/virtio_net.c |  9 +++++----
 5 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 7fb172912..16c7e4243 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -265,7 +265,7 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
  * @param vid
  *  vhost device ID
  * @param addr
- *  the starting address for write
+ *  the starting address for write (in guest physical address space)
  * @param len
  *  the length to write
  */
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
index 8e45ce9f8..11fd5aede 100644
--- a/lib/librte_vhost/vdpa.c
+++ b/lib/librte_vhost/vdpa.c
@@ -201,7 +201,9 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
 				goto fail;
 			desc = desc_ring[desc_id];
 			if (desc.flags & VRING_DESC_F_WRITE)
-				vhost_log_write(dev, desc.addr, desc.len);
+				vhost_log_cache_write_iova(dev, vq,
+							   desc.addr,
+							   desc.len);
 			desc_id = desc.next;
 		} while (desc.flags & VRING_DESC_F_NEXT);
 
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index e57dda22f..78457a77c 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -200,6 +200,27 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			     uint64_t iova, uint64_t len)
+{
+	uint64_t hva, gpa, map_len;
+	map_len = len;
+
+	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+	if (map_len != len) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Failed to write log for IOVA 0x%" PRIx64 ". "
+			"No IOTLB entry found\n",
+			iova);
+		return;
+	}
+
+	gpa = hva_to_gpa(dev, hva, len);
+	if (gpa)
+		__vhost_log_cache_write(dev, vq, gpa, len);
+}
+
 void *
 vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint64_t desc_addr, uint64_t desc_len)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 9d3883385..6dab8089a 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -353,6 +353,9 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 void __vhost_log_cache_write(struct virtio_net *dev,
 		struct vhost_virtqueue *vq,
 		uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+		struct vhost_virtqueue *vq,
+		uint64_t iova, uint64_t len);
 void __vhost_log_cache_sync(struct virtio_net *dev,
 		struct vhost_virtqueue *vq);
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
@@ -393,6 +396,19 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	vhost_log_write(dev, vq->log_guest_addr + offset, len);
 }
 
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			   uint64_t iova, uint64_t len)
+{
+	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+		return;
+
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		__vhost_log_cache_write_iova(dev, vq, iova, len);
+	else
+		__vhost_log_cache_write(dev, vq, iova, len);
+}
+
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d..9c5d72c22 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -178,7 +178,8 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 	for (i = 0; i < count; i++) {
 		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
-		vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
+		vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
+					   elem[i].len);
 		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
 	}
 
@@ -633,7 +634,7 @@ copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		PRINT_PACKET(dev, (uintptr_t)dst,
 				(uint32_t)len, 0);
-		vhost_log_cache_write(dev, vq,
+		vhost_log_cache_write_iova(dev, vq,
 				iova, len);
 
 		remain -= len;
@@ -733,7 +734,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			} else {
 				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
 						dev->vhost_hlen, 0);
-				vhost_log_cache_write(dev, vq,
+				vhost_log_cache_write_iova(dev, vq,
 						buf_vec[0].buf_iova,
 						dev->vhost_hlen);
 			}
@@ -748,7 +749,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
 				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
 				cpy_len);
-			vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
+			vhost_log_cache_write_iova(dev, vq, buf_iova + buf_offset,
 					cpy_len);
 			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
 				cpy_len, 0);
-- 
2.21.0


