[dpdk-stable] [PATCH 17.11 1/2] vhost: un-inline dirty pages logging functions
Adrian Moreno
amorenoz at redhat.com
Thu Jan 9 18:58:41 CET 2020
From: Maxime Coquelin <maxime.coquelin at redhat.com>
[ upstream commit 094b643d9b425c942aa18d1d229832f8d96940af ]
In order to reduce the I-cache pressure, this patch removes
the inlining of the dirty page logging functions, which can
be considered cold path.
Indeed, these functions are only called during live migration,
so they are not called most of the time.
Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie at intel.com>
---
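[Note, not part of the patch] Below is a minimal, self-contained sketch of
the pattern this backport applies: the header keeps only a tiny inline
wrapper that tests the logging feature bit on the hot path, while the cold
dirty-bitmap walk lives out of line in a .c file. Names such as demo_dev,
demo_log_write and DEMO_F_LOG_ALL are illustrative only, not DPDK symbols,
and the bitmap handling is simplified (no atomics, no memory barriers).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_LOG_PAGE 4096
#define DEMO_F_LOG_ALL (1ULL << 26)

struct demo_dev {
	uint64_t features;
	uint8_t log_base[64];   /* dirty bitmap: one bit per DEMO_LOG_PAGE */
	uint64_t log_size;      /* size of the bitmap, in bytes */
};

/* Cold path, out of line: mark every page in [addr, addr + len) dirty. */
static void
__demo_log_write(struct demo_dev *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (len == 0 || dev->log_size <= (addr + len - 1) / DEMO_LOG_PAGE / 8)
		return;

	for (page = addr / DEMO_LOG_PAGE;
	     page * DEMO_LOG_PAGE < addr + len; page++)
		dev->log_base[page / 8] |= (uint8_t)(1U << (page % 8));
}

/* Hot path, stays inline: only a feature-bit test, cheap in the common
 * case where logging is off (i.e. outside live migration). */
static inline void
demo_log_write(struct demo_dev *dev, uint64_t addr, uint64_t len)
{
	if (dev->features & DEMO_F_LOG_ALL)
		__demo_log_write(dev, addr, len);
}

int main(void)
{
	struct demo_dev dev;

	memset(&dev, 0, sizeof(dev));
	dev.log_size = sizeof(dev.log_base);

	/* Logging disabled: the wrapper returns without touching the map. */
	demo_log_write(&dev, 3 * DEMO_LOG_PAGE, 2 * DEMO_LOG_PAGE);
	printf("logging off: byte 0 = 0x%02x\n", dev.log_base[0]);

	/* Logging enabled: pages 3 and 4 get marked (byte 0 = 0x18). */
	dev.features |= DEMO_F_LOG_ALL;
	demo_log_write(&dev, 3 * DEMO_LOG_PAGE, 2 * DEMO_LOG_PAGE);
	printf("logging on:  byte 0 = 0x%02x\n", dev.log_base[0]);
	return 0;
}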
lib/librte_vhost/vhost.c | 134 +++++++++++++++++++++++++++++++++++++++
lib/librte_vhost/vhost.h | 132 ++++----------------------------------
2 files changed, 146 insertions(+), 120 deletions(-)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index ec584695c..4b4ef56e1 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -110,6 +110,140 @@ get_device(int vid)
return dev;
}
+#define VHOST_LOG_PAGE 4096
+
+/*
+ * Atomically set a bit in memory.
+ */
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+ /*
+ * __sync_ built-ins are deprecated, but __atomic_ ones
+ * are sub-optimized in older GCC versions.
+ */
+ __sync_fetch_and_or_1(addr, (1U << nr));
+#else
+ __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
+}
+
+static __rte_always_inline void
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+ vhost_set_bit(page % 8, &log_base[page / 8]);
+}
+
+void
+__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+ uint64_t page;
+
+ if (unlikely(!dev->log_base || !len))
+ return;
+
+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+ return;
+
+ /* To make sure guest memory updates are committed before logging */
+ rte_smp_wmb();
+
+ page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len) {
+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+ page += 1;
+ }
+}
+
+void
+__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ unsigned long *log_base;
+ int i;
+
+ if (unlikely(!dev->log_base))
+ return;
+
+ log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+ /*
+ * It is expected a write memory barrier has been issued
+ * before this function is called.
+ */
+
+ for (i = 0; i < vq->log_cache_nb_elem; i++) {
+ struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+ /*
+ * '__sync' builtins are deprecated, but '__atomic' ones
+ * are sub-optimized in older GCC versions.
+ */
+ __sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+ __atomic_fetch_or(log_base + elem->offset, elem->val,
+ __ATOMIC_RELAXED);
+#endif
+ }
+
+ rte_smp_wmb();
+
+ vq->log_cache_nb_elem = 0;
+}
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t page)
+{
+ uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+ uint32_t offset = page / (sizeof(unsigned long) << 3);
+ int i;
+
+ for (i = 0; i < vq->log_cache_nb_elem; i++) {
+ struct log_cache_entry *elem = vq->log_cache + i;
+
+ if (elem->offset == offset) {
+ elem->val |= (1UL << bit_nr);
+ return;
+ }
+ }
+
+ if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+ /*
+ * No more room for a new log cache entry,
+ * so write the dirty log map directly.
+ */
+ rte_smp_wmb();
+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+ return;
+ }
+
+ vq->log_cache[i].offset = offset;
+ vq->log_cache[i].val = (1UL << bit_nr);
+ vq->log_cache_nb_elem++;
+}
+
+void
+__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t addr, uint64_t len)
+{
+ uint64_t page;
+
+ if (unlikely(!dev->log_base || !len))
+ return;
+
+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+ return;
+
+ page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len) {
+ vhost_log_cache_page(dev, vq, page);
+ page += 1;
+ }
+}
+
static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index f8b587a9d..34e2ecc4c 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -271,141 +271,33 @@ struct virtio_net {
} __rte_cache_aligned;
-#define VHOST_LOG_PAGE 4096
-
-/*
- * Atomically set a bit in memory.
- */
-static __rte_always_inline void
-vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
-{
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
- /*
- * __sync_ built-ins are deprecated, but __atomic_ ones
- * are sub-optimized in older GCC versions.
- */
- __sync_fetch_and_or_1(addr, (1U << nr));
-#else
- __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
-#endif
-}
-
-static __rte_always_inline void
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
- vhost_set_bit(page % 8, &log_base[page / 8]);
-}
+void __vhost_log_cache_write(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t addr, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+ struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
- uint64_t page;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base || !len))
- return;
-
- if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
- return;
-
- /* To make sure guest memory updates are committed before logging */
- rte_smp_wmb();
-
- page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
- page += 1;
- }
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_write(dev, addr, len);
}
static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- unsigned long *log_base;
- int i;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base))
- return;
-
- log_base = (unsigned long *)(uintptr_t)dev->log_base;
-
- /*
- * It is expected a write memory barrier has been issued
- * before this function is called.
- */
-
- for (i = 0; i < vq->log_cache_nb_elem; i++) {
- struct log_cache_entry *elem = vq->log_cache + i;
-
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
- /*
- * '__sync' builtins are deprecated, but '__atomic' ones
- * are sub-optimized in older GCC versions.
- */
- __sync_fetch_and_or(log_base + elem->offset, elem->val);
-#else
- __atomic_fetch_or(log_base + elem->offset, elem->val,
- __ATOMIC_RELAXED);
-#endif
- }
-
- rte_smp_wmb();
-
- vq->log_cache_nb_elem = 0;
-}
-
-static __rte_always_inline void
-vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t page)
-{
- uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
- uint32_t offset = page / (sizeof(unsigned long) << 3);
- int i;
-
- for (i = 0; i < vq->log_cache_nb_elem; i++) {
- struct log_cache_entry *elem = vq->log_cache + i;
-
- if (elem->offset == offset) {
- elem->val |= (1UL << bit_nr);
- return;
- }
- }
-
- if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
- /*
- * No more room for a new log cache entry,
- * so write the dirty log map directly.
- */
- rte_smp_wmb();
- vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-
- return;
- }
-
- vq->log_cache[i].offset = offset;
- vq->log_cache[i].val = (1UL << bit_nr);
- vq->log_cache_nb_elem++;
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_cache_sync(dev, vq);
}
static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t addr, uint64_t len)
{
- uint64_t page;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base || !len))
- return;
-
- if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
- return;
-
- page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- vhost_log_cache_page(dev, vq, page);
- page += 1;
- }
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_cache_write(dev, vq, addr, len);
}
static __rte_always_inline void
--
2.21.1