[PATCH 1/7] vhost: improve IOTLB logs
Maxime Coquelin
maxime.coquelin at redhat.com
Thu Dec 23 09:36:53 CET 2021
This patch adds the IOTLB mempool name when logging debug
or error messages, and also prepends the socket path
to all the logs.
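For illustration, with this change an IOTLB log line carries both the
device's socket path (dev->ifname) and the mempool name; the values
below are hypothetical examples, with the pool name following the
existing "iotlb_<pid>_<vid>_<vq_index>" naming:

    (/tmp/vhost-user0.sock) IOTLB pool iotlb_1234_0_0 empty, clear entries

instead of the previous:

    IOTLB pool empty, clear entries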
Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
lib/vhost/iotlb.c | 26 +++++++++++++++-----------
lib/vhost/iotlb.h | 10 +++++-----
lib/vhost/vhost.c | 2 +-
lib/vhost/vhost_user.c | 2 +-
4 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 82bdb84526..e9e1ede7a4 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -62,7 +62,7 @@ vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
}
void
-vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
+vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t iova, uint8_t perm)
{
struct vhost_iotlb_entry *node;
@@ -70,14 +70,16 @@ vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
if (ret) {
- VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
+ VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear entries\n",
+ dev->ifname, vq->iotlb_pool->name);
if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
vhost_user_iotlb_pending_remove_all(vq);
else
vhost_user_iotlb_cache_random_evict(vq);
ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) IOTLB pool %s still empty, failure\n",
+ dev->ifname, vq->iotlb_pool->name);
return;
}
}
@@ -156,22 +158,25 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
}
void
-vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
- uint64_t uaddr, uint64_t size, uint8_t perm)
+vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t uaddr,
+ uint64_t size, uint8_t perm)
{
struct vhost_iotlb_entry *node, *new_node;
int ret;
ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
if (ret) {
- VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
+ VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear entries\n",
+ dev->ifname, vq->iotlb_pool->name);
if (!TAILQ_EMPTY(&vq->iotlb_list))
vhost_user_iotlb_cache_random_evict(vq);
else
vhost_user_iotlb_pending_remove_all(vq);
ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) IOTLB pool %s still empty, failure\n",
+ dev->ifname, vq->iotlb_pool->name);
return;
}
}
@@ -311,7 +316,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
snprintf(pool_name, sizeof(pool_name), "iotlb_%u_%d_%d",
getpid(), dev->vid, vq_index);
- VHOST_LOG_CONFIG(DEBUG, "IOTLB cache name: %s\n", pool_name);
+ VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB cache name: %s\n", dev->ifname, pool_name);
/* If already created, free it and recreate */
vq->iotlb_pool = rte_mempool_lookup(pool_name);
@@ -324,9 +329,8 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
RTE_MEMPOOL_F_NO_CACHE_ALIGN |
RTE_MEMPOOL_F_SP_PUT);
if (!vq->iotlb_pool) {
- VHOST_LOG_CONFIG(ERR,
- "Failed to create IOTLB cache pool (%s)\n",
- pool_name);
+ VHOST_LOG_CONFIG(ERR, "(%s) Failed to create IOTLB cache pool (%s)\n",
+ dev->ifname, pool_name);
return -1;
}
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index b6e0757ad6..8d0ff7473b 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -33,17 +33,17 @@ vhost_user_iotlb_wr_unlock(struct vhost_virtqueue *vq)
rte_rwlock_write_unlock(&vq->iotlb_lock);
}
-void vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
- uint64_t uaddr, uint64_t size,
- uint8_t perm);
+void vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t uaddr,
+ uint64_t size, uint8_t perm);
void vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
uint64_t iova, uint64_t size);
uint64_t vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
uint64_t *size, uint8_t perm);
bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
uint8_t perm);
-void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
- uint8_t perm);
+void vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint8_t perm);
void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
uint64_t size, uint8_t perm);
void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 13a9bb9dd1..e52d7f7bb6 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -56,7 +56,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
*/
vhost_user_iotlb_rd_unlock(vq);
- vhost_user_iotlb_pending_insert(vq, iova, perm);
+ vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
if (vhost_user_iotlb_miss(dev, iova, perm)) {
VHOST_LOG_CONFIG(ERR,
"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index a781346c4d..67934be12c 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2563,7 +2563,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
if (!vq)
continue;
- vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
+ vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva,
len, imsg->perm);
if (is_vring_iotlb(dev, vq, imsg))
--
2.31.1