[PATCH 1/7] vhost: improve IOTLB logs
David Marchand
david.marchand at redhat.com
Tue Jan 4 15:44:35 CET 2022
On Thu, Dec 23, 2021 at 9:37 AM Maxime Coquelin
<maxime.coquelin at redhat.com> wrote:
>
> This patch adds the IOTLB mempool name when logging debug
> or error messages, and also prepends the socket path
> to all the logs.
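
For reference, a debug message would then look something like this
(the socket path and pool name below are made-up examples; the pool
name follows the "iotlb_%u_%d_%d" format used in vhost_user_iotlb_init):

	(/tmp/vhost-user.sock) IOTLB pool iotlb_1234_0_0 empty, clear entries
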
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
> ---
> lib/vhost/iotlb.c | 26 +++++++++++++++-----------
> lib/vhost/iotlb.h | 10 +++++-----
> lib/vhost/vhost.c | 2 +-
> lib/vhost/vhost_user.c | 2 +-
> 4 files changed, 22 insertions(+), 18 deletions(-)
>
> diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
> index 82bdb84526..e9e1ede7a4 100644
> --- a/lib/vhost/iotlb.c
> +++ b/lib/vhost/iotlb.c
> @@ -62,7 +62,7 @@ vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
> }
>
> void
> -vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
> +vhost_user_iotlb_pending_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
> uint64_t iova, uint8_t perm)
> {
> struct vhost_iotlb_entry *node;
> @@ -70,14 +70,16 @@ vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
>
> ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
> if (ret) {
> - VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
> + VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear entries\n",
> + dev->ifname, vq->iotlb_pool->name);
> if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
> vhost_user_iotlb_pending_remove_all(vq);
> else
> vhost_user_iotlb_cache_random_evict(vq);
> ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
> if (ret) {
> - VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
> + VHOST_LOG_CONFIG(ERR, "(%s) IOTLB pool %s still empty, failure\n",
> + dev->ifname, vq->iotlb_pool->name);
> return;
> }
> }
> @@ -156,22 +158,25 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
> }
>
> void
> -vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
> - uint64_t uaddr, uint64_t size, uint8_t perm)
> +vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq,
> + uint64_t iova, uint64_t uaddr,
> + uint64_t size, uint8_t perm)
> {
> struct vhost_iotlb_entry *node, *new_node;
> int ret;
>
> ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
> if (ret) {
> - VHOST_LOG_CONFIG(DEBUG, "IOTLB pool empty, clear entries\n");
> + VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear entries\n",
> + dev->ifname, vq->iotlb_pool->name);
We have the same log message in two different paths
(vhost_user_iotlb_pending_insert and vhost_user_iotlb_cache_insert).
It would probably help when debugging to have separate messages.
This could be added later, since the current patch is about prefixing
with the socket path.
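For example, something like this (untested sketch, the exact wording is
just a suggestion):

	/* in vhost_user_iotlb_pending_insert() */
	VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear pending entries\n",
			dev->ifname, vq->iotlb_pool->name);

	/* in vhost_user_iotlb_cache_insert() */
	VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB pool %s empty, clear cache entries\n",
			dev->ifname, vq->iotlb_pool->name);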
> if (!TAILQ_EMPTY(&vq->iotlb_list))
> vhost_user_iotlb_cache_random_evict(vq);
> else
> vhost_user_iotlb_pending_remove_all(vq);
> ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
> if (ret) {
> - VHOST_LOG_CONFIG(ERR, "IOTLB pool still empty, failure\n");
> + VHOST_LOG_CONFIG(ERR, "(%s) IOTLB pool %s still empty, failure\n",
> + dev->ifname, vq->iotlb_pool->name);
> return;
> }
> }
> @@ -311,7 +316,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
>
> snprintf(pool_name, sizeof(pool_name), "iotlb_%u_%d_%d",
> getpid(), dev->vid, vq_index);
> - VHOST_LOG_CONFIG(DEBUG, "IOTLB cache name: %s\n", pool_name);
> + VHOST_LOG_CONFIG(DEBUG, "(%s) IOTLB cache name: %s\n", dev->ifname, pool_name);
>
> /* If already created, free it and recreate */
> vq->iotlb_pool = rte_mempool_lookup(pool_name);
> @@ -324,9 +329,8 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
> RTE_MEMPOOL_F_NO_CACHE_ALIGN |
> RTE_MEMPOOL_F_SP_PUT);
> if (!vq->iotlb_pool) {
> - VHOST_LOG_CONFIG(ERR,
> - "Failed to create IOTLB cache pool (%s)\n",
> - pool_name);
> + VHOST_LOG_CONFIG(ERR, "(%s) Failed to create IOTLB cache pool (%s)\n",
I'd make this log consistent with the previous log and remove the ()
around the pool name.
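Something like (untested, to stay consistent with the message above):

	VHOST_LOG_CONFIG(ERR, "(%s) Failed to create IOTLB cache pool %s\n",
			dev->ifname, pool_name);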
> + dev->ifname, pool_name);
> return -1;
> }
>
The rest lgtm.
--
David Marchand