[PATCH] vhost: use another variable to store vhost msg result code
Xia, Chenbo
chenbo.xia at intel.com
Thu Sep 22 15:26:40 CEST 2022
> -----Original Message-----
> From: Pei, Andy <andy.pei at intel.com>
> Sent: Monday, July 18, 2022 10:07 AM
> To: dev at dpdk.org
> Cc: maxime.coquelin at redhat.com; Xia, Chenbo <chenbo.xia at intel.com>; Ma, WenwuX <wenwux.ma at intel.com>
> Subject: [PATCH] vhost: use another variable to store vhost msg result code
Patch looks good. I suggest using the title:
vhost: use dedicated variable for vhost message result code
Thanks,
Chenbo
>
> Currently, in the function vhost_user_msg_handler, the variable ret is
> used to store both the vhost message result code and function call
> return values. After this patch, ret stores only function call return
> values, while a new variable, msg_result, stores the vhost message
> result. This improves readability.
>
> Signed-off-by: Andy Pei <andy.pei at intel.com>
> ---
> lib/vhost/vhost_user.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 4ad28ba..dac06c9 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> struct vhu_msg_context ctx;
> vhost_message_handler_t *msg_handler;
> struct rte_vdpa_device *vdpa_dev;
> + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> int ret;
> int unlock_required = 0;
> bool handled;
> @@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> handled = false;
> if (dev->extern_ops.pre_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> goto skip_to_post_handle;
>
> if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> } else {
> - ret = msg_handler->callback(&dev, &ctx, fd);
> + msg_result = msg_handler->callback(&dev, &ctx, fd);
> }
>
> - switch (ret) {
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_ERR:
> VHOST_LOG_CONFIG(dev->ifname, ERR,
> "processing %s failed.\n",
> @@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> }
>
> skip_to_post_handle:
> - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> dev->extern_ops.post_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> "vhost message (req: %d) was not handled.\n",
> request);
> close_msg_fds(&ctx);
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> }
>
> /*
> @@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> */
> if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> + ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
> ctx.msg.size = sizeof(ctx.msg.payload.u64);
> ctx.fd_num = 0;
> send_vhost_reply(dev, fd, &ctx);
> - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
> ret = -1;
> goto unlock;
> }
>
> - ret = 0;
> for (i = 0; i < dev->nr_vring; i++) {
> struct vhost_virtqueue *vq = dev->virtqueue[i];
> bool cur_ready = vq_is_ready(dev, vq);
> --
> 1.8.3.1
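
For reference, a minimal sketch of the pattern the patch applies: the
protocol-level message result and the function's own return code live in
separate variables, so a late assignment to one can never clobber the
other. The enum values and handler below are simplified stand-ins, not
the real vhost-user API.

#include <stdio.h>

/* Simplified stand-ins for the RTE_VHOST_MSG_RESULT_* codes. */
enum msg_result {
	MSG_RESULT_ERR,
	MSG_RESULT_OK,
	MSG_RESULT_REPLY,
};

/* Hypothetical message handler; returns a protocol-level result code. */
static enum msg_result
handle_request(int request)
{
	return request >= 0 ? MSG_RESULT_OK : MSG_RESULT_ERR;
}

/*
 * Before the patch, a single 'ret' held both the handler's result code
 * and the function's return value; afterwards the two are distinct.
 */
static int
msg_loop(int request)
{
	enum msg_result msg_result = MSG_RESULT_OK; /* protocol outcome */
	int ret = 0;                                /* caller-visible status */

	msg_result = handle_request(request);
	if (msg_result == MSG_RESULT_ERR) {
		ret = -1; /* only 'ret' decides what the caller sees */
		return ret;
	}

	/* further work can keep using msg_result without touching ret */
	return ret;
}

int
main(void)
{
	printf("ok: %d, err: %d\n", msg_loop(1), msg_loop(-1));
	return 0;
}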