[PATCH v2] vhost: use dedicated variable for vhost message result code

Pei, Andy andy.pei at intel.com
Thu Sep 29 15:49:19 CEST 2022


Hi Chenbo,

Thanks for your efforts.

> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia at intel.com>
> Sent: Thursday, September 29, 2022 4:38 PM
> To: Pei, Andy <andy.pei at intel.com>; dev at dpdk.org
> Cc: maxime.coquelin at redhat.com
> Subject: RE: [PATCH v2] vhost: use dedicated variable for vhost message
> result code
> 
> > -----Original Message-----
> > From: Xia, Chenbo <chenbo.xia at intel.com>
> > Sent: Monday, September 26, 2022 2:58 PM
> > To: Pei, Andy <andy.pei at intel.com>; dev at dpdk.org
> > Cc: maxime.coquelin at redhat.com
> > Subject: RE: [PATCH v2] vhost: use dedicated variable for vhost
> > message result code
> >
> > > -----Original Message-----
> > > From: Pei, Andy <andy.pei at intel.com>
> > > Sent: Friday, September 23, 2022 10:33 AM
> > > To: dev at dpdk.org
> > > Cc: Xia, Chenbo <chenbo.xia at intel.com>; maxime.coquelin at redhat.com
> > > Subject: [PATCH v2] vhost: use dedicated variable for vhost message
> > result
> > > code
> > >
> > > Currently, in the function vhost_user_msg_handler(), the variable
> > > "ret" is used to store both the vhost message result code and the
> > > return values of function calls. After this patch, "ret" is used
> > > only to store function call return values, while a new dedicated
> > > variable "msg_result" holds the vhost message result. This
> > > improves readability.
> > >
> > > Signed-off-by: Andy Pei <andy.pei at intel.com>
> > > ---
> > >  lib/vhost/vhost_user.c | 24 ++++++++++++------------
> > >  1 file changed, 12 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index
> > > 0182090..6d93495 100644
> > > --- a/lib/vhost/vhost_user.c
> > > +++ b/lib/vhost/vhost_user.c
> > > @@ -2954,6 +2954,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  	struct vhu_msg_context ctx;
> > >  	vhost_message_handler_t *msg_handler;
> > >  	struct rte_vdpa_device *vdpa_dev;
> > > +	int msg_result = RTE_VHOST_MSG_RESULT_OK;
> > >  	int ret;
> > >  	int unlock_required = 0;
> > >  	bool handled;
> > > @@ -3046,8 +3047,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  	handled = false;
> > >  	if (dev->extern_ops.pre_msg_handle) {
> > >  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context,
> msg) != 0);
> > > -		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > > -		switch (ret) {
> > > +		msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid,
> &ctx);
> > > +		switch (msg_result) {
> > >  		case RTE_VHOST_MSG_RESULT_REPLY:
> > >  			send_vhost_reply(dev, fd, &ctx);
> > >  			/* Fall-through */
> > > @@ -3065,12 +3066,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  		goto skip_to_post_handle;
> > >
> > >  	if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) !=
> > > 0) {
> > > -		ret = RTE_VHOST_MSG_RESULT_ERR;
> > > +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > >  	} else {
> > > -		ret = msg_handler->callback(&dev, &ctx, fd);
> > > +		msg_result = msg_handler->callback(&dev, &ctx, fd);
> > >  	}
> > >
> > > -	switch (ret) {
> > > +	switch (msg_result) {
> > >  	case RTE_VHOST_MSG_RESULT_ERR:
> > >  		VHOST_LOG_CONFIG(dev->ifname, ERR,
> > >  			"processing %s failed.\n",
> > > @@ -3095,11 +3096,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  	}
> > >
> > >  skip_to_post_handle:
> > > -	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> > > +	if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> > >  			dev->extern_ops.post_msg_handle) {
> > >  		RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context,
> msg) != 0);
> > > -		ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> > > -		switch (ret) {
> > > +		msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> > > &ctx);
> > > +		switch (msg_result) {
> > >  		case RTE_VHOST_MSG_RESULT_REPLY:
> > >  			send_vhost_reply(dev, fd, &ctx);
> > >  			/* Fall-through */
> > > @@ -3118,7 +3119,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  			"vhost message (req: %d) was not handled.\n",
> > >  			request);
> > >  		close_msg_fds(&ctx);
> > > -		ret = RTE_VHOST_MSG_RESULT_ERR;
> > > +		msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > >  	}
> > >
> > >  	/*
> > > @@ -3127,17 +3128,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > >  	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> > >  	 */
> > >  	if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> > > -		ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> > > +		ctx.msg.payload.u64 = msg_result ==
> RTE_VHOST_MSG_RESULT_ERR;
> > >  		ctx.msg.size = sizeof(ctx.msg.payload.u64);
> > >  		ctx.fd_num = 0;
> > >  		send_vhost_reply(dev, fd, &ctx);
> > > -	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> > > +	} else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> > >  		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message
> handling
> > > failed.\n");
> > >  		ret = -1;
> > >  		goto unlock;
> > >  	}
> > >
> > > -	ret = 0;
> > >  	for (i = 0; i < dev->nr_vring; i++) {
> > >  		struct vhost_virtqueue *vq = dev->virtqueue[i];
> > >  		bool cur_ready = vq_is_ready(dev, vq);
> > > --
> > > 1.8.3.1
> >
> > Reviewed-by: Chenbo Xia <chenbo.xia at intel.com>
> 
> Applied to next-virtio/main, thanks


More information about the dev mailing list