[PATCH v7 10/12] vhost: add vdpa device type to rte vdpa device

Xia, Chenbo chenbo.xia at intel.com
Wed Oct 19 11:14:08 CEST 2022


> -----Original Message-----
> From: Pei, Andy <andy.pei at intel.com>
> Sent: Tuesday, October 18, 2022 2:20 PM
> To: dev at dpdk.org
> Cc: Xia, Chenbo <chenbo.xia at intel.com>; Xu, Rosen <rosen.xu at intel.com>;
> Huang, Wei <wei.huang at intel.com>; Cao, Gang <gang.cao at intel.com>;
> maxime.coquelin at redhat.com
> Subject: [PATCH v7 10/12] vhost: add vdpa device type to rte vdpa device
> 
> Add vdpa_device_type to rte_vdpa_device to store the device type.
> Call the vdpa ops get_dev_type to fill vdpa_device_type
> when registering the vdpa device.
> 
> Signed-off-by: Andy Pei <andy.pei at intel.com>
> ---
>  lib/vhost/socket.c      | 15 +--------------
>  lib/vhost/vdpa.c        | 17 +++++++++++++++++
>  lib/vhost/vdpa_driver.h |  2 ++
>  3 files changed, 20 insertions(+), 14 deletions(-)
> 
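
For readers following the series: the callback this patch consumes at
registration time looks roughly like the sketch below. The driver name is
hypothetical; the op signature and the type macros are the existing ones
from vdpa_driver.h and rte_vhost.h.

#include <rte_common.h>
#include <rte_vhost.h>
#include <vdpa_driver.h>

/* Report this device as a block device; net drivers can simply leave
 * get_dev_type unset and rely on the NET default added in vdpa.c below.
 */
static int
example_blk_get_dev_type(struct rte_vdpa_device *vdev __rte_unused,
		uint32_t *type)
{
	*type = RTE_VHOST_VDPA_DEVICE_TYPE_BLK;
	return 0;
}
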
> diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
> index 608ae57..f768114 100644
> --- a/lib/vhost/socket.c
> +++ b/lib/vhost/socket.c
> @@ -627,7 +627,6 @@ struct rte_vdpa_device *
>  {
>  	struct vhost_user_socket *vsocket;
>  	struct rte_vdpa_device *vdpa_dev;
> -	uint32_t vdpa_type = 0;
>  	int ret = 0;
> 
>  	pthread_mutex_lock(&vhost_user.mutex);
> @@ -644,19 +643,7 @@ struct rte_vdpa_device *
>  		goto unlock_exit;
>  	}
> 
> -	if (vdpa_dev->ops->get_dev_type) {
> -		ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
> -		if (ret) {
> -			VHOST_LOG_CONFIG(path, ERR,
> -				"failed to get vdpa dev type for socket file.\n");
> -			ret = -1;
> -			goto unlock_exit;
> -		}
> -	} else {
> -		vdpa_type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
> -	}
> -
> -	*type = vdpa_type;
> +	*type = vdpa_dev->vdpa_device_type;
> 
>  unlock_exit:
>  	pthread_mutex_unlock(&vhost_user.mutex);
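
The socket path now just returns the value cached at registration, which is
a nice simplification. The application-facing API is unchanged; a minimal
usage sketch (the function name is illustrative):

#include <rte_vhost.h>

static int
setup_vhost_socket(const char *path)
{
	uint32_t type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;

	/* Returns the type cached when the vdpa device was registered. */
	if (rte_vhost_driver_get_vdpa_dev_type(path, &type) < 0)
		return -1;

	if (type == RTE_VHOST_VDPA_DEVICE_TYPE_BLK) {
		/* hook up block-specific vhost-user handling here */
	}

	return 0;
}
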
> diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
> index bb82857..b487f4d 100644
> --- a/lib/vhost/vdpa.c
> +++ b/lib/vhost/vdpa.c
> @@ -73,6 +73,8 @@ struct rte_vdpa_device *
>  		struct rte_vdpa_dev_ops *ops)
>  {
>  	struct rte_vdpa_device *dev;
> +	uint32_t vdpa_type = -1;
> +	int ret = 0;
> 
>  	if (ops == NULL)
>  		return NULL;
> @@ -101,6 +103,21 @@ struct rte_vdpa_device *
> 
>  	dev->device = rte_dev;
>  	dev->ops = ops;
> +
> +	if (ops->get_dev_type) {
> +		ret = ops->get_dev_type(dev, &vdpa_type);
> +		if (ret) {
> +			VHOST_LOG_CONFIG(rte_dev->name, ERR,
> +					 "Failed to get vdpa dev type.\n");
> +			ret = -1;
> +			goto out_unlock;
> +		}
> +	} else {
> +		/** by default, we assume vdpa device is a net device */
> +		vdpa_type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
> +	}
> +	dev->vdpa_device_type = vdpa_type;
> +
>  	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
>  out_unlock:
>  	rte_spinlock_unlock(&vdpa_device_list_lock);
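
For completeness, the driver side that triggers this path, continuing the
hypothetical driver sketched earlier in this reply (the probe function is
illustrative; the registration API is the existing one):

static struct rte_vdpa_dev_ops example_blk_vdpa_ops = {
	/* ... other mandatory ops (get_queue_num, get_features, ...) ... */
	.get_dev_type = example_blk_get_dev_type,
};

static int
example_blk_dev_probe(struct rte_device *rte_dev)
{
	struct rte_vdpa_device *vdev;

	/* vdpa_device_type is filled from get_dev_type() during this call;
	 * drivers that do not provide the op are recorded as NET.
	 */
	vdev = rte_vdpa_register_device(rte_dev, &example_blk_vdpa_ops);
	if (vdev == NULL)
		return -1;

	return 0;
}
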
> diff --git a/lib/vhost/vdpa_driver.h b/lib/vhost/vdpa_driver.h
> index 8b88a53..c4ec222 100644
> --- a/lib/vhost/vdpa_driver.h
> +++ b/lib/vhost/vdpa_driver.h
> @@ -92,6 +92,8 @@ struct rte_vdpa_device {
>  	struct rte_device *device;
>  	/** vdpa device operations */
>  	struct rte_vdpa_dev_ops *ops;
> +	/** vdpa device type: net, blk... */
> +	uint32_t vdpa_device_type;
>  };
> 
>  /**
> --
> 1.8.3.1

Reviewed-by: Chenbo Xia <chenbo.xia at intel.com>

