[PATCH v8 04/13] vdpa/ifc: add blk ops for ifc device

Xia, Chenbo chenbo.xia at intel.com
Mon May 23 06:07:41 CEST 2022


> -----Original Message-----
> From: Pei, Andy <andy.pei at intel.com>
> Sent: Wednesday, May 18, 2022 8:14 PM
> To: dev at dpdk.org
> Cc: Xia, Chenbo <chenbo.xia at intel.com>; maxime.coquelin at redhat.com; Cao,
> Gang <gang.cao at intel.com>; Liu, Changpeng <changpeng.liu at intel.com>; Xu,
> Rosen <rosen.xu at intel.com>; Xiao, QimaiX <qimaix.xiao at intel.com>
> Subject: [PATCH v8 04/13] vdpa/ifc: add blk ops for ifc device
> 
> For virtio blk devices, re-use part of the ifc driver ops.
> Implement ifcvf_blk_get_config for virtio blk devices.
> Support the VHOST_USER_PROTOCOL_F_CONFIG feature for virtio
> blk devices.
> 
> Signed-off-by: Andy Pei <andy.pei at intel.com>
> ---
>  drivers/vdpa/ifc/base/ifcvf.h |  4 ++
>  drivers/vdpa/ifc/ifcvf_vdpa.c | 91 ++++++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 94 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
> index 483d38b..244de46 100644
> --- a/drivers/vdpa/ifc/base/ifcvf.h
> +++ b/drivers/vdpa/ifc/base/ifcvf.h
> @@ -67,6 +67,10 @@
>  #define IFCVF_32_BIT_MASK		0xffffffff
> 
> 
> +#ifndef VHOST_USER_PROTOCOL_F_CONFIG
> +#define VHOST_USER_PROTOCOL_F_CONFIG	9
> +#endif
> +
>  struct ifcvf_pci_cap {
>  	u8 cap_vndr;            /* Generic PCI field: PCI_CAP_ID_VNDR */
>  	u8 cap_next;            /* Generic PCI field: next ptr. */
> diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
> index be0efd3..350214a 100644
> --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
> +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
> @@ -1087,6 +1087,10 @@ struct rte_vdpa_dev_info {
>  		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
>  		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
>  		 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
> +
> +#define VDPA_BLK_PROTOCOL_FEATURES \
> +		(1ULL << VHOST_USER_PROTOCOL_F_CONFIG)
> +
>  static int
>  ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
>  {
> @@ -1199,6 +1203,91 @@ struct rte_vdpa_dev_info {
>  	return device_id;
>  }
> 
> +static int
> +ifcvf_blk_get_config(int vid, uint8_t *config, uint32_t len)
> +{
> +	struct virtio_blk_config *dev_cfg;
> +	struct ifcvf_internal *internal;
> +	struct rte_vdpa_device *vdev;
> +	struct internal_list *list;
> +	uint32_t i;
> +	uint64_t capacity = 0;
> +	uint8_t *byte;
> +
> +	if (len < sizeof(struct virtio_blk_config)) {
> +		DRV_LOG(ERR, "Invalid len: %u, required: %u",
> +			len, (uint32_t)sizeof(struct virtio_blk_config));
> +		return -1;
> +	}

I believe len should not be greater than sizeof(struct virtio_blk_config)
either?

And one question: should we only allow reading the whole blk config?
I guess we have this check because of QEMU's current usage?
Although this is fine as vendor-specific logic, I am just wondering
whether we should have this limit for blk devices.
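
If we did relax it, I would expect something like the below (untested
sketch, reusing the existing DRV_LOG convention): reject oversized
requests but allow partial reads, with the copy loop then bounded by
len instead of sizeof(struct virtio_blk_config):

	/* Sketch only: allow reading any prefix of the config space. */
	if (len == 0 || len > sizeof(struct virtio_blk_config)) {
		DRV_LOG(ERR, "Invalid len: %u, max: %u",
			len, (uint32_t)sizeof(struct virtio_blk_config));
		return -1;
	}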

Thanks,
Chenbo

> +
> +	vdev = rte_vhost_get_vdpa_device(vid);
> +	list = find_internal_resource_by_vdev(vdev);
> +	if (list == NULL) {
> +		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
> +		return -1;
> +	}
> +
> +	internal = list->internal;
> +
> +	for (i = 0; i < sizeof(struct virtio_blk_config); i++)
> +		config[i] = *((u8 *)internal->hw.blk_cfg + i);
> +
> +	dev_cfg = (struct virtio_blk_config *)internal->hw.blk_cfg;
> +
> +	/* cannot read 64-bit register in one attempt, so read byte by byte. */
> +	for (i = 0; i < sizeof(internal->hw.blk_cfg->capacity); i++) {
> +		byte = (uint8_t *)&internal->hw.blk_cfg->capacity + i;
> +		capacity |= (uint64_t)*byte << (i * 8);
> +	}
> +	/* The capacity is a count of 512-byte sectors.
> +	 * Right shift by 1 bit to get K,
> +	 * by another 10 bits to get M,
> +	 * and by 10 more bits to get G.
> +	 * So to show the capacity in G, right shift 21 bits in total.
> +	 */
> +	DRV_LOG(DEBUG, "capacity  : %"PRIu64"G", capacity >> 21);
> +
> +	DRV_LOG(DEBUG, "size_max  : 0x%08x", dev_cfg->size_max);
> +	DRV_LOG(DEBUG, "seg_max   : 0x%08x", dev_cfg->seg_max);
> +	DRV_LOG(DEBUG, "blk_size  : 0x%08x", dev_cfg->blk_size);
> +	DRV_LOG(DEBUG, "geometry");
> +	DRV_LOG(DEBUG, "      cylinders: %u", dev_cfg->geometry.cylinders);
> +	DRV_LOG(DEBUG, "      heads    : %u", dev_cfg->geometry.heads);
> +	DRV_LOG(DEBUG, "      sectors  : %u", dev_cfg->geometry.sectors);
> +	DRV_LOG(DEBUG, "num_queues: 0x%08x", dev_cfg->num_queues);
> +
> +	DRV_LOG(DEBUG, "config: [%x] [%x] [%x] [%x] [%x] [%x] [%x] [%x]\n",
> +		config[0], config[1], config[2], config[3], config[4],
> +		config[5], config[6], config[7]);
> +	return 0;
> +}
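
Not an issue with the patch, just a note on the capacity computation
above: the loop reassembles the little-endian 64-bit capacity from
single-byte reads. A standalone illustration with a made-up value
(a 1 TiB disk exposes 2^31 512-byte sectors):

	/* Little-endian bytes of 0x80000000, i.e. 2^31 sectors. */
	uint8_t raw[8] = {0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00};
	uint64_t capacity = 0;
	uint32_t i;

	for (i = 0; i < 8; i++)
		capacity |= (uint64_t)raw[i] << (i * 8);
	/* capacity == 0x80000000, and capacity >> 21 == 1024, logged as 1024G */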
> +
> +static int
> +ifcvf_blk_get_protocol_features(struct rte_vdpa_device *vdev,
> +	uint64_t *features)
> +{
> +	RTE_SET_USED(vdev);
> +
> +	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
> +	*features |= VDPA_BLK_PROTOCOL_FEATURES;
> +	return 0;
> +}
> +
> +static struct rte_vdpa_dev_ops ifcvf_blk_ops = {
> +	.get_queue_num = ifcvf_get_queue_num,
> +	.get_features = ifcvf_get_vdpa_features,
> +	.set_features = ifcvf_set_features,
> +	.get_protocol_features = ifcvf_blk_get_protocol_features,
> +	.dev_conf = ifcvf_dev_config,
> +	.dev_close = ifcvf_dev_close,
> +	.set_vring_state = ifcvf_set_vring_state,
> +	.migration_done = NULL,
> +	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
> +	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
> +	.get_notify_area = ifcvf_get_notify_area,
> +	.get_config = ifcvf_blk_get_config,
> +};
> +
>  struct rte_vdpa_dev_info dev_info[] = {
>  	{
>  		.features = (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
> @@ -1211,7 +1300,7 @@ struct rte_vdpa_dev_info dev_info[] = {
>  	{
>  		.features = (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
>  			    (1ULL << VHOST_F_LOG_ALL),
> -		.ops = NULL,
> +		.ops = &ifcvf_blk_ops,
>  	},
>  };
> 
> --
> 1.8.3.1


