[dpdk-dev] [PATCH 27/40] net/virtio: add Virtio-user memory tables ops

Xia, Chenbo chenbo.xia at intel.com
Wed Jan 6 12:57:02 CET 2021


Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin at redhat.com>
> Sent: Monday, December 21, 2020 5:14 AM
> To: dev at dpdk.org; Xia, Chenbo <chenbo.xia at intel.com>; olivier.matz at 6wind.com;
> amorenoz at redhat.com; david.marchand at redhat.com
> Cc: Maxime Coquelin <maxime.coquelin at redhat.com>
> Subject: [PATCH 27/40] net/virtio: add Virtio-user memory tables ops
> 
> This patch implements a dedicated callback for preparing
> and sending the memory table to the backends.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
> ---

<snip>

> 
> +static int
> +vhost_user_check_reply_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
> +{
> +	enum vhost_user_request req = msg->request;
> +	int ret;
> +
> +	if (!(msg->flags & VHOST_USER_NEED_REPLY_MASK))
> +		return 0;
> +
> +	ret = vhost_user_read(dev->vhostfd, msg);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Failed to read reply-ack");
> +		return -1;
> +	}
> +
> +	if (req != msg->request) {
> +		PMD_DRV_LOG(ERR, "Unexpected reply-ack request type (%d)", msg->request);
> +		return -1;
> +	}

I think it's better to keep the size check: msg->size should equal sizeof(msg->payload.u64).
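
For reference, a minimal sketch of the check I mean (it reuses the names
already in this patch, so take it as illustrative rather than an exact hunk):

	/* Reply-ack payload must be a single u64 */
	if (msg->size != sizeof(msg->payload.u64)) {
		PMD_DRV_LOG(ERR, "Unexpected reply-ack payload size %u", msg->size);
		return -1;
	}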

> +
> +	return msg->payload.u64 ? -1 : 0;

I think it's better to add a log after checking the payload's value. Looking
back at vhost_user_set_memory_table, there's currently no way for a user or
developer to know what failed (whether vhost_user_write failed or the backend
NACKed the request). Maybe it's also better to add an error log inside or
outside vhost_user_write :)
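
Something like the below, as a rough sketch only (the exact log wording is
just a suggestion), replacing the final return:

	/* Non-zero payload means the backend NACKed the request */
	if (msg->payload.u64) {
		PMD_DRV_LOG(ERR, "Backend rejected request %d (reply-ack NACK)", req);
		return -1;
	}

	return 0;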

Thanks,
Chenbo

> +}
> +
>  static int
>  vhost_user_set_owner(struct virtio_user_dev *dev)
>  {
> @@ -336,25 +359,47 @@ update_memory_region(const struct rte_memseg_list *msl __rte_unused,
>  }
> 
>  static int
> -prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
> +vhost_user_set_memory_table(struct virtio_user_dev *dev)
>  {
>  	struct walk_arg wa;
> +	int fds[VHOST_MEMORY_MAX_NREGIONS];
> +	int ret, fd_num;
> +	struct vhost_user_msg msg = {
> +		.request = VHOST_USER_SET_MEM_TABLE,
> +		.flags = VHOST_USER_VERSION,
> +	};
> +
> +	if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
> +		msg.flags |= VHOST_USER_NEED_REPLY_MASK;
> 
>  	wa.region_nr = 0;
> -	wa.vm = &msg->payload.memory;
> +	wa.vm = &msg.payload.memory;
>  	wa.fds = fds;
> 
>  	/*
>  	 * The memory lock has already been taken by memory subsystem
>  	 * or virtio_user_start_device().
>  	 */
> -	if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
> -		return -1;
> +	ret = rte_memseg_walk_thread_unsafe(update_memory_region, &wa);
> +	if (ret < 0)
> +		goto err;
> 
> -	msg->payload.memory.nregions = wa.region_nr;
> -	msg->payload.memory.padding = 0;
> +	fd_num = wa.region_nr;
> +	msg.payload.memory.nregions = wa.region_nr;
> +	msg.payload.memory.padding = 0;
> 
> -	return 0;
> +	msg.size = sizeof(msg.payload.memory.nregions);
> +	msg.size += sizeof(msg.payload.memory.padding);
> +	msg.size += fd_num * sizeof(struct vhost_memory_region);
> +
> +	ret = vhost_user_write(dev->vhostfd, &msg, fds, fd_num);
> +	if (ret < 0)
> +		goto err;
> +
> +	return vhost_user_check_reply_ack(dev, &msg);
> +err:
> +	PMD_DRV_LOG(ERR, "Failed to set memory table");
> +	return -1;
>  }
> 
>  static struct vhost_user_msg m;
> @@ -367,7 +412,6 @@ const char * const vhost_msg_strings[] = {
>  	[VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
>  	[VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
>  	[VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
> -	[VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
>  	[VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
>  	[VHOST_USER_SET_STATUS] = "VHOST_SET_STATUS",
>  	[VHOST_USER_GET_STATUS] = "VHOST_GET_STATUS",
> @@ -426,18 +470,6 @@ vhost_user_sock(struct virtio_user_dev *dev,
>  	case VHOST_USER_RESET_OWNER:
>  		break;
> 
> -	case VHOST_USER_SET_MEM_TABLE:
> -		if (prepare_vhost_memory_user(&msg, fds) < 0)
> -			return -1;
> -		fd_num = msg.payload.memory.nregions;
> -		msg.size = sizeof(m.payload.memory.nregions);
> -		msg.size += sizeof(m.payload.memory.padding);
> -		msg.size += fd_num * sizeof(struct vhost_memory_region);
> -
> -		if (has_reply_ack)
> -			msg.flags |= VHOST_USER_NEED_REPLY_MASK;
> -		break;
> -
>  	case VHOST_USER_SET_LOG_FD:
>  		fds[fd_num++] = *((int *)arg);
>  		break;
> @@ -636,6 +668,7 @@ struct virtio_user_backend_ops virtio_ops_user = {
>  	.set_features = vhost_user_set_features,
>  	.get_protocol_features = vhost_user_get_protocol_features,
>  	.set_protocol_features = vhost_user_set_protocol_features,
> +	.set_memory_table = vhost_user_set_memory_table,
>  	.send_request = vhost_user_sock,
>  	.enable_qp = vhost_user_enable_queue_pair
>  };
> diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> index c0a9b5b767..3059ec545d 100644
> --- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
> +++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> @@ -19,7 +19,6 @@
>  #define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
>  #define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
>  #define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
> -#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, void *)
>  #define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
>  #define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
>  #define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
> @@ -44,7 +43,6 @@ static uint64_t vhost_req_user_to_vdpa[] = {
>  	[VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
>  	[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
>  	[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
> -	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
>  	[VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
>  	[VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
>  	[VHOST_USER_SET_VRING_ENABLE] = VHOST_VDPA_SET_VRING_ENABLE,
> @@ -202,7 +200,7 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
>  }
> 
>  static int
> -vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
> +vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
>  {
>  	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
> 
> @@ -248,9 +246,6 @@ vhost_vdpa_send_request(struct virtio_user_dev *dev,
> 
>  	req_vdpa = vhost_req_user_to_vdpa[req];
> 
> -	if (req_vdpa == VHOST_SET_MEM_TABLE)
> -		return vhost_vdpa_dma_map_all(dev);
> -
>  	switch (req_vdpa) {
>  	case VHOST_SET_VRING_NUM:
>  	case VHOST_SET_VRING_ADDR:
> @@ -331,6 +326,7 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
>  	.set_owner = vhost_vdpa_set_owner,
>  	.get_features = vhost_vdpa_get_features,
>  	.set_features = vhost_vdpa_set_features,
> +	.set_memory_table = vhost_vdpa_set_memory_table,
>  	.send_request = vhost_vdpa_send_request,
>  	.enable_qp = vhost_vdpa_enable_queue_pair,
>  	.dma_map = vhost_vdpa_dma_map,
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> index 6bb61b3e89..ae976be158 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> @@ -177,7 +177,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
>  		goto error;
> 
>  	/* Step 2: share memory regions */
> -	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
> +	ret = dev->ops->set_memory_table(dev);
>  	if (ret < 0)
>  		goto error;
> 
> @@ -351,7 +351,7 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
>  		dev->ops->enable_qp(dev, i, 0);
> 
>  	/* Step 2: update memory regions */
> -	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
> +	dev->ops->set_memory_table(dev);
> 
>  	/* Step 3: resume the active queues */
>  	for (i = 0; i < dev->queue_pairs; i++)
> --
> 2.29.2


