[dpdk-dev] vhost: add virtio configuration space access socket messages

Ilya Maximets i.maximets at samsung.com
Tue Feb 26 08:39:04 CET 2019


On 26.02.2019 10:01, Liu, Changpeng wrote:
> 
> 
>> -----Original Message-----
>> From: Ilya Maximets [mailto:i.maximets at samsung.com]
>> Sent: Monday, February 25, 2019 9:20 PM
>> To: Liu, Changpeng <changpeng.liu at intel.com>; dev at dpdk.org
>> Cc: Stojaczyk, Dariusz <dariusz.stojaczyk at intel.com>;
>> maxime.coquelin at redhat.com; Bie, Tiwei <tiwei.bie at intel.com>; Wang,
>> Zhihong <zhihong.wang at intel.com>; Jason Wang <jasowang at redhat.com>
>> Subject: Re: vhost: add virtio configuration space access socket messages
>>
>> On 25.02.2019 10:51, Changpeng Liu wrote:
>>> This patch adds new vhost user messages GET_CONFIG and SET_CONFIG
>>> used to get/set virtio device's PCI configuration space.
>>
>> Besides the fact that some additional description and reasoning are required,
>> I do not see the usage of this feature. You're defining the flag
>> VHOST_USER_PROTOCOL_F_CONFIG, but it's never used. So, none of dpdk vhost
>> backends (vdpa, vhost-user) will use this feature.
>> You, probably, missed adding it to VHOST_USER_PROTOCOL_FEATURES or
>> VDPA_SUPPORTED_PROTOCOL_FEATURES.
>>
>> From the other side, current implementation forces application to properly
>> implement the get/set_config callbacks. Otherwise, receiving of the messages
>> will result in RTE_VHOST_MSG_RESULT_ERR and subsequent vhost
>> disconnection.
>> This looks strange, because supported protocol features normally enabled by
>> default. Am I misunderstanding something?
> QEMU will not send the messages if VHOST_USER_PROTOCOL_F_CONFIG wasn't enabled.

So, you're going to enable it only by explicit call to
'rte_vhost_driver_set_features' ?

In this case I'm assuming that you're implementing your own vhost backend.
But why you're not using 'dev->extern_ops' and corresponding 'pre_msg_handle'
or 'post_msg_handle' to handle your GET/SET_CONFIG messages, like the
'vhost_crypto' backend does?
'vhost_crypto' backend ?


>>
>> One more thing. According to spec: "vhost-user slave uses zero length of
>> payload to indicate an error to vhost-user master". However, current version
>> of 'vhost_user_get_config' replies with size equal to 'sizeof(uint64_t)'.
> Yeah, can be changed to 0 here, QEMU can process it.
>>
>> Best regards, Ilya Maximets.
>>
>>> Signed-off-by: Changpeng Liu <changpeng.liu at intel.com>
>>> Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk at intel.com>
>>> ---
>>>  lib/librte_vhost/rte_vhost.h  |  8 ++++++++
>>>  lib/librte_vhost/vhost_user.c | 44
>> +++++++++++++++++++++++++++++++++++++++++++
>>>  lib/librte_vhost/vhost_user.h | 16 ++++++++++++++++
>>>  3 files changed, 68 insertions(+)
>>>
>>> diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
>>> index 2753670..ab710ce 100644
>>> --- a/lib/librte_vhost/rte_vhost.h
>>> +++ b/lib/librte_vhost/rte_vhost.h
>>> @@ -63,6 +63,10 @@
>>>  #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
>>>  #endif
>>>
>>> +#ifndef VHOST_USER_PROTOCOL_F_CONFIG
>>> +#define VHOST_USER_PROTOCOL_F_CONFIG 9
>>> +#endif
>>> +
>>>  #ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
>>>  #define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
>>>  #endif
>>> @@ -173,6 +177,10 @@ struct vhost_device_ops {
>>>
>>>  	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);
>> 	/**< triggered when a vring is enabled or disabled */
>>>
>>> +	int (*get_config)(int vid, uint8_t *config, uint32_t config_len);
>>> +	int (*set_config)(int vid, uint8_t *config, uint32_t offset,
>>> +			  uint32_t len, uint32_t flags);
>>> +
>>>  	/**
>>>  	 * Features could be changed after the feature negotiation.
>>>  	 * For example, VHOST_F_LOG_ALL will be set/cleared at the
>>> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
>>> index b086ad9..8e42734 100644
>>> --- a/lib/librte_vhost/vhost_user.c
>>> +++ b/lib/librte_vhost/vhost_user.c
>>> @@ -73,6 +73,8 @@
>>>  	[VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
>>>  	[VHOST_USER_SET_SLAVE_REQ_FD]  =
>> "VHOST_USER_SET_SLAVE_REQ_FD",
>>>  	[VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
>>> +	[VHOST_USER_GET_CONFIG] = "VHOST_USER_GET_CONFIG",
>>> +	[VHOST_USER_SET_CONFIG] = "VHOST_USER_SET_CONFIG",
>>>  	[VHOST_USER_CRYPTO_CREATE_SESS] =
>> "VHOST_USER_CRYPTO_CREATE_SESS",
>>>  	[VHOST_USER_CRYPTO_CLOSE_SESS] =
>> "VHOST_USER_CRYPTO_CLOSE_SESS",
>>>  	[VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
>>> @@ -359,6 +361,46 @@
>>>  	return RTE_VHOST_MSG_RESULT_OK;
>>>  }
>>>
>>> +static int
>>> +vhost_user_get_config(struct virtio_net **pdev, struct VhostUserMsg *msg,
>>> +		      int main_fd __rte_unused)
>>> +{
>>> +	struct virtio_net *dev = *pdev;
>>> +
>>> +	if (!dev->notify_ops->get_config) {
>>> +		msg->size = sizeof(uint64_t);
>>> +		return RTE_VHOST_MSG_RESULT_REPLY;
>>> +	}
>>> +
>>> +	if (dev->notify_ops->get_config(dev->vid,
>>> +					msg->payload.config.region,
>>> +					msg->payload.config.size) != 0) {
>>> +		msg->size = sizeof(uint64_t);
>>> +	}
>>> +
>>> +	return RTE_VHOST_MSG_RESULT_REPLY;
>>> +}
>>> +
>>> +static int
>>> +vhost_user_set_config(struct virtio_net **pdev, struct VhostUserMsg *msg,
>>> +		      int main_fd __rte_unused)
>>> +{
>>> +	struct virtio_net *dev = *pdev;
>>> +
>>> +	if (!dev->notify_ops->set_config)
>>> +		return RTE_VHOST_MSG_RESULT_ERR;
>>> +
>>> +	if (dev->notify_ops->set_config(dev->vid,
>>> +					msg->payload.config.region,
>>> +					msg->payload.config.offset,
>>> +					msg->payload.config.size,
>>> +					msg->payload.config.flags) != 0) {
>>> +		return RTE_VHOST_MSG_RESULT_ERR;
>>> +	}
>>> +
>>> +	return RTE_VHOST_MSG_RESULT_OK;
>>> +}
>>> +
>>>  /*
>>>   * Reallocate virtio_dev and vhost_virtqueue data structure to make them on
>> the
>>>   * same numa node as the memory of vring descriptor.
>>> @@ -1725,6 +1767,8 @@ typedef int (*vhost_message_handler_t)(struct
>> virtio_net **pdev,
>>>  	[VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
>>>  	[VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
>>>  	[VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
>>> +	[VHOST_USER_GET_CONFIG] = vhost_user_get_config,
>>> +	[VHOST_USER_SET_CONFIG] = vhost_user_set_config,
>>>  	[VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
>>>  	[VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
>>>  	[VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
>>> diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
>>> index 2a650fe..057cdec 100644
>>> --- a/lib/librte_vhost/vhost_user.h
>>> +++ b/lib/librte_vhost/vhost_user.h
>>> @@ -12,6 +12,11 @@
>>>
>>>  /* refer to hw/virtio/vhost-user.c */
>>>
>>> +/*
>>> + * Maximum size of virtio device config space
>>> + */
>>> +#define VHOST_USER_MAX_CONFIG_SIZE 256
>>> +
>>>  #define VHOST_MEMORY_MAX_NREGIONS 8
>>>
>>>  #define VHOST_USER_PROTOCOL_FEATURES	((1ULL <<
>> VHOST_USER_PROTOCOL_F_MQ) | \
>>> @@ -49,6 +54,8 @@
>>>  	VHOST_USER_NET_SET_MTU = 20,
>>>  	VHOST_USER_SET_SLAVE_REQ_FD = 21,
>>>  	VHOST_USER_IOTLB_MSG = 22,
>>> +	VHOST_USER_GET_CONFIG = 24,
>>> +	VHOST_USER_SET_CONFIG = 25,
>>>  	VHOST_USER_CRYPTO_CREATE_SESS = 26,
>>>  	VHOST_USER_CRYPTO_CLOSE_SESS = 27,
>>>  	VHOST_USER_POSTCOPY_ADVISE = 28,
>>> @@ -60,6 +67,7 @@
>>>  typedef enum VhostUserSlaveRequest {
>>>  	VHOST_USER_SLAVE_NONE = 0,
>>>  	VHOST_USER_SLAVE_IOTLB_MSG = 1,
>>> +	VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
>>>  	VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
>>>  	VHOST_USER_SLAVE_MAX
>>>  } VhostUserSlaveRequest;
>>> @@ -112,6 +120,13 @@
>>>  	uint64_t offset;
>>>  } VhostUserVringArea;
>>>
>>> +typedef struct VhostUserConfig {
>>> +	uint32_t offset;
>>> +	uint32_t size;
>>> +	uint32_t flags;
>>> +	uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
>>> +} VhostUserConfig;
>>> +
>>>  typedef struct VhostUserMsg {
>>>  	union {
>>>  		uint32_t master; /* a VhostUserRequest value */
>>> @@ -131,6 +146,7 @@
>>>  		struct vhost_vring_addr addr;
>>>  		VhostUserMemory memory;
>>>  		VhostUserLog    log;
>>> +		VhostUserConfig config;
>>>  		struct vhost_iotlb_msg iotlb;
>>>  		VhostUserCryptoSessionParam crypto_session;
>>>  		VhostUserVringArea area;
>>>


More information about the dev mailing list