[PATCH v1 21/21] net/virtio-user: remove max queues limitation

Maxime Coquelin maxime.coquelin at redhat.com
Tue Feb 7 15:14:25 CET 2023



On 1/31/23 06:19, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin at redhat.com>
>> Sent: Wednesday, November 30, 2022 11:57 PM
>> To: dev at dpdk.org; Xia, Chenbo <chenbo.xia at intel.com>;
>> david.marchand at redhat.com; eperezma at redhat.com
>> Cc: Maxime Coquelin <maxime.coquelin at redhat.com>
>> Subject: [PATCH v1 21/21] net/virtio-user: remove max queues limitation
>>
>> This patch removes the limitation of 8 queue pairs by
>> dynamically allocating vring metadata once we know the
>> maximum number of queue pairs supported by the backend.
>>
>> This is especially useful for Vhost-vDPA with physical
>> devices, where the maximum queues supported may be much
>> more than 8 pairs.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin at redhat.com>
>> ---
>>   drivers/net/virtio/virtio.h                   |   6 -
>>   .../net/virtio/virtio_user/virtio_user_dev.c  | 118 ++++++++++++++----
>>   .../net/virtio/virtio_user/virtio_user_dev.h  |  16 +--
>>   drivers/net/virtio/virtio_user_ethdev.c       |  17 +--
>>   4 files changed, 109 insertions(+), 48 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
>> index 5c8f71a44d..04a897bf51 100644
>> --- a/drivers/net/virtio/virtio.h
>> +++ b/drivers/net/virtio/virtio.h
>> @@ -124,12 +124,6 @@
>>   	VIRTIO_NET_HASH_TYPE_UDP_EX)
>>
>>
>> -/*
>> - * Maximum number of virtqueues per device.
>> - */
>> -#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
>> -#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
>> -
>>   /* VirtIO device IDs. */
>>   #define VIRTIO_ID_NETWORK  0x01
>>   #define VIRTIO_ID_BLOCK    0x02
>> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c
>> b/drivers/net/virtio/virtio_user/virtio_user_dev.c
>> index 7c48c9bb29..aa24fdea70 100644
>> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
>> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
>> @@ -17,6 +17,7 @@
>>   #include <rte_alarm.h>
>>   #include <rte_string_fns.h>
>>   #include <rte_eal_memconfig.h>
>> +#include <rte_malloc.h>
>>
>>   #include "vhost.h"
>>   #include "virtio_user_dev.h"
>> @@ -58,8 +59,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev,
>> uint32_t queue_sel)
>>   	int ret;
>>   	struct vhost_vring_file file;
>>   	struct vhost_vring_state state;
>> -	struct vring *vring = &dev->vrings[queue_sel];
>> -	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
>> +	struct vring *vring = &dev->vrings.split[queue_sel];
>> +	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
>>   	struct vhost_vring_addr addr = {
>>   		.index = queue_sel,
>>   		.log_guest_addr = 0,
>> @@ -299,18 +300,6 @@ virtio_user_dev_init_max_queue_pairs(struct
>> virtio_user_dev *dev, uint32_t user_
>>   		return ret;
>>   	}
>>
>> -	if (dev->max_queue_pairs > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
>> -		/*
>> -		 * If the device supports control queue, the control queue
>> -		 * index is max_virtqueue_pairs * 2. Disable MQ if it happens.
>> -		 */
>> -		PMD_DRV_LOG(ERR, "(%s) Device advertises too many queues (%u,
>> max supported %u)",
>> -				dev->path, dev->max_queue_pairs,
>> VIRTIO_MAX_VIRTQUEUE_PAIRS);
>> -		dev->max_queue_pairs = 1;
>> -
>> -		return -1;
>> -	}
>> -
>>   	return 0;
>>   }
>>
>> @@ -579,6 +568,86 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
>>   	return 0;
>>   }
>>
>> +static int
>> +virtio_user_alloc_vrings(struct virtio_user_dev *dev)
>> +{
>> +	int i, size, nr_vrings;
>> +
>> +	nr_vrings = dev->max_queue_pairs * 2;
>> +	if (dev->hw_cvq)
>> +		nr_vrings++;
>> +
>> +	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings *
>> sizeof(*dev->callfds), 0);
>> +	if (!dev->callfds) {
>> +		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
>> +		return -1;
>> +	}
>> +
>> +	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings *
>> sizeof(*dev->kickfds), 0);
>> +	if (!dev->kickfds) {
>> +		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
>> +		goto free_callfds;
>> +	}
>> +
>> +	for (i = 0; i < nr_vrings; i++) {
>> +		dev->callfds[i] = -1;
>> +		dev->kickfds[i] = -1;
>> +	}
>> +
>> +	size = RTE_MAX(sizeof(*dev->vrings.split), sizeof(*dev-
>>> vrings.packed));
>> +	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size,
>> 0);
>> +	if (!dev->vrings.ptr) {
>> +		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev-
>>> path);
>> +		goto free_kickfds;
>> +	}
>> +
>> +	dev->packed_queues = rte_zmalloc("virtio_user_dev",
>> +			nr_vrings * sizeof(*dev->packed_queues), 0);
> 
> Should we pass the info of whether the packed vq is used, to save the
> alloc of dev->packed_queues and also to know the correct size of
> dev->vrings.ptr?

That's not ideal because the negotiation hasn't taken place yet with
the Virtio layer, but it should be doable for packed ring specifically
since it is only possible to disable it via the devargs, not at run
time.

Thanks,
Maxime



More information about the dev mailing list