[dpdk-dev] [PATCH v2 06/10] net/virtio: fix queue setup consistency

Xu, Qian Q qian.q.xu at intel.com
Wed Feb 7 09:31:07 CET 2018


Any update, Olivier?
We are close to the release, and this bug fix is important for virtio vector path usage. Thanks.

> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Olivier Matz
> Sent: Thursday, February 1, 2018 4:28 PM
> To: Yao, Lei A <lei.a.yao at intel.com>
> Cc: dev at dpdk.org; yliu at fridaylinux.org; maxime.coquelin at redhat.com;
> Thomas Monjalon <thomas at monjalon.net>; stable at dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 06/10] net/virtio: fix queue setup consistency
> 
> Hi Lei,
> 
> It's on my todo list, I'll check this as soon as possible.
> 
> Olivier
> 
> 
> On Thu, Feb 01, 2018 at 03:14:15AM +0000, Yao, Lei A wrote:
> > Hi, Olivier
> >
> > This is Lei from the DPDK validation team at Intel. During our DPDK
> > 18.02-rc1 testing, I found that the following patch causes a serious
> > issue with the virtio vector path: traffic can't resume after
> > stopping and starting the virtio device.
> >
> > The steps are as follows (a command sketch follows below):
> > 1. Launch a vhost-user port using testpmd on the host
> > 2. Launch a VM with a virtio device, mergeable is off
> > 3. Bind the virtio device to the PMD driver, launch testpmd, and let
> >    tx/rx use the vector path:
> >     virtio_xmit_pkts_simple
> >     virtio_recv_pkts_vec
> > 4. Send traffic to the virtio device from the vhost side, then stop
> >    the virtio device
> > 5. Start the virtio device again
> >
> > After step 5, the traffic can't resume.
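> >
> > For reference, here is a sketch of the commands, with example core
> > lists, socket path and PCI address (not necessarily the exact values
> > from our setup):
> >
> >     # host: vhost-user port in testpmd, then generate traffic
> >     ./testpmd -l 0-1 -n 4 --no-pci \
> >         --vdev 'net_vhost0,iface=/tmp/vhost-net,queues=1' -- -i
> >     testpmd> start tx_first
> >
> >     # QEMU: virtio device with mergeable buffers off
> >     -chardev socket,id=char0,path=/tmp/vhost-net \
> >     -netdev vhost-user,id=net0,chardev=char0 \
> >     -device virtio-net-pci,netdev=net0,mrg_rxbuf=off
> >
> >     # guest: bind to igb_uio, run testpmd, then stop/start the port
> >     ./usertools/dpdk-devbind.py -b igb_uio 0000:00:04.0
> >     ./testpmd -l 0-1 -n 4 -- -i
> >     testpmd> start
> >     testpmd> stop
> >     testpmd> port stop all
> >     testpmd> port start all
> >     testpmd> start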
> >
> > Could you help check this and provide a fix? This issue heavily
> > impacts the virtio PMD user experience. By the way, this patch is
> > already included in v17.11, so it looks like we also need a patch for
> > that LTS version. Thanks a lot!
> >
> > BRs
> > Lei
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Olivier Matz
> > > Sent: Thursday, September 7, 2017 8:14 PM
> > > To: dev at dpdk.org; yliu at fridaylinux.org; maxime.coquelin at redhat.com
> > > Cc: stephen at networkplumber.org; stable at dpdk.org
> > > Subject: [dpdk-dev] [PATCH v2 06/10] net/virtio: fix queue setup
> > > consistency
> > >
> > > In rx/tx queue setup functions, some code is executed only if
> > > use_simple_rxtx == 1. The value of this variable can change
> > > depending on the offload flags or SSE support. If Rx queue setup is
> > > called before Tx queue setup, it can result in an invalid configuration:
> > >
> > > - dev_configure is called: use_simple_rxtx is initialized to 0
> > > - rx queue setup is called: queues are initialized without simple path
> > >   support
> > > - tx queue setup is called: use_simple_rxtx switches to 1, and the
> > >   simple Rx/Tx handlers are selected (see the sketch below)
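> > >
> > > As an illustration, here is a minimal standalone model of the wrong
> > > ordering (hypothetical names, not the driver code itself):
> > >
> > >   #include <stdbool.h>
> > >   #include <stdio.h>
> > >
> > >   struct hw { bool use_simple_rxtx; };
> > >
> > >   static void rx_queue_setup(struct hw *hw)
> > >   {
> > >   	/* the ring layout depends on the flag as it is *now* */
> > >   	printf("rx ring set up for %s path\n",
> > >   	       hw->use_simple_rxtx ? "simple" : "normal");
> > >   }
> > >
> > >   static void tx_queue_setup(struct hw *hw)
> > >   {
> > >   	/* the decision is only made here */
> > >   	hw->use_simple_rxtx = true;
> > >   }
> > >
> > >   int main(void)
> > >   {
> > >   	struct hw hw = { .use_simple_rxtx = false }; /* dev_configure */
> > >   	rx_queue_setup(&hw);  /* too early: "normal" layout is chosen */
> > >   	tx_queue_setup(&hw);  /* flag flips after Rx is already set up */
> > >   	/* simple handlers are now selected, but the Rx ring was
> > >   	 * initialized for the other path */
> > >   	return 0;
> > >   }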
> > >
> > > Fix this by postponing part of the Rx/Tx queue initialization to
> > > dev_start(), as was the case in the initial implementation.
> > >
> > > Fixes: 48cec290a3d2 ("net/virtio: move queue configure code to
> > > proper place")
> > > Cc: stable at dpdk.org
> > >
> > > Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
> > > ---
> > >  drivers/net/virtio/virtio_ethdev.c | 13 +++++++++++++
> > >  drivers/net/virtio/virtio_ethdev.h |  6 ++++++
> > >  drivers/net/virtio/virtio_rxtx.c   | 40 ++++++++++++++++++++++++++++++--------
> > >  3 files changed, 51 insertions(+), 8 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> > > index 8eee3ff80..c7888f103 100644
> > > --- a/drivers/net/virtio/virtio_ethdev.c
> > > +++ b/drivers/net/virtio/virtio_ethdev.c
> > > @@ -1737,6 +1737,19 @@ virtio_dev_start(struct rte_eth_dev *dev)
> > >  	struct virtnet_rx *rxvq;
> > >  	struct virtnet_tx *txvq __rte_unused;
> > >  	struct virtio_hw *hw = dev->data->dev_private;
> > > +	int ret;
> > > +
> > > +	/* Finish the initialization of the queues */
> > > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > > +		ret = virtio_dev_rx_queue_setup_finish(dev, i);
> > > +		if (ret < 0)
> > > +			return ret;
> > > +	}
> > > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > > +		ret = virtio_dev_tx_queue_setup_finish(dev, i);
> > > +		if (ret < 0)
> > > +			return ret;
> > > +	}
> > >
> > >  	/* check if lsc interrupt feature is enabled */
> > >  	if (dev->data->dev_conf.intr_conf.lsc) {
> > >
> > > diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> > > index c3413c6d9..2039bc547 100644
> > > --- a/drivers/net/virtio/virtio_ethdev.h
> > > +++ b/drivers/net/virtio/virtio_ethdev.h
> > > @@ -92,10 +92,16 @@ int  virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
> > >  		const struct rte_eth_rxconf *rx_conf,
> > >  		struct rte_mempool *mb_pool);
> > >
> > > +int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> > > +				uint16_t rx_queue_id);
> > > +
> > >  int  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
> > >  		uint16_t nb_tx_desc, unsigned int socket_id,
> > >  		const struct rte_eth_txconf *tx_conf);
> > >
> > > +int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
> > > +				uint16_t tx_queue_id);
> > > +
> > >  uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> > >  		uint16_t nb_pkts);
> > >
> > > diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> > > index e30377c51..a32e3229f 100644
> > > --- a/drivers/net/virtio/virtio_rxtx.c
> > > +++ b/drivers/net/virtio/virtio_rxtx.c
> > > @@ -421,9 +421,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
> > >  	struct virtio_hw *hw = dev->data->dev_private;
> > >  	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > >  	struct virtnet_rx *rxvq;
> > > -	int error, nbufs;
> > > -	struct rte_mbuf *m;
> > > -	uint16_t desc_idx;
> > >
> > >  	PMD_INIT_FUNC_TRACE();
> > >
> > > @@ -440,10 +437,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
> > >  	}
> > >  	dev->data->rx_queues[queue_idx] = rxvq;
> > >
> > > +	return 0;
> > > +}
> > > +
> > > +int
> > > +virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
> > > +{
> > > +	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
> > > +	struct virtio_hw *hw = dev->data->dev_private;
> > > +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > > +	struct virtnet_rx *rxvq = &vq->rxq;
> > > +	struct rte_mbuf *m;
> > > +	uint16_t desc_idx;
> > > +	int error, nbufs;
> > > +
> > > +	PMD_INIT_FUNC_TRACE();
> > >
> > >  	/* Allocate blank mbufs for the each rx descriptor */
> > >  	nbufs = 0;
> > > -	error = ENOSPC;
> > >
> > >  	if (hw->use_simple_rxtx) {
> > >  		for (desc_idx = 0; desc_idx < vq->vq_nentries;
> > > @@ -534,7 +545,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
> > >  	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > >  	struct virtnet_tx *txvq;
> > >  	uint16_t tx_free_thresh;
> > > -	uint16_t desc_idx;
> > >
> > >  	PMD_INIT_FUNC_TRACE();
> > >
> > > @@ -563,9 +573,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
> > >
> > >  	vq->vq_free_thresh = tx_free_thresh;
> > >
> > > -	if (hw->use_simple_rxtx) {
> > > -		uint16_t mid_idx  = vq->vq_nentries >> 1;
> > > +	dev->data->tx_queues[queue_idx] = txvq;
> > > +	return 0;
> > > +}
> > > +
> > > +int
> > > +virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
> > > +				uint16_t queue_idx)
> > > +{
> > > +	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
> > > +	struct virtio_hw *hw = dev->data->dev_private;
> > > +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > > +	uint16_t mid_idx = vq->vq_nentries >> 1;
> > > +	struct virtnet_tx *txvq = &vq->txq;
> > > +	uint16_t desc_idx;
> > > +
> > > +	PMD_INIT_FUNC_TRACE();
> > > +
> > > +	if (hw->use_simple_rxtx) {
> > >  		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
> > >  			vq->vq_ring.avail->ring[desc_idx] =
> > >  				desc_idx + mid_idx;
> > > @@ -587,7 +612,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
> > >
> > >  	VIRTQUEUE_DUMP(vq);
> > >
> > > -	dev->data->tx_queues[queue_idx] = txvq;
> > >  	return 0;
> > >  }
> > >
> > > --
> > > 2.11.0
> >

