[dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode

Ding, Xuan xuan.ding at intel.com
Wed Dec 18 03:38:34 CET 2019



> -----Original Message-----
> From: Ye, Xiaolong <xiaolong.ye at intel.com>
> Sent: Wednesday, December 18, 2019 10:25 AM
> To: Ding, Xuan <xuan.ding at intel.com>
> Cc: maxime.coquelin at redhat.com; Bie, Tiwei <tiwei.bie at intel.com>; Wang,
> Zhihong <zhihong.wang at intel.com>; Liu, Yong <yong.liu at intel.com>;
> dev at dpdk.org; stable at dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode
> 
> Hi, Xuan
> 
> On 12/18, Xuan Ding wrote:
> >This patch fixes the situation where datapath does not work properly
> >when vhost reconnects to virtio in server mode with packed ring.
> >
> >Currently, virtio and vhost share memory of vring. For split ring,
> >vhost can read the status of descriptors directly from the available
> >ring and the used ring during reconnection. Therefore, the datapath can
> continue.
> >
> >But for packed ring, when reconnecting to virtio, vhost cannot get the
> >status of descriptors only through the descriptor ring. By resetting Tx
> >and Rx queues, the datapath can restart from the beginning.
> >
> >Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> >Cc: stable at dpdk.org
> >
> >Signed-off-by: Xuan Ding <xuan.ding at intel.com>
> >
> >v3:
> >* Removed an extra asterisk from a comment.
> >* Renamed device reset function and moved it to virtio_user_ethdev.c.
> >
> >v2:
> >* Renamed queue reset functions and moved them to virtqueue.c.
> 
> Please put these change log after below '---' marker, then they won't be shown
> in commit log when you apply the patch by `git am`.
> 
> Thanks,
> Xiaolong

Hi, Xiaolong,
Thank you for your advice, I will change it in the next version.

Best regards,
Xuan

> 
> >---
> > drivers/net/virtio/virtio_ethdev.c      |  4 +-
> > drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
> > drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> > drivers/net/virtio/virtqueue.h          |  4 ++
> > 4 files changed, 117 insertions(+), 2 deletions(-)
> >
> >diff --git a/drivers/net/virtio/virtio_ethdev.c
> >b/drivers/net/virtio/virtio_ethdev.c
> >index 044eb10a7..f9d0ea70d 100644
> >--- a/drivers/net/virtio/virtio_ethdev.c
> >+++ b/drivers/net/virtio/virtio_ethdev.c
> >@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> > 			goto err_vtpci_init;
> > 	}
> >
> >+	rte_spinlock_init(&hw->state_lock);
> >+
> > 	/* reset device and negotiate default features */
> > 	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> > 	if (ret < 0)
> >@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> > 			return -EBUSY;
> > 		}
> >
> >-	rte_spinlock_init(&hw->state_lock);
> >-
> > 	hw->use_simple_rx = 1;
> >
> > 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { diff --git
> >a/drivers/net/virtio/virtio_user_ethdev.c
> >b/drivers/net/virtio/virtio_user_ethdev.c
> >index 3fc172573..425f48230 100644
> >--- a/drivers/net/virtio/virtio_user_ethdev.c
> >+++ b/drivers/net/virtio/virtio_user_ethdev.c
> >@@ -25,12 +25,48 @@
> > #define virtio_user_get_dev(hw) \
> > 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
> >
> >+static void
> >+virtio_user_reset_queues_packed(struct rte_eth_dev *dev) {
> >+	struct virtio_hw *hw = dev->data->dev_private;
> >+	struct virtnet_rx *rxvq;
> >+	struct virtnet_tx *txvq;
> >+	uint16_t i;
> >+
> >+	/* Add lock to avoid queue contention. */
> >+	rte_spinlock_lock(&hw->state_lock);
> >+	hw->started = 0;
> >+
> >+	/*
> >+	 * Waiting for datapath to complete before resetting queues.
> >+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> >+	 */
> >+	rte_delay_ms(1);
> >+
> >+	/* Vring reset for each Tx queue and Rx queue. */
> >+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> >+		rxvq = dev->data->rx_queues[i];
> >+		virtqueue_rxvq_reset_packed(rxvq->vq);
> >+		virtio_dev_rx_queue_setup_finish(dev, i);
> >+	}
> >+
> >+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> >+		txvq = dev->data->tx_queues[i];
> >+		virtqueue_txvq_reset_packed(txvq->vq);
> >+	}
> >+
> >+	hw->started = 1;
> >+	rte_spinlock_unlock(&hw->state_lock);
> >+}
> >+
> >+
> > static int
> > virtio_user_server_reconnect(struct virtio_user_dev *dev) {
> > 	int ret;
> > 	int connectfd;
> > 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> >+	struct virtio_hw *hw = eth_dev->data->dev_private;
> >
> > 	connectfd = accept(dev->listenfd, NULL, NULL);
> > 	if (connectfd < 0)
> >@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev
> >*dev)
> >
> > 	dev->features &= dev->device_features;
> >
> >+	/* For packed ring, resetting queues is required in reconnection. */
> >+	if (vtpci_packed_queue(hw))
> >+		virtio_user_reset_queues_packed(eth_dev);
> >+
> > 	ret = virtio_user_start_device(dev);
> > 	if (ret < 0)
> > 		return -1;
> >diff --git a/drivers/net/virtio/virtqueue.c
> >b/drivers/net/virtio/virtqueue.c index 5ff1e3587..0b4e3bf3e 100644
> >--- a/drivers/net/virtio/virtqueue.c
> >+++ b/drivers/net/virtio/virtqueue.c
> >@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> > 	else
> > 		virtqueue_rxvq_flush_split(vq);
> > }
> >+
> >+int
> >+virtqueue_rxvq_reset_packed(struct virtqueue *vq) {
> >+	int size = vq->vq_nentries;
> >+	struct vq_desc_extra *dxp;
> >+	struct virtnet_rx *rxvq;
> >+	uint16_t desc_idx;
> >+
> >+	vq->vq_used_cons_idx = 0;
> >+	vq->vq_desc_head_idx = 0;
> >+	vq->vq_avail_idx = 0;
> >+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
> >+	vq->vq_free_cnt = vq->vq_nentries;
> >+
> >+	vq->vq_packed.used_wrap_counter = 1;
> >+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> >+	vq->vq_packed.event_flags_shadow = 0;
> >+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> >+
> >+	rxvq = &vq->rxq;
> >+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> >+
> >+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> >+		dxp = &vq->vq_descx[desc_idx];
> >+		if (dxp->cookie != NULL) {
> >+			rte_pktmbuf_free(dxp->cookie);
> >+			dxp->cookie = NULL;
> >+		}
> >+	}
> >+
> >+	vring_desc_init_packed(vq, size);
> >+
> >+	return 0;
> >+}
> >+
> >+int
> >+virtqueue_txvq_reset_packed(struct virtqueue *vq) {
> >+	int size = vq->vq_nentries;
> >+	struct vq_desc_extra *dxp;
> >+	struct virtnet_tx *txvq;
> >+	uint16_t desc_idx;
> >+
> >+	vq->vq_used_cons_idx = 0;
> >+	vq->vq_desc_head_idx = 0;
> >+	vq->vq_avail_idx = 0;
> >+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
> >+	vq->vq_free_cnt = vq->vq_nentries;
> >+
> >+	vq->vq_packed.used_wrap_counter = 1;
> >+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> >+	vq->vq_packed.event_flags_shadow = 0;
> >+
> >+	txvq = &vq->txq;
> >+	memset(txvq->mz->addr, 0, txvq->mz->len);
> >+	memset(txvq->virtio_net_hdr_mz->addr, 0,
> >+		txvq->virtio_net_hdr_mz->len);
> >+
> >+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> >+		dxp = &vq->vq_descx[desc_idx];
> >+		if (dxp->cookie != NULL) {
> >+			rte_pktmbuf_free(dxp->cookie);
> >+			dxp->cookie = NULL;
> >+		}
> >+	}
> >+
> >+	vring_desc_init_packed(vq, size);
> >+
> >+	return 0;
> >+}
> >diff --git a/drivers/net/virtio/virtqueue.h
> >b/drivers/net/virtio/virtqueue.h index 8d7f197b1..58ad7309a 100644
> >--- a/drivers/net/virtio/virtqueue.h
> >+++ b/drivers/net/virtio/virtqueue.h
> >@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct
> >virtqueue *vq);
> > /* Flush the elements in the used ring. */  void
> >virtqueue_rxvq_flush(struct virtqueue *vq);
> >
> >+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
> >+
> >+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
> >+
> > static inline int
> > virtqueue_full(const struct virtqueue *vq)  {
> >--
> >2.17.1
> >


More information about the dev mailing list