[dpdk-stable] patch 'net/virtio: fix refill order in packed ring datapath' has been queued to stable release 20.11.3
luca.boccassi at gmail.com
luca.boccassi at gmail.com
Mon Jul 26 15:52:46 CEST 2021
Hi,
FYI, your patch has been queued to stable release 20.11.3
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/28/21. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable
This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/12e277dee62c9ecf129351cfe3d4393297abe6cb
Thanks.
Luca Boccassi
---
>From 12e277dee62c9ecf129351cfe3d4393297abe6cb Mon Sep 17 00:00:00 2001
From: Cheng Jiang <cheng1.jiang at intel.com>
Date: Thu, 8 Jul 2021 09:58:01 +0000
Subject: [PATCH] net/virtio: fix refill order in packed ring datapath
[ upstream commit 2d91b28730a945def257bc372a525c9b5dbf181c ]
The front-end should refill the descriptor with the mbuf indicated by
the buff_id rather than the index of used descriptor. Back-end may
return buffers out of order if async copy mode is enabled.
When initializing rxq, refill the descriptors in order as buff_id is
not available at that time.
Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
Signed-off-by: Cheng Jiang <cheng1.jiang at intel.com>
Signed-off-by: Marvin Liu <yong.liu at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
drivers/net/virtio/virtio_rxtx.c | 72 +++++++++++++++++++++++---------
1 file changed, 52 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 77934e8c58..393d4e9f84 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -333,13 +333,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
return 0;
}
-static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
- struct rte_mbuf **cookie, uint16_t num)
+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+ struct vring_packed_desc *dp,
+ struct rte_mbuf *cookie)
{
- struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
+
+ dp->addr = cookie->buf_iova +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ dp->len = cookie->buf_len -
+ RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+ virtqueue_store_flags_packed(dp, flags,
+ hw->weak_barriers);
+
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^=
+ VRING_PACKED_DESC_F_AVAIL_USED;
+ flags = vq->vq_packed.cached_flags;
+ }
+}
+
+static inline int
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
+ struct rte_mbuf **cookie, uint16_t num)
+{
+ struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
struct vq_desc_extra *dxp;
uint16_t idx;
int i;
@@ -355,24 +377,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
- start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
- + hw->vtnet_hdr_size;
+ virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+ }
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
- vq->vq_desc_head_idx = dxp->next;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+ struct rte_mbuf **cookie, uint16_t num)
+{
+ struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+ struct vq_desc_extra *dxp;
+ uint16_t idx, did;
+ int i;
- virtqueue_store_flags_packed(&start_dp[idx], flags,
- hw->weak_barriers);
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
- VRING_PACKED_DESC_F_AVAIL_USED;
- flags = vq->vq_packed.cached_flags;
- }
+ for (i = 0; i < num; i++) {
+ idx = vq->vq_avail_idx;
+ did = start_dp[idx].id;
+ dxp = &vq->vq_descx[did];
+ dxp->cookie = (void *)cookie[i];
+ dxp->ndescs = 1;
+
+ virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
}
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
return 0;
@@ -748,7 +780,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
/* Enqueue allocated buffers */
if (vtpci_packed_queue(vq->hw))
- error = virtqueue_enqueue_recv_refill_packed(vq,
+ error = virtqueue_enqueue_recv_refill_packed_init(vq,
&m, 1);
else
error = virtqueue_enqueue_recv_refill(vq,
--
2.30.2
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2021-07-26 13:53:17.050079266 +0100
+++ 0023-net-virtio-fix-refill-order-in-packed-ring-datapath.patch 2021-07-26 13:53:15.841292454 +0100
@@ -1 +1 @@
-From 2d91b28730a945def257bc372a525c9b5dbf181c Mon Sep 17 00:00:00 2001
+From 12e277dee62c9ecf129351cfe3d4393297abe6cb Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 2d91b28730a945def257bc372a525c9b5dbf181c ]
+
@@ -14 +15,0 @@
-Cc: stable at dpdk.org
@@ -24 +25 @@
-index 34108fb946..f70644b0b7 100644
+index 77934e8c58..393d4e9f84 100644
@@ -27 +28 @@
-@@ -328,13 +328,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
+@@ -333,13 +333,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
@@ -67 +68 @@
-@@ -350,24 +372,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+@@ -355,24 +377,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
@@ -71,4 +72,4 @@
-- start_dp[idx].addr = cookie[i]->buf_iova +
-- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-- start_dp[idx].len = cookie[i]->buf_len -
-- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+- start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
+- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+- start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
+- + hw->vtnet_hdr_size;
@@ -117 +118 @@
-@@ -740,7 +772,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+@@ -748,7 +780,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
@@ -120 +121 @@
- if (virtio_with_packed_queue(vq->hw))
+ if (vtpci_packed_queue(vq->hw))
More information about the stable
mailing list