[dpdk-stable] patch 'vhost: protect vring access done by application' has been queued to LTS release 17.11.10
luca.boccassi at gmail.com
Thu Dec 19 15:32:59 CET 2019
Hi,
FYI, your patch has been queued to LTS release 17.11.10
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/21/19, so please
shout if you have any objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
correctly done.
Thanks.
Luca Boccassi
---
From 553c27798e7b3ca0ccf8f7e2191543097b4d467d Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie at intel.com>
Date: Mon, 19 Aug 2019 19:34:57 +0800
Subject: [PATCH] vhost: protect vring access done by application
[ upstream commit 4e0de8dac8531b82d4c328791a67f49eadfed5f0 ]
Besides the enqueue/dequeue API, other APIs of the builtin net
backend should also be protected.
Fixes: a3688046995f ("vhost: protect active rings from async ring changes")
Reported-by: Peng He <xnhp0320 at icloud.com>
Signed-off-by: Tiwei Bie <tiwei.bie at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
lib/librte_vhost/vhost.c | 42 ++++++++++++++++++++++++++++++++++------
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 78fedc6a47..08ab6eab35 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -538,22 +538,32 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+ uint16_t ret = 0;
dev = get_device(vid);
if (!dev)
return 0;
vq = dev->virtqueue[queue_id];
- if (!vq->enabled)
- return 0;
- return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(!vq->enabled || vq->avail == NULL))
+ goto out;
+
+ ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
}
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ int ret = 0;
if (dev == NULL)
return -1;
@@ -564,8 +574,21 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
return -1;
}
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (vq->used == NULL) {
+ ret = -1;
+ goto out;
+ }
+
dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
- return 0;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
}
void
@@ -604,6 +627,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+ uint32_t ret = 0;
dev = get_device(vid);
if (dev == NULL)
@@ -619,8 +643,14 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
if (vq == NULL)
return 0;
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0 || vq->avail == NULL))
- return 0;
+ goto out;
- return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+ ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
}
--
2.20.1
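For context, the fix follows the pattern already used on the enqueue/dequeue
paths: take the per-virtqueue access_lock before dereferencing any vring
pointer, bail out if the ring is not (or no longer) set up, and release the
lock on every exit path. Below is a minimal standalone sketch of that pattern,
not the actual vhost library code: pthread_spinlock_t stands in for
rte_spinlock_t and the toy_* types are simplified stand-ins for
struct vhost_virtqueue, purely for illustration.

/* Minimal standalone illustration of the lock/check/compute/unlock pattern
 * applied by the patch. pthread_spinlock_t plays the role of rte_spinlock_t
 * and toy_virtqueue is a simplified stand-in for struct vhost_virtqueue. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vring_avail {
	uint16_t idx;
};

struct toy_virtqueue {
	pthread_spinlock_t access_lock;   /* stand-in for vq->access_lock */
	int enabled;
	struct toy_vring_avail *avail;    /* may be NULL while rings change */
	uint16_t last_used_idx;
};

/* Mirrors the reworked rte_vhost_avail_entries(): every vring access happens
 * with the lock held, and both the "not ready" and the normal path unlock. */
static uint16_t
toy_avail_entries(struct toy_virtqueue *vq)
{
	uint16_t ret = 0;

	pthread_spin_lock(&vq->access_lock);

	if (!vq->enabled || vq->avail == NULL)
		goto out;

	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;

out:
	pthread_spin_unlock(&vq->access_lock);
	return ret;
}

int main(void)
{
	struct toy_vring_avail avail = { .idx = 10 };
	struct toy_virtqueue vq = {
		.enabled = 1,
		.avail = &avail,
		.last_used_idx = 4,
	};

	pthread_spin_init(&vq.access_lock, PTHREAD_PROCESS_PRIVATE);
	printf("available entries: %u\n", toy_avail_entries(&vq)); /* 6 */
	pthread_spin_destroy(&vq.access_lock);
	return 0;
}

Built with e.g. "cc -pthread", the sketch prints 6 (10 - 4). In the real
library the same access_lock is taken on the vhost-user message handling side
while the rings are being changed (see the commit referenced in the Fixes tag
above), which is what makes the enabled/NULL checks under the lock meaningful.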
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2019-12-19 14:32:27.626988674 +0000
+++ 0032-vhost-protect-vring-access-done-by-application.patch 2019-12-19 14:32:25.785291480 +0000
@@ -1,26 +1,27 @@
-From 4e0de8dac8531b82d4c328791a67f49eadfed5f0 Mon Sep 17 00:00:00 2001
+From 553c27798e7b3ca0ccf8f7e2191543097b4d467d Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie at intel.com>
Date: Mon, 19 Aug 2019 19:34:57 +0800
Subject: [PATCH] vhost: protect vring access done by application
+[ upstream commit 4e0de8dac8531b82d4c328791a67f49eadfed5f0 ]
+
Besides the enqueue/dequeue API, other APIs of the builtin net
backend should also be protected.
Fixes: a3688046995f ("vhost: protect active rings from async ring changes")
-Cc: stable at dpdk.org
Reported-by: Peng He <xnhp0320 at icloud.com>
Signed-off-by: Tiwei Bie <tiwei.bie at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
- lib/librte_vhost/vhost.c | 50 +++++++++++++++++++++++++++++++---------
- 1 file changed, 39 insertions(+), 11 deletions(-)
+ lib/librte_vhost/vhost.c | 42 ++++++++++++++++++++++++++++++++++------
+ 1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
-index 77be160697..cea44df8cb 100644
+index 78fedc6a47..08ab6eab35 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
-@@ -785,22 +785,33 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
+@@ -538,22 +538,32 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
@@ -47,77 +48,39 @@
+ return ret;
}
--static inline void
-+static inline int
- vhost_enable_notify_split(struct virtio_net *dev,
- struct vhost_virtqueue *vq, int enable)
- {
-+ if (vq->used == NULL)
-+ return -1;
-+
- if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
- if (enable)
- vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
-@@ -810,17 +821,21 @@ vhost_enable_notify_split(struct virtio_net *dev,
- if (enable)
- vhost_avail_event(vq) = vq->last_avail_idx;
- }
-+ return 0;
- }
-
--static inline void
-+static inline int
- vhost_enable_notify_packed(struct virtio_net *dev,
- struct vhost_virtqueue *vq, int enable)
- {
- uint16_t flags;
-
-+ if (vq->device_event == NULL)
-+ return -1;
-+
- if (!enable) {
- vq->device_event->flags = VRING_EVENT_F_DISABLE;
-- return;
-+ return 0;
- }
-
- flags = VRING_EVENT_F_ENABLE;
-@@ -833,6 +848,7 @@ vhost_enable_notify_packed(struct virtio_net *dev,
- rte_smp_wmb();
-
- vq->device_event->flags = flags;
-+ return 0;
- }
-
int
-@@ -840,18 +856,23 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
+ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
struct virtio_net *dev = get_device(vid);
- struct vhost_virtqueue *vq;
-+ int ret;
++ struct vhost_virtqueue *vq;
++ int ret = 0;
- if (!dev)
+ if (dev == NULL)
return -1;
+@@ -564,8 +574,21 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
+ return -1;
+ }
- vq = dev->virtqueue[queue_id];
-
++ vq = dev->virtqueue[queue_id];
++
+ rte_spinlock_lock(&vq->access_lock);
+
- if (vq_is_packed(dev))
-- vhost_enable_notify_packed(dev, vq, enable);
-+ ret = vhost_enable_notify_packed(dev, vq, enable);
- else
-- vhost_enable_notify_split(dev, vq, enable);
-+ ret = vhost_enable_notify_split(dev, vq, enable);
-
++ if (vq->used == NULL) {
++ ret = -1;
++ goto out;
++ }
++
+ dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
- return 0;
++
++out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
}
void
-@@ -890,6 +911,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
+@@ -604,6 +627,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
@@ -125,7 +88,7 @@
dev = get_device(vid);
if (dev == NULL)
-@@ -905,10 +927,16 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
+@@ -619,8 +643,14 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
if (vq == NULL)
return 0;
@@ -142,8 +105,6 @@
+ rte_spinlock_unlock(&vq->access_lock);
+ return ret;
}
-
- int rte_vhost_get_vdpa_device_id(int vid)
--
2.20.1