[dpdk-dev] [PATCH v2] vhost: fix index overflow for packed ring in async vhost
Xia, Chenbo
chenbo.xia at intel.com
Fri Jul 16 04:30:50 CEST 2021
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang at intel.com>
> Sent: Thursday, July 15, 2021 5:51 PM
> To: maxime.coquelin at redhat.com; Xia, Chenbo <chenbo.xia at intel.com>
> Cc: dev at dpdk.org; Hu, Jiayu <jiayu.hu at intel.com>; Yang, YvonneX
> <yvonnex.yang at intel.com>; Jiang, Cheng1 <cheng1.jiang at intel.com>;
> stable at dpdk.org
> Subject: [PATCH v2] vhost: fix index overflow for packed ring in async vhost
>
> We introduced some new indexes into the packed ring of async vhost. They
> will eventually overflow and lead to errors if the ring size is not a
> power of 2. This patch checks these indexes and wraps them explicitly so
> they stay within [0, ring size).
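To illustrate the arithmetic behind the overflow described above: a uint16_t counter that is only ever incremented holds n mod 65536, so "counter % size" equals the intended slot n mod size for every n only when size divides 65536, i.e. only when the ring size is a power of two. A minimal standalone sketch, not DPDK code, with 768 as an arbitrary non-power-of-2 ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t size = 768;   /* packed ring size, not a power of two */
	uint16_t free_running = 0;   /* old scheme: increments without bound */
	uint16_t bounded = 0;        /* patched scheme: kept in [0, size)    */

	for (uint32_t n = 0; n < 70000; n++) {
		uint32_t expected = n % size;          /* slot we really want */

		if (free_running % size != expected) {
			printf("n=%u: %% size gives %u, bounded gives %u, expected %u\n",
			       n, free_running % size, (unsigned)bounded, expected);
			break;   /* first mismatch, right after the uint16_t wrap */
		}

		free_running++;              /* silently wraps at 65536        */
		if (++bounded >= size)       /* explicit wrap, as in the patch */
			bounded -= size;
	}
	return 0;
}

The first mismatch shows up at n = 65536, where the free-running counter has wrapped back to 0 while the explicitly bounded index still points at the correct slot 256.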
>
> Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
> Cc: stable at dpdk.org
>
> Signed-off-by: Cheng Jiang <cheng1.jiang at intel.com>
> ---
> lib/vhost/virtio_net.c | 21 ++++++++++++++-------
> 1 file changed, 14 insertions(+), 7 deletions(-)
>
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index f4a2c88d8b..bfb2bf8fc4 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2036,7 +2036,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>
> slot_idx = (vq->async_pkts_idx + num_async_pkts) % vq->size;
> if (it_pool[it_idx].count) {
> - uint16_t from, to;
> + uint16_t from;
>
> async_descs_idx += num_descs;
> async_fill_desc(&tdes[pkt_burst_idx++],
> @@ -2055,11 +2055,13 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> * descriptors.
> */
> from = vq->shadow_used_idx - num_buffers;
> - to = vq->async_buffer_idx_packed % vq->size;
> store_dma_desc_info_packed(vq->shadow_used_packed,
> - vq->async_buffers_packed, vq->size, from, to, num_buffers);
> + vq->async_buffers_packed, vq->size, from,
> + vq->async_buffer_idx_packed, num_buffers);
>
> vq->async_buffer_idx_packed += num_buffers;
> + if (vq->async_buffer_idx_packed >= vq->size)
> + vq->async_buffer_idx_packed -= vq->size;
> vq->shadow_used_idx -= num_buffers;
> } else {
> comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> @@ -2112,6 +2114,8 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> dma_error_handler_packed(vq, async_descs, async_descs_idx, slot_idx, pkt_err,
> &pkt_idx, &num_async_pkts, &num_done_pkts);
> vq->async_pkts_idx += num_async_pkts;
> + if (vq->async_pkts_idx >= vq->size)
> + vq->async_pkts_idx -= vq->size;
> *comp_count = num_done_pkts;
>
> if (likely(vq->shadow_used_idx)) {
> @@ -2160,7 +2164,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> uint16_t from, to;
>
> do {
> - from = vq->last_async_buffer_idx_packed % vq->size;
> + from = vq->last_async_buffer_idx_packed;
> to = (from + nr_left) % vq->size;
> if (to > from) {
> vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
> @@ -2169,7 +2173,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> } else {
> vhost_update_used_packed(vq, vq->async_buffers_packed + from,
> vq->size - from);
> - vq->last_async_buffer_idx_packed += vq->size - from;
> + vq->last_async_buffer_idx_packed = 0;
> nr_left -= vq->size - from;
> }
> } while (nr_left > 0);
> @@ -2252,10 +2256,13 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> vhost_vring_call_split(dev, vq);
> }
> } else {
> - if (vq_is_packed(dev))
> + if (vq_is_packed(dev)) {
> vq->last_async_buffer_idx_packed += n_buffers;
> - else
> + if (vq->last_async_buffer_idx_packed >= vq->size)
> + vq->last_async_buffer_idx_packed -= vq->size;
> + } else {
> vq->last_async_desc_idx_split += n_descs;
> + }
> }
>
> done:
> --
> 2.29.2
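For reference, a compile-able toy version of the patched write-back loop; struct ring_state, update_used() and flush_used_packed() are made-up stand-ins for the vhost_virtqueue fields, vhost_update_used_packed() and write_back_completed_descs_packed() in the real code. The point of the change is that "from" is already bounded, and a range that crosses the ring end is flushed in two pieces, with the index reset to 0 at the boundary instead of relying on "% size":

#include <stdint.h>
#include <stdio.h>

/* Hypothetical minimal ring state, standing in for the vhost_virtqueue
 * fields used by the real function. */
struct ring_state {
	uint16_t size;      /* ring size, need not be a power of two      */
	uint16_t last_idx;  /* next used slot to write back, kept < size  */
};

/* Stub standing in for vhost_update_used_packed(): just report the range. */
static void
update_used(struct ring_state *rs, uint16_t from, uint16_t count)
{
	(void)rs;
	printf("write back slots [%u, %u)\n", (unsigned)from, (unsigned)(from + count));
}

static void
flush_used_packed(struct ring_state *rs, uint16_t nr_left)
{
	uint16_t from, to;

	do {
		from = rs->last_idx;                  /* already < rs->size */
		to = (from + nr_left) % rs->size;
		if (to > from) {
			/* no wrap: one contiguous chunk */
			update_used(rs, from, to - from);
			rs->last_idx += nr_left;
			nr_left = 0;
		} else {
			/* wraps: flush the tail, reset to 0, loop for the head */
			update_used(rs, from, rs->size - from);
			rs->last_idx = 0;
			nr_left -= rs->size - from;
		}
	} while (nr_left > 0);
}

int main(void)
{
	struct ring_state rs = { .size = 768, .last_idx = 700 };

	flush_used_packed(&rs, 100);   /* crosses the ring end: two chunks */
	return 0;
}

With size = 768, last_idx = 700 and 100 completed buffers, it reports the two chunks [700, 768) and [0, 32).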
Reviewed-by: Chenbo Xia <chenbo.xia at intel.com>