[PATCH v2] net/memif: fix multi-segment Rx corruption

Sriram Yagnaraman sriram.yagnaraman at ericsson.com
Tue Mar 24 17:14:36 CET 2026


Fix dst_off being reset per-descriptor instead of per-packet in the Rx
slow path. When processing chained descriptors (MEMIF_DESC_FLAG_NEXT),
the goto next_slot2 jump reset dst_off to 0, so data from subsequent
descriptors overwrote the beginning of the current mbuf. Move the
dst_off initialization above the next_slot2 label so it is reset only
once per packet.

Add a boundary check in both Rx paths before processing the next
segment. If MEMIF_DESC_FLAG_NEXT is set but n_slots is 0, free the
incomplete mbuf chain and exit gracefully to prevent reading beyond
the available descriptors.

Bugzilla ID: 1609
Fixes: aa17df860891 ("net/memif: add a Rx fast path")
Cc: stable at dpdk.org

Reported-by: Mike Bly <bly454 at gmail.com>
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman at ericsson.com>
---
v2:
- Fix double-free in fast-path truncation: mbuf_head is mbufs[rx_pkts],
  so rte_pktmbuf_free_bulk already frees it and its chain. Remove the
  separate rte_pktmbuf_free(mbuf_head) call. (Stephen Hemminger)
- Remove MIF_LOG from truncation paths, increment mq->n_err and report
  via stats->ierrors instead.

 drivers/net/memif/rte_eth_memif.c | 19 +++++++++++++++++--
 drivers/net/memif/rte_eth_memif.h |  1 +
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index effcee3721..5d153c3a5a 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -378,6 +378,12 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			n_slots--;
 
 			if (d0->flags & MEMIF_DESC_FLAG_NEXT) {
+				if (unlikely(n_slots == 0)) {
+					mq->n_err++;
+					rte_pktmbuf_free_bulk(mbufs + rx_pkts,
+							MAX_PKT_BURST - rx_pkts);
+					goto no_free_bufs;
+				}
 				mbuf_tail = mbuf;
 				mbuf = rte_pktmbuf_alloc(mq->mempool);
 				if (unlikely(mbuf == NULL)) {
@@ -416,13 +422,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto no_free_bufs;
 			mbuf = mbuf_head;
 			mbuf->port = mq->in_port;
+			dst_off = 0;
 
 next_slot2:
 			s0 = cur_slot & mask;
 			d0 = &ring->desc[s0];
 
 			src_len = d0->length;
-			dst_off = 0;
 			src_off = 0;
 
 			do {
@@ -464,8 +470,14 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			cur_slot++;
 			n_slots--;
 
-			if (d0->flags & MEMIF_DESC_FLAG_NEXT)
+			if (d0->flags & MEMIF_DESC_FLAG_NEXT) {
+				if (unlikely(n_slots == 0)) {
+					mq->n_err++;
+					rte_pktmbuf_free(mbuf_head);
+					goto no_free_bufs;
+				}
 				goto next_slot2;
+			}
 
 			mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
 			*bufs++ = mbuf_head;
@@ -1607,6 +1619,7 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats,
 		}
 		stats->ipackets += mq->n_pkts;
 		stats->ibytes += mq->n_bytes;
+		stats->ierrors += mq->n_err;
 	}
 
 	tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings :
@@ -1639,12 +1652,14 @@ memif_stats_reset(struct rte_eth_dev *dev)
 		    dev->data->rx_queues[i];
 		mq->n_pkts = 0;
 		mq->n_bytes = 0;
+		mq->n_err = 0;
 	}
 	for (i = 0; i < pmd->run.num_s2c_rings; i++) {
 		mq = (pmd->role == MEMIF_ROLE_CLIENT) ? dev->data->rx_queues[i] :
 		    dev->data->tx_queues[i];
 		mq->n_pkts = 0;
 		mq->n_bytes = 0;
+		mq->n_err = 0;
 	}
 
 	return 0;
diff --git a/drivers/net/memif/rte_eth_memif.h b/drivers/net/memif/rte_eth_memif.h
index d4e625ab51..9c7a3a93f0 100644
--- a/drivers/net/memif/rte_eth_memif.h
+++ b/drivers/net/memif/rte_eth_memif.h
@@ -69,6 +69,7 @@ struct memif_queue {
 	/* rx/tx info */
 	uint64_t n_pkts;			/**< number of rx/tx packets */
 	uint64_t n_bytes;			/**< number of rx/tx bytes */
+	uint64_t n_err;				/**< number of rx/tx errors */
 
 	struct rte_intr_handle *intr_handle;	/**< interrupt handle */
 
-- 
2.43.7



More information about the dev mailing list