[PATCH 2/2] app/testpmd: support selective Rx data

Gregory Etelson getelson at nvidia.com
Mon Feb 2 17:09:03 CET 2026


Add support for selective Rx data using the existing rxoffs and rxpkts
command-line parameters.

When both rxoffs and rxpkts are specified on PMDs supporting
selective Rx data (selective_read capability), testpmd automatically:
1. Inserts segments with NULL mempool for gaps between configured
   segments to discard unwanted data.
2. Adds a trailing segment with NULL mempool to cover any remaining
   data up to MTU.

Example usage to receive only the Ethernet header and a 64-byte
segment at offset 128:
  --rxoffs=0,128 --rxpkts=14,64

This creates segments:
- [0-13]: 14 bytes with mempool (received)
- [14-127]: 114 bytes with NULL mempool (discarded)
- [128-191]: 64 bytes with mempool (received)
- [192-MTU]: remaining bytes with NULL mempool (discarded)
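
For illustration only (not part of this patch), a minimal sketch of the
rte_eth_rxseg_split array that testpmd would end up passing to
rte_eth_rx_queue_setup() for the example above, assuming a hypothetical
mempool pointer mbp and a 1500-byte MTU:

  #include <rte_ethdev.h>

  /* Sketch only: mbp and the 1500-byte MTU are assumptions for illustration. */
  static void
  fill_selective_rx_segs(struct rte_eth_rxseg_split seg[4],
                         struct rte_mempool *mbp)
  {
          /* [0-13]: Ethernet header, received into mbp */
          seg[0] = (struct rte_eth_rxseg_split){
                  .mp = mbp, .offset = 0, .length = 14 };
          /* [14-127]: gap segment, NULL mempool, discarded */
          seg[1] = (struct rte_eth_rxseg_split){
                  .mp = NULL, .offset = 14, .length = 114 };
          /* [128-191]: received into mbp */
          seg[2] = (struct rte_eth_rxseg_split){
                  .mp = mbp, .offset = 128, .length = 64 };
          /* [192-MTU]: trailing segment, NULL mempool, discarded (assumed MTU 1500) */
          seg[3] = (struct rte_eth_rxseg_split){
                  .mp = NULL, .offset = 192, .length = 1500 - 192 };
  }

With this patch applied, testpmd builds the equivalent layout internally
from rxoffs/rxpkts; no manual segment setup is required.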

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
Signed-off-by: Claude Sonnet 4.5 <noreply at anthropic.com>
---
 app/test-pmd/testpmd.c                | 74 +++++++++++++++++++++++++--
 doc/guides/testpmd_app_ug/run_app.rst | 19 +++++++
 2 files changed, 88 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 1fe41d852a..62129f0d28 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2676,11 +2676,58 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	int ret;
 
 
-	if ((rx_pkt_nb_segs > 1) &&
+	if ((rx_pkt_nb_segs > 1 || rx_pkt_nb_offs > 0) &&
 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+		struct rte_eth_dev_info dev_info;
+		uint16_t seg_idx = 0;
+		uint16_t next_offset = 0;
+		uint16_t mtu = 0;
+		bool selective_rx;
+
+		ret = rte_eth_dev_info_get(port_id, &dev_info);
+		if (ret != 0)
+			return ret;
+
+		selective_rx = rx_pkt_nb_offs > 0 &&
+			       dev_info.rx_seg_capa.selective_read != 0;
+
+		if (selective_rx) {
+			ret = rte_eth_dev_get_mtu(port_id, &mtu);
+			if (ret != 0)
+				return ret;
+		}
+
 		/* multi-segment configuration */
 		for (i = 0; i < rx_pkt_nb_segs; i++) {
-			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+			struct rte_eth_rxseg_split *rx_seg;
+			uint16_t seg_offset;
+
+			seg_offset = i < rx_pkt_nb_offs ?
+				     rx_pkt_seg_offsets[i] : next_offset;
+
+			/* Insert gap segment if selective Rx and there's a gap */
+			if (selective_rx && seg_offset > next_offset) {
+				if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+					fprintf(stderr,
+						"Too many segments (max %u)\n",
+						MAX_SEGS_BUFFER_SPLIT);
+					return -EINVAL;
+				}
+				rx_seg = &rx_useg[seg_idx++].split;
+				rx_seg->offset = next_offset;
+				rx_seg->length = seg_offset - next_offset;
+				rx_seg->mp = NULL; /* Discard gap data */
+				next_offset = seg_offset;
+			}
+
+			/* Add the actual data segment */
+			if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+				fprintf(stderr,
+					"Too many segments (max %u)\n",
+					MAX_SEGS_BUFFER_SPLIT);
+				return -EINVAL;
+			}
+			rx_seg = &rx_useg[seg_idx++].split;
 			/*
 			 * Use last valid pool for the segments with number
 			 * exceeding the pool index.
@@ -2688,8 +2735,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
 			mpx = mbuf_pool_find(socket_id, mp_n);
 			/* Handle zero as mbuf data buffer size. */
-			rx_seg->offset = i < rx_pkt_nb_offs ?
-					   rx_pkt_seg_offsets[i] : 0;
+			rx_seg->offset = seg_offset;
 			rx_seg->mp = mpx ? mpx : mp;
 			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
 				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
@@ -2699,8 +2745,26 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						rx_pkt_seg_lengths[i] :
 						mbuf_data_size[mp_n];
 			}
+
+			if (selective_rx)
+				next_offset = seg_offset + rx_seg->length;
 		}
-		rx_conf->rx_nseg = rx_pkt_nb_segs;
+
+		/* Add trailing segment to MTU if selective Rx enabled */
+		if (selective_rx && next_offset < mtu) {
+			if (seg_idx >= MAX_SEGS_BUFFER_SPLIT) {
+				fprintf(stderr,
+					"Too many segments (max %u)\n",
+					MAX_SEGS_BUFFER_SPLIT);
+				return -EINVAL;
+			}
+			rx_useg[seg_idx].split.offset = next_offset;
+			rx_useg[seg_idx].split.length = mtu - next_offset;
+			rx_useg[seg_idx].split.mp = NULL; /* Discard trailing data */
+			seg_idx++;
+		}
+
+		rx_conf->rx_nseg = seg_idx;
 		rx_conf->rx_seg = rx_useg;
 		rx_conf->rx_mempools = NULL;
 		rx_conf->rx_nmempool = 0;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 97d6c75716..638c0b0eb3 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -364,6 +364,11 @@ The command line options are:
     feature is engaged. Affects only the queues configured
     with split offloads (currently BUFFER_SPLIT is supported only).
 
+    When used with ``--rxpkts`` on PMDs supporting selective Rx data,
+    this option enables receiving only specific packet segments while
+    discarding the rest. Gaps between configured segments and any trailing
+    data up to the MTU are filled automatically with NULL-mempool segments.
+
 *   ``--rxpkts=X[,Y]``
 
     Set the length of segments to scatter packets on receiving if split
@@ -373,6 +378,20 @@ The command line options are:
     command line parameter and the mbufs to receive will be allocated
     sequentially from these extra memory pools.
 
+    **Selective Rx Data Example:**
+
+    To receive only the Ethernet header (14 bytes at offset 0) and
+    a 64-byte segment starting at offset 128, while discarding the rest::
+
+        --rxoffs=0,128 --rxpkts=14,64
+
+    This configuration will:
+
+    * Receive 14 bytes at offset 0 (Ethernet header)
+    * Discard bytes 14-127 (inserted NULL mempool segment)
+    * Receive 64 bytes at offset 128
+    * Discard bytes from 192 up to the MTU (inserted NULL mempool segment)
+
 *   ``--txpkts=X[,Y]``
 
     Set TX segment sizes or total packet length. Valid for ``tx-only``
-- 
2.51.0


