[PATCH v11 1/1] app/testpmd: support multiple mbuf pools per Rx queue

Hanumanth Reddy Pothula hpothula at marvell.com
Tue Nov 1 15:13:56 CET 2022


Ping

> -----Original Message-----
> From: Hanumanth Pothula <hpothula at marvell.com>
> Sent: Tuesday, October 25, 2022 7:10 AM
> To: Aman Singh <aman.deep.singh at intel.com>; Yuying Zhang
> <yuying.zhang at intel.com>
> Cc: dev at dpdk.org; andrew.rybchenko at oktetlabs.ru; thomas at monjalon.net;
> Jerin Jacob Kollanukkaran <jerinj at marvell.com>; Nithin Kumar Dabilpuram
> <ndabilpuram at marvell.com>; Hanumanth Reddy Pothula
> <hpothula at marvell.com>
> Subject: [PATCH v11 1/1] app/testpmd: support multiple mbuf pools per Rx
> queue
> 
> Some hardware has support for choosing memory pools based on the packet's
> size. The pool sort capability allows the PMD/NIC to choose a memory pool
> based on the packet's length.
> 
> When multiple mempool support is enabled, populate the mempool array
> accordingly. Also, print the name of the pool on which each packet is received.
> 
> Signed-off-by: Hanumanth Pothula <hpothula at marvell.com>
> v11:
>  - Resolve compilation errors and warnings.
> v10:
>  - Populate multi-mempool array based on mbuf_data_size_n instead
>    of rx_pkt_nb_segs.
> ---
>  app/test-pmd/testpmd.c | 63 +++++++++++++++++++++++++++---------------
>  app/test-pmd/testpmd.h |  3 ++
>  app/test-pmd/util.c    |  4 +--
>  3 files changed, 45 insertions(+), 25 deletions(-)
> 
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index
> 5b0f0838dc..62f7c9dba8 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t
> rx_queue_id,
>  	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)  {
>  	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
> +	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
> +	struct rte_mempool *mpx;
>  	unsigned int i, mp_n;
>  	int ret;
> 
> -	if (rx_pkt_nb_segs <= 1 ||
> -	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
> +	/* Verify Rx queue configuration is single pool and segment or
> +	 * multiple pool/segment.
> +	 * @see rte_eth_rxconf::rx_mempools
> +	 * @see rte_eth_rxconf::rx_seg
> +	 */
> +	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
> +	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
>  		rx_conf->rx_seg = NULL;
>  		rx_conf->rx_nseg = 0;
>  		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -
> 2659,29 +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
>  					     rx_conf, mp);
>  		goto exit;
>  	}
> -	for (i = 0; i < rx_pkt_nb_segs; i++) {
> -		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> -		struct rte_mempool *mpx;
> -		/*
> -		 * Use last valid pool for the segments with number
> -		 * exceeding the pool index.
> -		 */
> -		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
> -		mpx = mbuf_pool_find(socket_id, mp_n);
> -		/* Handle zero as mbuf data buffer size. */
> -		rx_seg->offset = i < rx_pkt_nb_offs ?
> -				   rx_pkt_seg_offsets[i] : 0;
> -		rx_seg->mp = mpx ? mpx : mp;
> -		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
> -			rx_seg->proto_hdr = rx_pkt_hdr_protos[i];
> -		} else {
> -			rx_seg->length = rx_pkt_seg_lengths[i] ?
> -					rx_pkt_seg_lengths[i] :
> -					mbuf_data_size[mp_n];
> +	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +		for (i = 0; i < rx_pkt_nb_segs; i++) {
> +			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
> +			/*
> +			 * Use last valid pool for the segments with number
> +			 * exceeding the pool index.
> +			 */
> +			mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 :
> i;
> +			mpx = mbuf_pool_find(socket_id, mp_n);
> +			if (rx_conf->offloads &
> RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
> +				/**
> +				 * On segment length zero, update the length as
> +				 *      buffer size - headroom size
> +				 * to make sure enough space is accommodated
> for the header.
> +				 */
> +				rx_seg->length = rx_pkt_seg_lengths[i] ?
> +						 rx_pkt_seg_lengths[i] :
> +						 mbuf_data_size[mp_n] -
> RTE_PKTMBUF_HEADROOM;
> +				rx_seg->offset = i < rx_pkt_nb_offs ?
> +						 rx_pkt_seg_offsets[i] : 0;
> +				rx_seg->mp = mpx ? mpx : mp;
> +			}
> +		}
> +		rx_conf->rx_nseg = rx_pkt_nb_segs;
> +		rx_conf->rx_seg = rx_useg;
> +	} else {
> +		for (i = 0; i < mbuf_data_size_n; i++) {
> +			mpx = mbuf_pool_find(socket_id, i);
> +			rx_mempool[i] = mpx ? mpx : mp;
>  		}
> +		rx_conf->rx_mempools = rx_mempool;
> +		rx_conf->rx_nmempool = mbuf_data_size_n;
>  	}
> -	rx_conf->rx_nseg = rx_pkt_nb_segs;
> -	rx_conf->rx_seg = rx_useg;
>  	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
>  				    socket_id, rx_conf, NULL);
>  	rx_conf->rx_seg = NULL;
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index
> e65be323b8..14be10dcef 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -80,6 +80,9 @@ extern uint8_t cl_quit;
> 
>  #define MIN_TOTAL_NUM_MBUFS 1024
> 
> +/* Maximum number of pools supported per Rx queue */ #define
> +MAX_MEMPOOL 8
> +
>  typedef uint8_t  lcoreid_t;
>  typedef uint16_t portid_t;
>  typedef uint16_t queueid_t;
> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index
> fd98e8b51d..f9df5f69ef 100644
> --- a/app/test-pmd/util.c
> +++ b/app/test-pmd/util.c
> @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue,
> struct rte_mbuf *pkts[],
>  		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
>  				 print_buf, buf_size, &cur_len);
>  		MKDUMPSTR(print_buf, buf_size, cur_len,
> -			  " - type=0x%04x - length=%u - nb_segs=%d",
> -			  eth_type, (unsigned int) mb->pkt_len,
> +			  " - pool=%s - type=0x%04x - length=%u -
> nb_segs=%d",
> +			  mb->pool->name, eth_type, (unsigned int) mb-
> >pkt_len,
>  			  (int)mb->nb_segs);
>  		ol_flags = mb->ol_flags;
>  		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
> --
> 2.25.1



More information about the dev mailing list