[dpdk-dev] [PATCH 19.08] net/pcap: enable infinitely rxing a pcap file

Ferruh Yigit ferruh.yigit at intel.com
Mon May 27 17:15:01 CEST 2019


On 4/11/2019 12:00 PM, Cian Ferriter wrote:
> It can be useful to use pcap files for some rudimental performance
> testing. This patch enables this functionality in the pcap driver.
> 
> At a high level, this works by creating a ring of sufficient size to
> store the packets in the pcap file passed to the application. When the
> rx function for this mode is called, packets are dequeued from the ring
> for use by the application and also enqueued back on to the ring to be
> "received" again.
> 
> A tx_drop mode is also added since transmitting to a tx_pcap file isn't
> desirable at a high traffic rate.
> 
> Jumbo frames are not supported in this mode. When filling the ring at rx
> queue setup time, the presence of multi segment mbufs is checked for.
> The PMD will exit on detection of these multi segment mbufs.
> 
> Signed-off-by: Cian Ferriter <cian.ferriter at intel.com>
> ---
>  doc/guides/nics/pcap_ring.rst   |  15 +++
>  drivers/net/pcap/rte_eth_pcap.c | 209 ++++++++++++++++++++++++++++++--
>  2 files changed, 215 insertions(+), 9 deletions(-)
> 
> diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
> index c1ef9196b..45f55a345 100644
> --- a/doc/guides/nics/pcap_ring.rst
> +++ b/doc/guides/nics/pcap_ring.rst
> @@ -106,6 +106,21 @@ Runtime Config Options
>  
>     --vdev 'net_pcap0,iface=eth0,phy_mac=1'
>  
> +- Use the RX PCAP file to infinitely receive packets
> +
> + In case ``rx_pcap=`` configuration is set, user may want to use the selected PCAP file for rudimental
> + performance testing. This can be done with a ``devarg`` ``infinite_rx``, for example::
> +
> +   --vdev 'net_pcap0,rx_pcap=file_rx.pcap,infinite_rx=1,tx_drop=1'
> +
> + When this mode is used, it is recommended to use the ``tx_drop`` ``devarg``.
> +
> +- Drop all packets on transmit
> +
> + The user may want to drop all packets on tx for a device. This can be done with the ``tx_drop`` ``devarg``, for example::
> +
> +   --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_drop=1'

Is there a performance drop when tx packets are directed to the 'loopback' interface,
like: --vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_iface=lo' ?
If there is no performance drop, I am for using the 'lo' interface instead of adding new devargs.

> +
>  Examples of Usage
>  ^^^^^^^^^^^^^^^^^
>  
> diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
> index 353538f16..b72db973a 100644
> --- a/drivers/net/pcap/rte_eth_pcap.c
> +++ b/drivers/net/pcap/rte_eth_pcap.c
<...>

> @@ -178,6 +186,40 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
>  	}
>  }
>  
> +static uint16_t
> +eth_pcap_rx_infinite(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> +{
> +	int i;
> +	struct pcap_rx_queue *pcap_q = queue;
> +
> +	if (unlikely(nb_pkts == 0))
> +		return 0;
> +
> +	if (rte_pktmbuf_alloc_bulk(pcap_q->mb_pool, bufs, nb_pkts) != 0)
> +		return 0;
> +
> +	for (i = 0; i < nb_pkts; i++) {
> +		struct rte_mbuf *pcap_buf;
> +		int err = rte_ring_dequeue(pcap_q->pkts, (void **)&pcap_buf);
> +		if (err)
> +			rte_panic("failed to dequeue from pcap pkts ring\n");

Please don't have any rte_panic() in the driver code; there are a few more instances in this patch.

> +
> +		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *),
> +				rte_pktmbuf_mtod(pcap_buf, void *),
> +				pcap_buf->data_len);
> +		bufs[i]->data_len = pcap_buf->data_len;
> +		bufs[i]->pkt_len = pcap_buf->pkt_len;
> +		bufs[i]->port = pcap_q->port_id;
> +
> +		/* enqueue packet back on ring to allow infinite rx */
> +		rte_ring_enqueue(pcap_q->pkts, pcap_buf);
> +	}
> +
> +	pcap_q->rx_stat.pkts += i;

For consistency, can you please update "pcap_q->rx_stat.bytes" too?

> +
> +	return i;
> +}
> +
>  static uint16_t
>  eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  {

<...>

> @@ -447,6 +509,24 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
>  	return 0;
>  }
>  
> +static uint64_t
> +count_packets_in_pcaps(pcap_t **pcap, struct pcap_rx_queue *pcap_q)
> +{
> +	const u_char *packet;
> +	struct pcap_pkthdr header;
> +	uint64_t pcap_pkt_count;

Compiler is complaining about uninitialized 'pcap_pkt_count'.

> +
> +	while ((packet = pcap_next(*pcap, &header)))
> +		pcap_pkt_count++;

It seems there is no quicker way to get the number of packets from a pcap file; if
anyone knows a way, a comment is welcome.

> +
> +	/* the pcap is reopened so it can be used as normal later */
> +	pcap_close(*pcap);
> +	*pcap = NULL;
> +	open_single_rx_pcap(pcap_q->name, pcap);
> +
> +	return pcap_pkt_count;
> +}
> +
>  static int
>  eth_dev_start(struct rte_eth_dev *dev)
>  {

<...>

> @@ -671,6 +752,49 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
>  	pcap_q->queue_id = rx_queue_id;
>  	dev->data->rx_queues[rx_queue_id] = pcap_q;
>  
> +	if (internals->infinite_rx) {
> +		struct pmd_process_private *pp;
> +		char ring_name[NAME_MAX];
> +		uint16_t num_rx = 0;
> +		static uint32_t ring_number;
> +		uint64_t pcap_pkt_count = 0;
> +		pcap_t **pcap;
> +
> +		pp = rte_eth_devices[pcap_q->port_id].process_private;
> +		pcap = &pp->rx_pcap[pcap_q->queue_id];
> +
> +		if (unlikely(*pcap == NULL))
> +			rte_panic("no packets in pcap to fill ring with\n");
> +
> +		pcap_pkt_count = count_packets_in_pcaps(pcap, pcap_q);
> +
> +		snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu16,
> +				ring_number);
> +
> +		pcap_q->pkts = rte_ring_create(ring_name,
> +				rte_align64pow2(pcap_pkt_count + 1), 0,
> +				RING_F_SP_ENQ | RING_F_SC_DEQ);
> +		ring_number++;
> +		if (!pcap_q->pkts)
> +			rte_panic("failed to alloc ring\n");
> +
> +
> +		struct rte_mbuf *bufs[pcap_pkt_count];

Can you please move the declaration to the beginning of the function.

> +		if (rte_pktmbuf_alloc_bulk(mb_pool, bufs, pcap_pkt_count) != 0)
> +			return 0;

Should this return an error?

> +
> +		num_rx = eth_pcap_rx(pcap_q, bufs, pcap_pkt_count);
> +
> +		/* check for multiseg mbufs */
> +		for (i = 0; i < num_rx; i++) {
> +			if (bufs[i]->nb_segs != 1)
> +				rte_panic("jumbo frames are not supported in infinite rx mode\n");

This needs to be replaced with returning an error, and please remember to clean up in
that case.

> +		}
> +
> +		rte_ring_enqueue_bulk(pcap_q->pkts, (void * const *)bufs,
> +				num_rx, NULL);
> +	}
> +
>  	return 0;
>  }
>  
<...>

> @@ -1132,9 +1285,18 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
>  		}
>  	}
>  
> -	eth_dev->rx_pkt_burst = eth_pcap_rx;
> +	/* assign rx ops */
> +	if (infinite_rx) {
> +		eth_dev->rx_pkt_burst = eth_pcap_rx_infinite;
> +		internals->infinite_rx = infinite_rx;

Since "infinite_rx" can be 0 or 1, isn't it better to set
"internals->infinite_rx" outside of this check? Although functionally it will be the
same (since internals is all zero by default), logically it is better I think.

> +	} else {
> +		eth_dev->rx_pkt_burst = eth_pcap_rx;
> +	}
>  
> -	if (using_dumpers)
> +	/* assign tx ops */
> +	if (infinite_rx)
> +		eth_dev->tx_pkt_burst = eth_tx_drop;

'tx_pkt_burst' shouldn't be set based on the 'infinite_rx' check; I guess the intention
was the 'tx_drop' check.

> +	else if (using_dumpers)
>  		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
>  	else
>  		eth_dev->tx_pkt_burst = eth_pcap_tx;

<...>

> @@ -1217,6 +1380,17 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
>  	pcaps.num_of_queue = 0;
>  
>  	if (is_rx_pcap) {
> +		/*
> +		 * We check whether we want to infinitely rx the pcap file
> +		 */
> +		if (rte_kvargs_count(kvlist, ETH_PCAP_INFINITE_RX_ARG) == 1) {

What do you think about printing a warning if the user provided the value more than once?

> +			ret = rte_kvargs_process(kvlist,
> +					ETH_PCAP_INFINITE_RX_ARG,
> +					&get_infinite_rx_arg, &infinite_rx);
> +			if (ret < 0)
> +				goto free_kvlist;
> +		}
> +
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
>  				&open_rx_pcap, &pcaps);
>  	} else {
> @@ -1228,18 +1402,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
>  		goto free_kvlist;
>  
>  	/*
> -	 * We check whether we want to open a TX stream to a real NIC or a
> -	 * pcap file
> +	 * We check whether we want to open a TX stream to a real NIC,
> +	 * a pcap file, or drop packets on tx
>  	 */
>  	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
>  	dumpers.num_of_queue = 0;
>  
> -	if (is_tx_pcap)
> +	tx_drop = rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) ? 1 : 0;

This line can be dropped; the code below already gets the 'tx_drop' value, overwriting this one.

> +	if (rte_kvargs_count(kvlist, ETH_PCAP_TX_DROP_ARG) == 1) {
> +		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_DROP_ARG,
> +				&get_tx_drop_arg, &tx_drop);
> +		if (ret < 0)
> +			goto free_kvlist;
> +	}
> +
> +	if (tx_drop) {
> +		/* add dummy queue which counts dropped packets */
> +		ret = add_queue(&dumpers, "dummy", "tx_drop", NULL, NULL);

This will break the multiple queue use case. Instead of adding a single queue,
we need to find a way to add as many queues as the user wants, all of which will just
ignore the packets.

> +	} else if (is_tx_pcap) {
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
>  				&open_tx_pcap, &dumpers);
> -	else
> +	} else {
>  		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
>  				&open_tx_iface, &dumpers);
> +	}
>  
>  	if (ret < 0)
>  		goto free_kvlist;

<...>



More information about the dev mailing list