[dpdk-dev] [PATCH v2] net/vhost: fix xstats wrong after clearing stats

Xia, Chenbo chenbo.xia at intel.com
Fri Oct 9 05:13:37 CEST 2020


Hi David,

> -----Original Message-----
> From: David Christensen <drc at linux.vnet.ibm.com>
> Sent: Wednesday, October 7, 2020 5:23 AM
> To: dev at dpdk.org; maxime.coquelin at redhat.com; Xia, Chenbo
> <chenbo.xia at intel.com>; Wang, Zhihong <zhihong.wang at intel.com>
> Cc: stable at dpdk.org; David Christensen <drc at linux.vnet.ibm.com>; Yang,
> Zhiyong <zhiyong.yang at intel.com>
> Subject: [PATCH v2] net/vhost: fix xstats wrong after clearing stats
> 
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
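
Side note for other reviewers (not part of the patch): the bogus value
in the log above is plain uint64_t wrap-around. The old code derived
tx_unicast_packets from stats.pkts at query time, so clearing stats
but not xstats made the subtraction underflow. A minimal standalone
sketch with made-up counter values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pkts = 0;             /* zeroed by "clear port stats" */
	uint64_t bcast = 1, mcast = 2; /* xstats, left untouched */

	/* old scheme: unicast derived from basic stats at query time */
	uint64_t unicast = pkts - (bcast + mcast);
	printf("%" PRIu64 "\n", unicast); /* prints 18446744073709551613 */
	return 0;
}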
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: zhiyong.yang at intel.com

It would be better to replace this Cc with 'Cc: stable at dpdk.org' as
other fix patches do. You can cc Zhiyong with the git send-email command.

> 
> Signed-off-by: David Christensen <drc at linux.vnet.ibm.com>
> ---
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 70 +++++++++++++++----------------
>  1 file changed, 35 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index e55278af6..163cf9409 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>  	VHOST_BROADCAST_PKT,
>  	VHOST_MULTICAST_PKT,
>  	VHOST_UNICAST_PKT,
> +	VHOST_PKT,
> +	VHOST_BYTE,
> +	VHOST_MISSED_PKT,
>  	VHOST_ERRORS_PKT,
>  	VHOST_ERRORS_FRAGMENTED,
>  	VHOST_ERRORS_JABBER,
> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>  /* [rx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  /* [tx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
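
For context (paraphrased from the existing vhost_dev_xstats_get(), not
part of this diff): the getter walks these tables and reads each
counter through the recorded byte offset, so retargeting the
offsetof() entries at stats.xstats[] is all it takes for the new
storage to be picked up:

	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
		xstats[count].value = 0;
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			vq = dev->data->rx_queues[i];
			if (!vq)
				continue;
			/* fetch the u64 counter at the table's offset */
			xstats[count].value +=
				*(uint64_t *)(((char *)vq)
				+ vhost_rxport_stat_strings[t].offset);
		}
		count++;
	}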
> @@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  	if (n < nxstats)
>  		return nxstats;
> 
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		vq = dev->data->rx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		vq = dev->data->tx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				+ vq->stats.missed_pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>  		xstats[count].value = 0;
>  		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> @@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  }
> 
>  static inline void
> -vhost_count_multicast_broadcast(struct vhost_queue *vq,
> +vhost_count_xcast_packets(struct vhost_queue *vq,
>  				struct rte_mbuf *mbuf)
>  {
>  	struct rte_ether_addr *ea = NULL;
> @@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>  		else
>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
> +	} else {
> +		pstats->xstats[VHOST_UNICAST_PKT]++;
>  	}
>  }
> 
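A note on the classification above, for anyone skimming: broadcast is
handled as a special case of multicast because the all-ones address
also has the Ethernet group bit set, so the new else branch is exactly
the unicast case. A quick standalone check (made-up addresses, assuming
a DPDK build environment):

#include <stdio.h>
#include <rte_ether.h>

int main(void)
{
	struct rte_ether_addr bc = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
	struct rte_ether_addr mc = {{0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}};
	struct rte_ether_addr uc = {{0x52, 0x54, 0x00, 0x12, 0x34, 0x56}};

	/* broadcast also passes the multicast test (group bit set) */
	printf("%d %d\n", rte_is_multicast_ether_addr(&bc),
			rte_is_broadcast_ether_addr(&bc)); /* 1 1 */
	printf("%d %d\n", rte_is_multicast_ether_addr(&mc),
			rte_is_broadcast_ether_addr(&mc)); /* 1 0 */
	printf("%d\n", rte_is_multicast_ether_addr(&uc)); /* 0 */
	return 0;
}
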
>  static void
> -vhost_update_packet_xstats(struct vhost_queue *vq,
> -			   struct rte_mbuf **bufs,
> -			   uint16_t count)
> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
> +			   uint16_t count, uint64_t nb_bytes,
> +			   uint64_t nb_missed)
>  {
>  	uint32_t pkt_len = 0;
>  	uint64_t i = 0;
>  	uint64_t index;
>  	struct vhost_stats *pstats = &vq->stats;
> 
> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
> +
>  	for (i = 0; i < count ; i++) {
> +		pstats->xstats[VHOST_PKT]++;
>  		pkt_len = bufs[i]->pkt_len;
>  		if (pkt_len == 64) {
>  			pstats->xstats[VHOST_64_PKT]++;
> @@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
>  			else if (pkt_len > 1522)
>  				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
>  		}
> -		vhost_count_multicast_broadcast(vq, bufs[i]);
> +		vhost_count_xcast_packets(vq, bufs[i]);
>  	}
>  }
> 
> @@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_rx = 0;
>  	uint16_t nb_receive = nb_bufs;
> +	uint64_t nb_bytes = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  		if (r->internal->vlan_strip)
>  			rte_vlan_strip(bufs[i]);
> 
> -		r->stats.bytes += bufs[i]->pkt_len;
> +		nb_bytes += bufs[i]->pkt_len;
>  	}
> 
> -	vhost_update_packet_xstats(r, bufs, nb_rx);
> +	r->stats.bytes += nb_bytes;
> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
> 
>  out:
>  	rte_atomic32_set(&r->while_queuing, 0);
> @@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_tx = 0;
>  	uint16_t nb_send = 0;
> +	uint64_t nb_bytes = 0;
> +	uint64_t nb_missed = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  			break;
>  	}
> 
> +	for (i = 0; likely(i < nb_tx); i++)
> +		nb_bytes += bufs[i]->pkt_len;
> +
> +	nb_missed = nb_bufs - nb_tx;
> +
>  	r->stats.pkts += nb_tx;
> +	r->stats.bytes += nb_bytes;
>  	r->stats.missed_pkts += nb_bufs - nb_tx;
> 
> -	for (i = 0; likely(i < nb_tx); i++)
> -		r->stats.bytes += bufs[i]->pkt_len;
> -
> -	vhost_update_packet_xstats(r, bufs, nb_tx);
> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
> 
>  	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
>  	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
>  	 * are increased when packets are not transmitted successfully.
>  	 */

I think the above comment should be updated, because the code below now
counts unicast packets as well (based on RFC2863).
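
Maybe something along these lines (just a suggestion):

	/* According to RFC2863, the ifHCOutUcastPkts, ifHCOutMulticastPkts
	 * and ifHCOutBroadcastPkts counters are increased even when packets
	 * are not transmitted successfully, so count the missed packets by
	 * destination type here as well.
	 */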

Thanks!
Chenbo

>  	for (i = nb_tx; i < nb_bufs; i++)
> -		vhost_count_multicast_broadcast(r, bufs[i]);
> +		vhost_count_xcast_packets(r, bufs[i]);
> 
>  	for (i = 0; likely(i < nb_tx); i++)
>  		rte_pktmbuf_free(bufs[i]);
> --
> 2.18.4
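
P.S. With the fix applied, the reproducer from the commit log should
behave sanely; expected (hypothetical) output:

testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...
testpmd> clear port stats all
...
testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...

i.e. the xstats value is simply left alone by "clear port stats" and no
longer wraps; "clear port xstats all" clears it independently.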


