[dpdk-dev] [PATCH 1/8] virtio: maintain stats per queue

Stephen Hemminger stephen at networkplumber.org
Sat Jun 14 03:06:18 CEST 2014


Avoid cache-line contention and thrashing on the software statistics
by keeping them per-queue in the driver: the RX and TX paths now update
counters that live in their own struct virtqueue instead of a single
shared block in struct virtio_hw.

Signed-off-by: Stephen Hemminger <shemming at brocade.com>
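
The gist of the change, sketched standalone below: each queue carries its
own packets/bytes/errors counters that the fast path bumps locally, and
stats_get simply sums them on demand. The struct and function names in this
sketch are illustrative only (the real counters are added to struct
virtqueue further down); it is shown just to make the aggregation pattern
explicit, not as driver code.

	#include <stdint.h>
	#include <stddef.h>

	/* Illustrative per-queue counter block (stands in for the new
	 * fields in struct virtqueue). */
	struct example_queue_stats {
		uint64_t packets;
		uint64_t bytes;
		uint64_t errors;
	};

	/* Fast path: each queue touches only its own counters, so no
	 * cache line is shared between RX and TX (or between queues). */
	static inline void
	example_rx_account(struct example_queue_stats *q, uint32_t pkt_len)
	{
		q->packets++;
		q->bytes += pkt_len;
	}

	/* Slow path: a stats_get-style walk sums the per-queue counters
	 * into one snapshot. */
	static void
	example_stats_sum(struct example_queue_stats *const *queues,
			  unsigned int nb_queues,
			  struct example_queue_stats *total)
	{
		unsigned int i;

		total->packets = total->bytes = total->errors = 0;
		for (i = 0; i < nb_queues; i++) {
			if (queues[i] == NULL)
				continue;
			total->packets += queues[i]->packets;
			total->bytes   += queues[i]->bytes;
			total->errors  += queues[i]->errors;
		}
	}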

--- a/lib/librte_pmd_virtio/virtio_ethdev.c	2014-06-13 17:41:11.634778400 -0700
+++ b/lib/librte_pmd_virtio/virtio_ethdev.c	2014-06-13 17:48:08.235480894 -0700
@@ -474,19 +474,67 @@ virtio_dev_atomic_write_link_status(stru
 static void
 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	struct virtio_hw *hw =
-		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (stats)
-		memcpy(stats, &hw->eth_stats, sizeof(*stats));
+	unsigned i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		const struct virtqueue *txvq = dev->data->tx_queues[i];
+		if (txvq == NULL)
+			continue;
+
+		stats->opackets += txvq->packets;
+		stats->obytes += txvq->bytes;
+		stats->oerrors += txvq->errors;
+
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_opackets[i] = txvq->packets;
+			stats->q_obytes[i] = txvq->bytes;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		const struct virtqueue *rxvq = dev->data->rx_queues[i];
+		if (rxvq == NULL)
+			continue;
+
+		stats->ipackets += rxvq->packets;
+		stats->ibytes += rxvq->bytes;
+		stats->ierrors += rxvq->errors;
+
+		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+			stats->q_ipackets[i] = rxvq->packets;
+			stats->q_ibytes[i] = rxvq->bytes;
+		}
+	}
+
+	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 }
 
 static void
 virtio_dev_stats_reset(struct rte_eth_dev *dev)
 {
-	struct virtio_hw *hw =
-		VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	/* Reset software totals */
-	memset(&hw->eth_stats, 0, sizeof(hw->eth_stats));
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct virtqueue *txvq = dev->data->tx_queues[i];
+		if (txvq == NULL)
+			continue;
+
+		txvq->packets = 0;
+		txvq->bytes = 0;
+		txvq->errors = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct virtqueue *rxvq = dev->data->rx_queues[i];
+		if (rxvq == NULL)
+			continue;
+
+		rxvq->packets = 0;
+		rxvq->bytes = 0;
+		rxvq->errors = 0;
+	}
+
+	dev->data->rx_mbuf_alloc_failed = 0;
 }
 
 static void
--- a/lib/librte_pmd_virtio/virtio_pci.h	2014-06-13 17:41:11.634778400 -0700
+++ b/lib/librte_pmd_virtio/virtio_pci.h	2014-06-13 17:41:11.626778388 -0700
@@ -179,7 +179,6 @@ struct virtio_hw {
 	uint8_t     revision_id;
 	uint8_t     mac_addr[ETHER_ADDR_LEN];
 	int         adapter_stopped;
-	struct      rte_eth_stats eth_stats;
 };
 
 /*
--- a/lib/librte_pmd_virtio/virtio_rxtx.c	2014-06-13 17:41:11.634778400 -0700
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c	2014-06-13 17:42:07.946869260 -0700
@@ -301,7 +301,7 @@ virtio_recv_pkts(void *rx_queue, struct
 			PMD_RX_LOG(ERR, "Packet drop\n");
 			nb_enqueued++;
 			virtio_discard_rxbuf(rxvq, rxm);
-			hw->eth_stats.ierrors++;
+			rxvq->errors++;
 			continue;
 		}
 
@@ -317,20 +317,19 @@ virtio_recv_pkts(void *rx_queue, struct
 		VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
 
 		rx_pkts[nb_rx++] = rxm;
-		hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
-		hw->eth_stats.q_ibytes[rxvq->queue_id] += len[i]
-			- sizeof(struct virtio_net_hdr);
+		rxvq->bytes += len[i] - sizeof(struct virtio_net_hdr);
 	}
 
-	hw->eth_stats.ipackets += nb_rx;
-	hw->eth_stats.q_ipackets[rxvq->queue_id] += nb_rx;
+	rxvq->packets += nb_rx;
 
 	/* Allocate new mbuf for the used descriptor */
 	error = ENOSPC;
 	while (likely(!virtqueue_full(rxvq))) {
 		new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
 		if (unlikely(new_mbuf == NULL)) {
-			hw->eth_stats.rx_nombuf++;
+			struct rte_eth_dev *dev
+				= &rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
 			break;
 		}
 		error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
@@ -359,7 +358,6 @@ virtio_xmit_pkts(void *tx_queue, struct
 	struct rte_mbuf *txm;
 	uint16_t nb_used, nb_tx, num;
 	int error;
-	struct virtio_hw *hw;
 
 	nb_tx = 0;
 
@@ -371,7 +369,6 @@ virtio_xmit_pkts(void *tx_queue, struct
 
 	rmb();
 
-	hw = txvq->hw;
 	num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
 
 	while (nb_tx < nb_pkts) {
@@ -394,9 +391,7 @@ virtio_xmit_pkts(void *tx_queue, struct
 				break;
 			}
 			nb_tx++;
-			hw->eth_stats.obytes += txm->pkt.data_len;
-			hw->eth_stats.q_obytes[txvq->queue_id]
-				+= txm->pkt.data_len;
+			txvq->bytes += txm->pkt.data_len;
 		} else {
 			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
 			break;
@@ -404,8 +399,7 @@ virtio_xmit_pkts(void *tx_queue, struct
 	}
 	vq_update_avail_idx(txvq);
 
-	hw->eth_stats.opackets += nb_tx;
-	hw->eth_stats.q_opackets[txvq->queue_id] += nb_tx;
+	txvq->packets += nb_tx;
 
 	if (unlikely(virtqueue_kick_prepare(txvq))) {
 		virtqueue_notify(txvq);
--- a/lib/librte_pmd_virtio/virtqueue.h	2014-06-13 17:41:11.634778400 -0700
+++ b/lib/librte_pmd_virtio/virtqueue.h	2014-06-13 17:41:11.630778394 -0700
@@ -154,6 +154,11 @@ struct virtqueue {
 	uint16_t vq_avail_idx;
 	void     *virtio_net_hdr_mem; /**< hdr for each xmit packet */
 
+	/* Statistics */
+	uint64_t	packets;
+	uint64_t	bytes;
+	uint64_t	errors;
+
 	struct vq_desc_extra {
 		void              *cookie;
 		uint16_t          ndescs;

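
From the application side nothing changes except that the per-queue entries
of struct rte_eth_stats are now meaningful for virtio. A rough usage sketch,
assuming the rte_eth_stats_get() signature of this DPDK release (uint8_t
port id, void return) and leaving out error handling:

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	/* Sketch only: print the totals and the per-queue RX counters that
	 * virtio_dev_stats_get() now assembles from the virtqueues. */
	static void
	print_port_stats(uint8_t port_id, uint16_t nb_rxq)
	{
		struct rte_eth_stats stats;
		uint16_t q;

		rte_eth_stats_get(port_id, &stats);

		printf("port %u: %" PRIu64 " rx pkts, %" PRIu64 " rx bytes, "
		       "%" PRIu64 " rx errors, %" PRIu64 " mbuf alloc failures\n",
		       (unsigned)port_id, stats.ipackets, stats.ibytes,
		       stats.ierrors, stats.rx_nombuf);

		/* Per-queue counters are only reported for the first
		 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues. */
		for (q = 0; q < nb_rxq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
			printf("  rxq %u: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
			       (unsigned)q, stats.q_ipackets[q], stats.q_ibytes[q]);
	}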

