[dpdk-dev] [virtio PATCH] pmd: support dpdk-1.3

Thomas Monjalon thomas.monjalon at 6wind.com
Thu Jul 25 22:40:56 CEST 2013


From: Damien Millescamps <damien.millescamps at 6wind.com>

API changes in DPDK 1.3:
- queue arrays are now allocated by rte_ethdev rather than by the PMD
- queue release must be handled by the PMD (new rx/tx_queue_release ops)
- the queue structs become opaque pointers
- vlan_filter_set can now return an error
- vlan_tci has moved into the vlan_macip bit-fields of rte_pktmbuf

Signed-off-by: Damien Millescamps <damien.millescamps at 6wind.com>
Signed-off-by: Thomas Monjalon <thomas.monjalon at 6wind.com>
---
 virtio_user.c |   71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 70 insertions(+), 1 deletion(-)
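
For reviewers, a condensed sketch of the compatibility pattern this patch
relies on (not part of the commit; the pmd_example_* names are illustrative
only):

#include <rte_version.h>
#include <rte_mbuf.h>

/* Gate code paths on the DPDK version found at build time. */
#define DPDK_VERSION_BEFORE(x,y) (RTE_VERSION <  RTE_VERSION_NUM(x,y,0,0))
#define DPDK_VERSION_SINCE(x,y)  (RTE_VERSION >= RTE_VERSION_NUM(x,y,0,0))

#if DPDK_VERSION_BEFORE(1,3)
/* Before 1.3 the PMD reuses the igb queue struct and vlan_tci is flat. */
typedef struct igb_rx_queue pmd_example_rxq_t;
#define PMD_EXAMPLE_VLAN_TCI(m) ((m)->pkt.vlan_tci)
#else
/* Since 1.3 queues are opaque pointers and vlan_tci sits in vlan_macip. */
typedef void pmd_example_rxq_t;
#define PMD_EXAMPLE_VLAN_TCI(m) ((m)->pkt.vlan_macip.f.vlan_tci)
#endif

#if DPDK_VERSION_SINCE(1,3)
/* Since 1.3 rte_ethdev no longer frees queues: the PMD must provide
 * rx/tx_queue_release callbacks that take the opaque queue pointer. */
static void
pmd_example_rx_queue_release(pmd_example_rxq_t *rxq)
{
	/* free the per-queue resources formerly released in dev_stop() */
	(void)rxq;
}
#endif

With the helpers above, the same source builds against releases older than
dpdk-1.3 as well as dpdk-1.3 itself, the pre-processor selecting the field
layout and dev_ops entries at compile time.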

diff --git a/virtio_user.c b/virtio_user.c
index d92af7e..6eac19d 100644
--- a/virtio_user.c
+++ b/virtio_user.c
@@ -49,8 +49,12 @@
 
 #include <rte_ethdev.h>
 #include <rte_malloc.h>
+#include <rte_memzone.h>
 #include <rte_version.h>
 
+#define DPDK_VERSION_BEFORE(x,y) RTE_VERSION <  RTE_VERSION_NUM(x,y,0,0)
+#define DPDK_VERSION_SINCE(x,y)  RTE_VERSION >= RTE_VERSION_NUM(x,y,0,0)
+
 /* We never use MSI-X, so the offset is never 24. */
 #define VIRTIO_PCI_CONFIG_OFFT	20
 
@@ -183,8 +187,17 @@ struct virtio_pmd_ctrl {
 	uint8_t data[VIRTIO_MAX_CTRL_DATA];
 };
 
+#if DPDK_VERSION_BEFORE(1,3)
+/* before dpdk-1.3, the queue structs are the igb ones */
 typedef struct igb_tx_queue rte_txq_t;
 typedef struct igb_rx_queue rte_rxq_t;
+/* before dpdk-1.3, vlan_tci is a flat rte_pktmbuf field (no vlan_macip) */
+#define RTE_MBUF_VLAN_MAC_IP(m) (m)->pkt.vlan_tci
+#else /* since dpdk-1.3 */
+typedef void rte_txq_t;
+typedef void rte_rxq_t;
+#define RTE_MBUF_VLAN_MAC_IP(m) (m)->pkt.vlan_macip.f.vlan_tci
+#endif
 
 /* This part is taken from ixgbe and igb pmd drivers. */
 static inline struct rte_mbuf*
@@ -453,7 +466,7 @@ virtio_recv_buf(rte_rxq_t *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		buf->pkt.data_len = len;
 		buf->pkt.in_port = rq->port_id;
 		buf->ol_flags = 0; /* RSS - FDIR // Error */
-		buf->pkt.vlan_tci = 0;
+		RTE_MBUF_VLAN_MAC_IP(buf) = 0;
 
 		cur_seg = buf;
 		rx_pkts[completed] = buf;
@@ -1001,6 +1014,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 	/* Set status to 0, but this will mean we have to re-init dev. */
 	outb(0,  priv->ioaddr + VIRTIO_PCI_STATUS);
 
+#if DPDK_VERSION_BEFORE(1,3)
 	/* Set queues to 0 to release them. */
 	for (index = 0; index < priv->qp_num * 2; index++) {
 		unsigned int i;
@@ -1020,6 +1034,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 		munmap(priv->qps[VIRTIO_QUEUE_XMIT][index>>1].mapped_addr,
 		       priv->qps[VIRTIO_QUEUE_XMIT][index>>1].mapped_size);
 	}
+#endif
 	index = priv->qp_num * 2;
 	if (priv->cq_present) {
 		outw(index, priv->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -1119,7 +1134,11 @@ virtio_dev_close(struct rte_eth_dev *dev)
 	}
 }
 
+#if DPDK_VERSION_SINCE(1,3)
+static int
+#else
 static void
+#endif
 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	(void)dev;
@@ -1127,8 +1146,12 @@ virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	(void)on;
 	RTE_LOG(WARNING, PMD, "%s(): No VLAN enabled for device\n",
 			__func__);
+#if DPDK_VERSION_SINCE(1,3)
+	return -1;
+#endif
 }
 
+#if DPDK_VERSION_BEFORE(1,3)
 /* Allocate array of pointers to register queues */
 static int queue_array_alloc(struct rte_eth_dev *dev,
 			     uint16_t nb_rx_q, uint16_t nb_tx_q)
@@ -1172,14 +1195,20 @@ static int queue_array_alloc(struct rte_eth_dev *dev,
 static int
 virtio_dev_configure(struct rte_eth_dev *dev,
 		     uint16_t nb_rx_q, uint16_t nb_tx_q)
+#else
+static int
+virtio_dev_configure(struct rte_eth_dev *dev)
+#endif
 {
 	struct virtio_net_adapter* priv;
 
+#if DPDK_VERSION_BEFORE(1,3)
 	int ret;
 	ret = queue_array_alloc(dev, nb_rx_q, nb_tx_q);
 	if (ret) {
 		return ret;
 	}
+#endif
 
 	priv = (struct virtio_net_adapter*)dev->data->dev_private;
 
@@ -1210,6 +1239,42 @@ virtio_dev_configure(struct rte_eth_dev *dev,
 	return 0;
 }
 
+#if DPDK_VERSION_SINCE(1,3)
+static void
+virtio_dev_rx_queue_release(rte_rxq_t* rxq)
+{
+	struct virtio_net_vring* rq = (struct virtio_net_vring*)rxq;
+	unsigned int i;
+
+	outw(rq->queue_idx, rq->adapter->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+	outl(0, rq->adapter->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+	for (i = 0 ; i < rq->qsize; i++) {
+		rte_pktmbuf_free(rq->user[i]);
+	}
+	rte_free(rq->user);
+}
+
+static void
+virtio_dev_tx_queue_release(rte_txq_t* txq)
+{
+	struct virtio_net_vring* tq = (struct virtio_net_vring*)txq;
+	unsigned int i;
+
+	outw(tq->queue_idx, tq->adapter->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+	outl(0, tq->adapter->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+
+	for (i = 0 ; i < tq->qsize; i++) {
+		if (tq->user[i]) {
+			rte_pktmbuf_free_seg(tq->user[i]);
+		}
+	}
+	rte_free(tq->user);
+	rte_free(tq->tx_hdrs);
+	munmap(tq->mapped_addr, tq->mapped_size);
+}
+#endif
+
 /* Get statistics. All statistics are not available compared to IXGBE. */
 static void
 virtio_dev_stats_get(struct rte_eth_dev *dev,
@@ -1282,6 +1347,10 @@ static struct eth_dev_ops virtio_eth_dev_ops = {
 	.vlan_filter_set      = virtio_vlan_filter_set,
 	.rx_queue_setup       = virtio_dev_rx_queue_setup,
 	.tx_queue_setup       = virtio_dev_tx_queue_setup,
+#if DPDK_VERSION_SINCE(1,3)
+	.rx_queue_release     = virtio_dev_rx_queue_release,
+	.tx_queue_release     = virtio_dev_tx_queue_release,
+#endif
 	.dev_led_on           = NULL,
 	.dev_led_off          = NULL,
 	.flow_ctrl_set        = NULL,
-- 
1.7.10.4


