[dpdk-dev] [PATCH 2/3] null: add rings to allow user to provide the mbufs for rx/tx

Paul Atkins patkins at brocade.com
Fri Jan 29 17:18:12 CET 2016


When using the null driver it is useful to be able to
provide a set of mbufs to be received on the interface.
Add an option to specify a ring that the driver can poll to
provide the set of packets that have been received.  Add a
similar ring for the tx side where the packets that are being
transmitted can be stored, so the user can see what was sent.

Signed-off-by: Paul Atkins <patkins at brocade.com>
---
 drivers/net/null/rte_eth_null.c |   91 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 88 insertions(+), 3 deletions(-)

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 9483d6a..176f477 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -47,6 +47,8 @@
 #define ETH_NULL_PACKET_SIZE_ARG	"size"
 #define ETH_NULL_PACKET_COPY_ARG	"copy"
 #define ETH_NULL_ETH_ADDR_ARG		"eth_addr"
+#define ETH_NULL_RX_RING		"rx_ring"
+#define ETH_NULL_TX_RING		"tx_ring"
 
 static unsigned default_packet_size = 64;
 static unsigned default_packet_copy;
@@ -56,6 +58,8 @@ static const char *valid_arguments[] = {
 	ETH_NULL_PACKET_SIZE_ARG,
 	ETH_NULL_PACKET_COPY_ARG,
 	ETH_NULL_ETH_ADDR_ARG,
+	ETH_NULL_RX_RING,
+	ETH_NULL_TX_RING,
 	NULL
 };
 
@@ -76,6 +80,8 @@ struct pmd_internals {
 	unsigned packet_size;
 	unsigned packet_copy;
 	struct ether_addr eth_addr;
+	struct rte_ring *rx_ring;
+	struct rte_ring *tx_ring;
 	unsigned numa_node;
 
 	unsigned nb_rx_queues;
@@ -158,6 +164,22 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 }
 
 static uint16_t
+eth_null_copy_rx_from_ring(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+	int i;
+	struct null_queue *h = q;
+
+	if ((q == NULL) || (bufs == NULL) || (nb_bufs == 0))
+		return 0;
+
+	i = rte_ring_mc_dequeue_burst(h->internals->rx_ring, (void **)bufs,
+				      nb_bufs);
+	/* Count dequeued packets, not polls, like the other rx handlers. */
+	rte_atomic64_add(&h->rx_pkts, i);
+	return i;
+}
+
+static uint16_t
 eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	int i;
@@ -196,6 +218,23 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	return i;
 }
 
+static uint16_t
+eth_null_copy_tx_to_ring(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+	int i;
+	struct null_queue *h = q;
+
+	if ((q == NULL) || (bufs == NULL) || (nb_bufs == 0))
+		return 0;
+
+	i = rte_ring_enqueue_burst(h->internals->tx_ring, (void **)bufs,
+				   nb_bufs);
+
+	/* Count enqueued packets, not bursts, like the other tx handlers. */
+	rte_atomic64_add(&h->tx_pkts, i);
+	return i;
+}
+
 static int
 eth_dev_configure(struct rte_eth_dev *dev) {
 	struct pmd_internals *internals;
@@ -495,7 +534,9 @@ eth_dev_null_create_internal(const char *name,
 			     const unsigned numa_node,
 			     unsigned packet_size,
 			     unsigned packet_copy,
-			     struct ether_addr eth_addr)
+			     struct ether_addr eth_addr,
+			     struct rte_ring *rx_ring,
+			     struct rte_ring *tx_ring)
 {
 	const unsigned nb_rx_queues = 1;
 	const unsigned nb_tx_queues = 1;
@@ -546,6 +589,8 @@ eth_dev_null_create_internal(const char *name,
 	internals->packet_size = packet_size;
 	internals->packet_copy = packet_copy;
 	internals->eth_addr = eth_addr;
+	internals->rx_ring = rx_ring;
+	internals->tx_ring = tx_ring;
 	internals->numa_node = numa_node;
 
 	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
@@ -580,6 +625,10 @@ eth_dev_null_create_internal(const char *name,
 		eth_dev->rx_pkt_burst = eth_null_rx;
 		eth_dev->tx_pkt_burst = eth_null_tx;
 	}
+	if (rx_ring)
+		eth_dev->rx_pkt_burst = eth_null_copy_rx_from_ring;
+	if (tx_ring)
+		eth_dev->tx_pkt_burst = eth_null_copy_tx_to_ring;
 
 	return 0;
 
@@ -597,7 +646,8 @@ eth_dev_null_create(const char *name,
 		unsigned packet_copy)
 {
 	return eth_dev_null_create_internal(name, numa_node, packet_size,
-					    packet_copy, default_eth_addr);
+					    packet_copy, default_eth_addr,
+					    NULL, NULL);
 }
 
 static inline int
@@ -645,6 +695,23 @@ get_eth_addr_arg(const char *key __rte_unused,
 				       sizeof(struct ether_addr));
 }
 
+static inline int
+get_ring_arg(const char *key __rte_unused,
+	     const char *value, void *extra_args)
+{
+	struct rte_ring **ring = extra_args;
+
+	if ((value == NULL) || (extra_args == NULL))
+		return -EINVAL;
+
+	/* Reuse an existing ring of this name, else create a fresh one. */
+	*ring = rte_ring_lookup(value);
+	if (*ring == NULL)
+		*ring = rte_ring_create(value, 64, SOCKET_ID_ANY, 0);
+
+	return (*ring == NULL) ? -1 : 0;
+}
+
 static int
 rte_pmd_null_devinit(const char *name, const char *params)
 {
@@ -652,6 +719,8 @@ rte_pmd_null_devinit(const char *name, const char *params)
 	unsigned packet_size = default_packet_size;
 	unsigned packet_copy = default_packet_copy;
 	struct ether_addr eth_addr = default_eth_addr;
+	struct rte_ring *rx_ring = NULL;
+	struct rte_ring *tx_ring = NULL;
 	struct rte_kvargs *kvlist = NULL;
 	int ret;
 
@@ -691,6 +760,21 @@ rte_pmd_null_devinit(const char *name, const char *params)
 				goto free_kvlist;
 		}
 
+		if (rte_kvargs_count(kvlist, ETH_NULL_RX_RING) == 1) {
+			ret = rte_kvargs_process(kvlist,
+						 ETH_NULL_RX_RING,
+						 &get_ring_arg, &rx_ring);
+			if (ret < 0)
+				goto free_kvlist;
+		}
+
+		if (rte_kvargs_count(kvlist, ETH_NULL_TX_RING) == 1) {
+			ret = rte_kvargs_process(kvlist,
+						 ETH_NULL_TX_RING,
+						 &get_ring_arg, &tx_ring);
+			if (ret < 0)
+				goto free_kvlist;
+		}
 	}
 
 	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
@@ -698,7 +782,8 @@ rte_pmd_null_devinit(const char *name, const char *params)
 			packet_copy ? "enabled" : "disabled");
 
 	ret = eth_dev_null_create_internal(name, numa_node, packet_size,
-					   packet_copy, eth_addr);
+					   packet_copy, eth_addr, rx_ring,
+					   tx_ring);
 
 free_kvlist:
 	if (kvlist)
-- 
1.7.10.4



More information about the dev mailing list