[dpdk-dev] [PATCH v4 4/7] ethdev: synchronize port allocation

Matan Azrad matan at mellanox.com
Sat Jan 20 22:24:25 CET 2018


Ethernet port allocation was not thread safe: two threads trying to
allocate a new port at the same time might get an identical port
identifier and overwrite each other's memory.
In fact, none of the port configuration operations were thread safe
from the ethdev point of view.
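
To see the race, here is a self-contained miniature (illustration only,
not DPDK code) of the check-then-act window; the real window sits
between rte_eth_dev_find_free_port() and the initialization of the
chosen slot:

#include <pthread.h>
#include <stdio.h>

#define MAX_PORTS 4
static int port_used[MAX_PORTS];

static int
find_free_port(void)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++)
		if (!port_used[i])
			return i;
	return -1;
}

static void *
alloc_port(void *arg)
{
	int id = find_free_port();	/* both threads may read id 0 ... */
	port_used[id] = 1;		/* ... and both claim the same slot */
	printf("thread %ld got port %d\n", (long)arg, id);
	return NULL;
}

int
main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, alloc_port, (void *)1L);
	pthread_create(&b, NULL, alloc_port, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;	/* may print the same port for both threads */
}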

The port ownership mechanism added to ethdev is a good opportunity to
redefine the synchronization rules in ethdev:

1. The port allocation and port release synchronization will be
   managed by ethdev.
2. The port usage synchronization will be managed by the port owner.
3. The port ownership synchronization will be managed by ethdev.

Add port allocation synchronization to complete the new rules.
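
Both hunks below apply the same two-level pattern; condensed, it looks
like this (a sketch only - the helper name is hypothetical, the locks
and callees are those of rte_ethdev.c):

#include <rte_spinlock.h>

/* Process-local lock: serializes the one-time allocation (primary) or
 * attach (secondary) of rte_eth_dev_shared_data. */
static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

static void
shared_data_prepare(void)	/* hypothetical helper */
{
	rte_spinlock_lock(&rte_eth_shared_data_lock);
	if (rte_eth_dev_shared_data == NULL)
		rte_eth_dev_shared_data_alloc();
	rte_spinlock_unlock(&rte_eth_shared_data_lock);
}

/*
 * After shared_data_prepare(), ownership_lock - which lives inside the
 * shared memory and is therefore visible to both primary and secondary
 * processes - serializes the port table itself:
 *
 *	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
 *	... find/claim/attach a port ...
 *	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
 */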

Signed-off-by: Matan Azrad <matan at mellanox.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
---
 lib/librte_ether/rte_ethdev.c | 43 +++++++++++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 12 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index af0e072..f616775 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -52,6 +52,9 @@
 /* spinlock for add/remove tx callbacks */
 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* spinlock for shared data allocation */
+static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
 /* store statistics names and its offset in stats structure  */
 struct rte_eth_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -202,21 +205,27 @@ struct rte_eth_dev *
 rte_eth_dev_allocate(const char *name)
 {
 	uint16_t port_id;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev *eth_dev = NULL;
+
+	/* Synchronize local threads to allocate shared data only once. */
+	rte_spinlock_lock(&rte_eth_shared_data_lock);
+	if (rte_eth_dev_shared_data == NULL)
+		rte_eth_dev_shared_data_alloc();
+	rte_spinlock_unlock(&rte_eth_shared_data_lock);
+
+	/* Synchronize port creation between primary and secondary threads. */
+	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
 
 	port_id = rte_eth_dev_find_free_port();
 	if (port_id == RTE_MAX_ETHPORTS) {
 		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
-		return NULL;
+		goto unlock;
 	}
 
-	if (rte_eth_dev_shared_data == NULL)
-		rte_eth_dev_shared_data_alloc();
-
 	if (rte_eth_dev_allocated(name) != NULL) {
 		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
 				name);
-		return NULL;
+		goto unlock;
 	}
 
 	eth_dev = eth_dev_get(port_id);
@@ -224,7 +233,11 @@ struct rte_eth_dev *
 	eth_dev->data->port_id = port_id;
 	eth_dev->data->mtu = ETHER_MTU;
 
-	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
+unlock:
+	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+	if (eth_dev != NULL)
+		_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
 
 	return eth_dev;
 }
@@ -238,10 +251,16 @@ struct rte_eth_dev *
 rte_eth_dev_attach_secondary(const char *name)
 {
 	uint16_t i;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev *eth_dev = NULL;
 
+	/* Synchronize local threads to attach shared data only once. */
+	rte_spinlock_lock(&rte_eth_shared_data_lock);
 	if (rte_eth_dev_shared_data == NULL)
 		rte_eth_dev_shared_data_alloc();
+	rte_spinlock_unlock(&rte_eth_shared_data_lock);
+
+	/* Synchronize port attachment to primary port creation and release. */
+	rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
 
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
@@ -251,12 +270,12 @@ struct rte_eth_dev *
 		RTE_PMD_DEBUG_TRACE(
 			"device %s is not driven by the primary process\n",
 			name);
-		return NULL;
+	} else {
+		eth_dev = eth_dev_get(i);
+		RTE_ASSERT(eth_dev->data->port_id == i);
 	}
 
-	eth_dev = eth_dev_get(i);
-	RTE_ASSERT(eth_dev->data->port_id == i);
-
+	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
 	return eth_dev;
 }
 
-- 
1.8.3.1
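
With the patch applied, the miniature race shown above disappears for
the real API as well: concurrent allocators are serialized by
ownership_lock and receive distinct ports. A sketch (the device names
and the probe threads are hypothetical):

#include <pthread.h>
#include <rte_ethdev.h>

static void *
do_allocate(void *name)
{
	/* Internal PMD-facing API; safe to call concurrently now. */
	return rte_eth_dev_allocate((const char *)name);
}

static void
probe_two(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, do_allocate, "net_hypo0");
	pthread_create(&t2, NULL, do_allocate, "net_hypo1");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	/* Each returned rte_eth_dev has a distinct data->port_id. */
}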


