[dpdk-dev] [PATCH v3 4/4] net/mlx4: remove device register remap

Yongseok Koh yskoh at mellanox.com
Fri Apr 5 03:33:57 CEST 2019


The UAR (User Access Region) register does not need to be remapped for the
primary process; it has to be remapped only for secondary processes. The UAR
register table is kept in the process-private structure of rte_eth_devices[]:
	(struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private

The actual UAR table directly follows this structure and is used for both Tx
and Rx.
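
For reference, a condensed sketch of the layout introduced by this patch (see
mlx4.h and mlx4_dev_configure() in the diff below; nb_rx_queues/nb_tx_queues
stand for dev->data->nb_rx_queues and dev->data->nb_tx_queues):

	/* Per-process private structure; the UAR register table is a
	 * flexible array member right after the fixed fields. */
	struct mlx4_proc_priv {
		size_t uar_table_sz; /* Size of UAR register table. */
		void *uar_table[];   /* Tx registers first, then Rx. */
	};

	/* One register slot is reserved per Tx and per Rx queue. */
	size_t ppriv_size = sizeof(struct mlx4_proc_priv) +
			    (nb_rx_queues + nb_tx_queues) * sizeof(void *);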

For Tx, the BlueFlame register in the UAR is used to ring the doorbell.
MLX4_TX_BFREG(txq) is defined to look up the register of a given txq. Each
process accesses its own private data to acquire the register from the UAR
table.
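
The lookup is a plain table access through the process-private pointer,
excerpted from the macros added to mlx4.h and mlx4_rxtx.h below:

	#define MLX4_PROC_PRIV(port_id) \
		((struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private)

	#define MLX4_TX_BFREG(txq) \
		(MLX4_PROC_PRIV((txq)->port_id)->uar_table[(txq)->stats.idx])

Accordingly, mlx4_tx_burst() rings the doorbell with
rte_write32(txq->msq.doorbell_qpn, MLX4_TX_BFREG(txq)) instead of using a
remapped pointer stored in the shared queue structure.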

For Rx, the doorbell in the UAR is required for arming the CQ event. However,
it is a known issue that this register is not remapped for the secondary
process.

Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
---
 drivers/net/mlx4/mlx4.c      | 232 ++++++++-----------------------------------
 drivers/net/mlx4/mlx4.h      |  15 ++-
 drivers/net/mlx4/mlx4_prm.h  |   3 +-
 drivers/net/mlx4/mlx4_rxtx.c |   2 +-
 drivers/net/mlx4/mlx4_rxtx.h |   6 +-
 drivers/net/mlx4/mlx4_txq.c  | 170 +++++++++++++++++++------------
 6 files changed, 165 insertions(+), 263 deletions(-)

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 252658fc6a..b22fe11e6c 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -126,30 +126,6 @@ mlx4_init_shared_data(void)
 	return ret;
 }
 
-/**
- * Uninitialize shared data between primary and secondary process.
- *
- * The pointer of secondary process is dereferenced and primary process frees
- * the memzone.
- */
-static void
-mlx4_uninit_shared_data(void)
-{
-	const struct rte_memzone *mz;
-
-	rte_spinlock_lock(&mlx4_shared_data_lock);
-	if (mlx4_shared_data) {
-		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
-			rte_memzone_free(mz);
-		} else {
-			memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
-		}
-		mlx4_shared_data = NULL;
-	}
-	rte_spinlock_unlock(&mlx4_shared_data_lock);
-}
-
 #ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
 /**
  * Verbs callback to allocate a memory. This function should allocate the space
@@ -219,6 +195,8 @@ static int
 mlx4_dev_configure(struct rte_eth_dev *dev)
 {
 	struct mlx4_priv *priv = dev->data->dev_private;
+	struct mlx4_proc_priv *ppriv;
+	size_t ppriv_size;
 	struct rte_flow_error error;
 	int ret;
 
@@ -235,6 +213,22 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
 	if (ret)
 		ERROR("%p: interrupt handler installation failed",
 		      (void *)dev);
+	/*
+	 * UAR register table follows the process private structure. BlueFlame
+	 * registers for Tx queues come first and registers for Rx queues
+	 * follow.
+	 */
+	ppriv_size = sizeof(struct mlx4_proc_priv) +
+		     (dev->data->nb_rx_queues + dev->data->nb_tx_queues) *
+		     sizeof(void *);
+	ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size,
+				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+	if (!ppriv) {
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	ppriv->uar_table_sz = ppriv_size;
+	dev->process_private = ppriv;
 exit:
 	return ret;
 }
@@ -262,11 +256,6 @@ mlx4_dev_start(struct rte_eth_dev *dev)
 		return 0;
 	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
 	priv->started = 1;
-	ret = mlx4_tx_uar_remap(dev, priv->ctx->cmd_fd);
-	if (ret) {
-		ERROR("%p: cannot remap UAR", (void *)dev);
-		goto err;
-	}
 	ret = mlx4_rss_init(priv);
 	if (ret) {
 		ERROR("%p: cannot initialize RSS resources: %s",
@@ -314,8 +303,6 @@ static void
 mlx4_dev_stop(struct rte_eth_dev *dev)
 {
 	struct mlx4_priv *priv = dev->data->dev_private;
-	const size_t page_size = sysconf(_SC_PAGESIZE);
-	int i;
 
 	if (!priv->started)
 		return;
@@ -326,18 +313,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
 	rte_wmb();
 	/* Disable datapath on secondary process. */
 	mlx4_mp_req_stop_rxtx(dev);
+	rte_free(dev->process_private);
+	dev->process_private = NULL;
 	mlx4_flow_sync(priv, NULL);
 	mlx4_rxq_intr_disable(priv);
 	mlx4_rss_deinit(priv);
-	for (i = 0; i != dev->data->nb_tx_queues; ++i) {
-		struct txq *txq;
-
-		txq = dev->data->tx_queues[i];
-		if (!txq)
-			continue;
-		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->msq.db,
-					       page_size), page_size);
-	}
 }
 
 /**
@@ -662,130 +642,6 @@ mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
 
 static struct rte_pci_driver mlx4_driver;
 
-static int
-find_lower_va_bound(const struct rte_memseg_list *msl,
-		const struct rte_memseg *ms, void *arg)
-{
-	void **addr = arg;
-
-	if (msl->external)
-		return 0;
-	if (*addr == NULL)
-		*addr = ms->addr;
-	else
-		*addr = RTE_MIN(*addr, ms->addr);
-
-	return 0;
-}
-
-/**
- * Reserve UAR address space for primary process.
- *
- * Process local resource is used by both primary and secondary to avoid
- * duplicate reservation. The space has to be available on both primary and
- * secondary process, TXQ UAR maps to this area using fixed mmap w/o double
- * check.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_uar_init_primary(void)
-{
-	struct mlx4_shared_data *sd = mlx4_shared_data;
-	void *addr = (void *)0;
-
-	if (sd->uar_base)
-		return 0;
-	/* find out lower bound of hugepage segments */
-	rte_memseg_walk(find_lower_va_bound, &addr);
-	/* keep distance to hugepages to minimize potential conflicts. */
-	addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX4_UAR_OFFSET + MLX4_UAR_SIZE));
-	/* anonymous mmap, no real memory consumption. */
-	addr = mmap(addr, MLX4_UAR_SIZE,
-		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	if (addr == MAP_FAILED) {
-		ERROR("failed to reserve UAR address space, please"
-		      " adjust MLX4_UAR_SIZE or try --base-virtaddr");
-		rte_errno = ENOMEM;
-		return -rte_errno;
-	}
-	/* Accept either same addr or a new addr returned from mmap if target
-	 * range occupied.
-	 */
-	INFO("reserved UAR address space: %p", addr);
-	sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
-	return 0;
-}
-
-/**
- * Unmap UAR address space reserved for primary process.
- */
-static void
-mlx4_uar_uninit_primary(void)
-{
-	struct mlx4_shared_data *sd = mlx4_shared_data;
-
-	if (!sd->uar_base)
-		return;
-	munmap(sd->uar_base, MLX4_UAR_SIZE);
-	sd->uar_base = NULL;
-}
-
-/**
- * Reserve UAR address space for secondary process, align with primary process.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_uar_init_secondary(void)
-{
-	struct mlx4_shared_data *sd = mlx4_shared_data;
-	struct mlx4_local_data *ld = &mlx4_local_data;
-	void *addr;
-
-	if (ld->uar_base) { /* Already reserved. */
-		assert(sd->uar_base == ld->uar_base);
-		return 0;
-	}
-	assert(sd->uar_base);
-	/* anonymous mmap, no real memory consumption. */
-	addr = mmap(sd->uar_base, MLX4_UAR_SIZE,
-		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	if (addr == MAP_FAILED) {
-		ERROR("UAR mmap failed: %p size: %llu",
-		      sd->uar_base, MLX4_UAR_SIZE);
-		rte_errno = ENXIO;
-		return -rte_errno;
-	}
-	if (sd->uar_base != addr) {
-		ERROR("UAR address %p size %llu occupied, please"
-		      " adjust MLX4_UAR_OFFSET or try EAL parameter"
-		      " --base-virtaddr",
-		      sd->uar_base, MLX4_UAR_SIZE);
-		rte_errno = ENXIO;
-		return -rte_errno;
-	}
-	ld->uar_base = addr;
-	INFO("reserved UAR address space: %p", addr);
-	return 0;
-}
-
-/**
- * Unmap UAR address space reserved for secondary process.
- */
-static void
-mlx4_uar_uninit_secondary(void)
-{
-	struct mlx4_local_data *ld = &mlx4_local_data;
-
-	if (!ld->uar_base)
-		return;
-	munmap(ld->uar_base, MLX4_UAR_SIZE);
-	ld->uar_base = NULL;
-}
-
 /**
  * PMD global initialization.
  *
@@ -801,7 +657,6 @@ mlx4_init_once(void)
 {
 	struct mlx4_shared_data *sd;
 	struct mlx4_local_data *ld = &mlx4_local_data;
-	int ret;
 
 	if (mlx4_init_shared_data())
 		return -rte_errno;
@@ -817,18 +672,12 @@ mlx4_init_once(void)
 		rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
 						mlx4_mr_mem_event_cb, NULL);
 		mlx4_mp_init_primary();
-		ret = mlx4_uar_init_primary();
-		if (ret)
-			goto error;
 		sd->init_done = true;
 		break;
 	case RTE_PROC_SECONDARY:
 		if (ld->init_done)
 			break;
 		mlx4_mp_init_secondary();
-		ret = mlx4_uar_init_secondary();
-		if (ret)
-			goto error;
 		++sd->secondary_cnt;
 		ld->init_done = true;
 		break;
@@ -837,23 +686,6 @@ mlx4_init_once(void)
 	}
 	rte_spinlock_unlock(&sd->lock);
 	return 0;
-error:
-	switch (rte_eal_process_type()) {
-	case RTE_PROC_PRIMARY:
-		mlx4_uar_uninit_primary();
-		mlx4_mp_uninit_primary();
-		rte_mem_event_callback_unregister("MLX4_MEM_EVENT_CB", NULL);
-		break;
-	case RTE_PROC_SECONDARY:
-		mlx4_uar_uninit_secondary();
-		mlx4_mp_uninit_secondary();
-		break;
-	default:
-		break;
-	}
-	rte_spinlock_unlock(&sd->lock);
-	mlx4_uninit_shared_data();
-	return -rte_errno;
 }
 
 /**
@@ -987,6 +819,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		snprintf(name, sizeof(name), "%s port %u",
 			 mlx4_glue->get_device_name(ibv_dev), port);
 		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+			struct mlx4_proc_priv *ppriv;
+			size_t ppriv_size;
+
 			eth_dev = rte_eth_dev_attach_secondary(name);
 			if (eth_dev == NULL) {
 				ERROR("can not attach rte ethdev");
@@ -1003,6 +838,25 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 				err = rte_errno;
 				goto error;
 			}
+			/*
+			 * UAR register table follows the process private
+			 * structure.  BlueFlame registers for Tx queues come
+			 * first and registers for Rx queues follow.
+			 */
+			ppriv_size =
+				sizeof(struct mlx4_proc_priv) +
+				(eth_dev->data->nb_rx_queues +
+				 eth_dev->data->nb_tx_queues) * sizeof(void *);
+			ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size,
+						  RTE_CACHE_LINE_SIZE,
+						  pci_dev->device.numa_node);
+			if (!ppriv) {
+				rte_errno = ENOMEM;
+				err = rte_errno;
+				goto error;
+			}
+			ppriv->uar_table_sz = ppriv_size;
+			eth_dev->process_private = ppriv;
 			eth_dev->device = &pci_dev->device;
 			eth_dev->dev_ops = &mlx4_dev_sec_ops;
 			/* Receive command fd from primary process. */
@@ -1012,7 +866,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 				goto error;
 			}
 			/* Remap UAR for Tx queues. */
-			err = mlx4_tx_uar_remap(eth_dev, err);
+			err = mlx4_tx_uar_init_secondary(eth_dev, err);
 			if (err) {
 				err = rte_errno;
 				goto error;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 1db23d6cc9..904c4f5c03 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -138,8 +138,6 @@ struct mlx4_shared_data {
 	/* Global spinlock for primary and secondary processes. */
 	int init_done; /* Whether primary has done initialization. */
 	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
-	void *uar_base;
-	/* Reserved UAR address space for TXQ UAR(hw doorbell) mapping. */
 	struct mlx4_dev_list mem_event_cb_list;
 	rte_rwlock_t mem_event_rwlock;
 };
@@ -147,12 +145,21 @@ struct mlx4_shared_data {
 /* Per-process data structure, not visible to other processes. */
 struct mlx4_local_data {
 	int init_done; /* Whether a secondary has done initialization. */
-	void *uar_base;
-	/* Reserved UAR address space for TXQ UAR(hw doorbell) mapping. */
 };
 
 extern struct mlx4_shared_data *mlx4_shared_data;
 
+/* Per-process private structure. */
+struct mlx4_proc_priv {
+	size_t uar_table_sz;
+	/* Size of UAR register table. */
+	void *uar_table[];
+	/* Table of UAR registers for each process. */
+};
+
+#define MLX4_PROC_PRIV(port_id) \
+	((struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private)
+
 /** Private data structure. */
 struct mlx4_priv {
 	LIST_ENTRY(mlx4_priv) mem_event_cb;
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index b3e11dde25..16ae6db82d 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -77,8 +77,7 @@ struct mlx4_sq {
 	uint32_t owner_opcode;
 	/**< Default owner opcode with HW valid owner bit. */
 	uint32_t stamp; /**< Stamp value with an invalid HW owner bit. */
-	volatile uint32_t *qp_sdb; /**< Pointer to the doorbell. */
-	volatile uint32_t *db; /**< Pointer to the doorbell remapped. */
+	uint32_t *db; /**< Pointer to the doorbell. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
 	uint32_t doorbell_qpn; /**< qp number to write to the doorbell. */
 };
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index f22f1ba559..391271a616 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -1048,7 +1048,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Make sure that descriptors are written before doorbell record. */
 	rte_wmb();
 	/* Ring QP doorbell. */
-	rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
+	rte_write32(txq->msq.doorbell_qpn, MLX4_TX_BFREG(txq));
 	txq->elts_head += i;
 	return i;
 }
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 7d7a8988ed..8baf33fa94 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -97,6 +97,7 @@ struct mlx4_txq_stats {
 struct txq {
 	struct mlx4_sq msq; /**< Info for directly manipulating the SQ. */
 	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
+	uint16_t port_id; /**< Port ID of device. */
 	unsigned int elts_head; /**< Current index in (*elts)[]. */
 	unsigned int elts_tail; /**< First element awaiting completion. */
 	int elts_comp_cd; /**< Countdown for next completion. */
@@ -118,6 +119,9 @@ struct txq {
 	uint8_t data[]; /**< Remaining queue resources. */
 };
 
+#define MLX4_TX_BFREG(txq) \
+		(MLX4_PROC_PRIV((txq)->port_id)->uar_table[(txq)->stats.idx])
+
 /* mlx4_rxq.c */
 
 uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
@@ -152,7 +156,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 
 /* mlx4_txq.c */
 
-int mlx4_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
 uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
 int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			uint16_t desc, unsigned int socket,
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 698a648c8d..01a5efd80d 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -40,11 +40,88 @@
 #include "mlx4_utils.h"
 
 /**
- * Mmap TX UAR(HW doorbell) pages into reserved UAR address space.
- * Both primary and secondary process do mmap to make UAR address
- * aligned.
+ * Initialize Tx UAR registers for primary process.
  *
- * @param[in] dev
+ * @param txq
+ *   Pointer to Tx queue structure.
+ */
+static void
+txq_uar_init(struct txq *txq)
+{
+	struct mlx4_priv *priv = txq->priv;
+	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv));
+
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	assert(ppriv);
+	ppriv->uar_table[txq->stats.idx] = txq->msq.db;
+}
+
+#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
+/**
+ * Remap UAR register of a Tx queue for secondary process.
+ *
+ * Remapped address is stored at the table in the process private structure of
+ * the device, indexed by queue index.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param fd
+ *   Verbs file descriptor to map UAR pages.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+txq_uar_init_secondary(struct txq *txq, int fd)
+{
+	struct mlx4_priv *priv = txq->priv;
+	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv));
+	void *addr;
+	uintptr_t uar_va;
+	uintptr_t offset;
+	const size_t page_size = sysconf(_SC_PAGESIZE);
+
+	assert(ppriv);
+	/*
+	 * As rdma-core, UARs are mapped in size of OS page
+	 * size. Ref to libmlx4 function: mlx4_init_context()
+	 */
+	uar_va = (uintptr_t)txq->msq.db;
+	offset = uar_va & (page_size - 1); /* Offset in page. */
+	addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
+			txq->msq.uar_mmap_offset);
+	if (addr == MAP_FAILED) {
+		ERROR("port %u mmap failed for BF reg of txq %u",
+		      txq->port_id, txq->stats.idx);
+		rte_errno = ENXIO;
+		return -rte_errno;
+	}
+	addr = RTE_PTR_ADD(addr, offset);
+	ppriv->uar_table[txq->stats.idx] = addr;
+	return 0;
+}
+
+/**
+ * Unmap UAR register of a Tx queue for secondary process.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ */
+static void
+txq_uar_uninit_secondary(struct txq *txq)
+{
+	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(txq->priv));
+	const size_t page_size = sysconf(_SC_PAGESIZE);
+	void *addr;
+
+	addr = ppriv->uar_table[txq->stats.idx];
+	munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+}
+
+/**
+ * Initialize Tx UAR registers for secondary process.
+ *
+ * @param dev
  *   Pointer to Ethernet device.
  * @param fd
  *   Verbs file descriptor to map UAR pages.
@@ -52,81 +129,41 @@
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
 int
-mlx4_tx_uar_remap(struct rte_eth_dev *dev, int fd)
+mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
 {
-	unsigned int i, j;
 	const unsigned int txqs_n = dev->data->nb_tx_queues;
-	uintptr_t pages[txqs_n];
-	unsigned int pages_n = 0;
-	uintptr_t uar_va;
-	uintptr_t off;
-	void *addr;
-	void *ret;
 	struct txq *txq;
-	int already_mapped;
-	size_t page_size = sysconf(_SC_PAGESIZE);
+	unsigned int i;
+	int ret;
 
-	memset(pages, 0, txqs_n * sizeof(uintptr_t));
-	/*
-	 * As rdma-core, UARs are mapped in size of OS page size.
-	 * Use aligned address to avoid duplicate mmap.
-	 * Ref to libmlx4 function: mlx4_init_context()
-	 */
+	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	for (i = 0; i != txqs_n; ++i) {
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		/* UAR addr form verbs used to find dup and offset in page. */
-		uar_va = (uintptr_t)txq->msq.qp_sdb;
-		off = uar_va & (page_size - 1); /* offset in page. */
-		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
-		already_mapped = 0;
-		for (j = 0; j != pages_n; ++j) {
-			if (pages[j] == uar_va) {
-				already_mapped = 1;
-				break;
-			}
-		}
-		/* new address in reserved UAR address space. */
-		addr = RTE_PTR_ADD(mlx4_shared_data->uar_base,
-				   uar_va & (uintptr_t)(MLX4_UAR_SIZE - 1));
-		if (!already_mapped) {
-			pages[pages_n++] = uar_va;
-			/* fixed mmap to specified address in reserved
-			 * address space.
-			 */
-			ret = mmap(addr, page_size,
-				   PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
-				   txq->msq.uar_mmap_offset);
-			if (ret != addr) {
-				/* fixed mmap has to return same address. */
-				ERROR("port %u call to mmap failed on UAR"
-				      " for txq %u",
-				      dev->data->port_id, i);
-				rte_errno = ENXIO;
-				return -rte_errno;
-			}
-		}
-		if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once. */
-			txq->msq.db = RTE_PTR_ADD((void *)addr, off);
-		else
-			assert(txq->msq.db ==
-			       RTE_PTR_ADD((void *)addr, off));
+		assert(txq->stats.idx == (uint16_t)i);
+		ret = txq_uar_init_secondary(txq, fd);
+		if (ret)
+			goto error;
 	}
 	return 0;
+error:
+	/* Rollback. */
+	do {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		txq_uar_uninit_secondary(txq);
+	} while (i--);
+	return -rte_errno;
 }
 #else
 int
-mlx4_tx_uar_remap(struct rte_eth_dev *dev __rte_unused, int fd __rte_unused)
+mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused,
+			   int fd __rte_unused)
 {
-	/*
-	 * Even if rdma-core doesn't support UAR remap, primary process
-	 * shouldn't be interrupted.
-	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		return 0;
+	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	ERROR("UAR remap is not supported");
 	rte_errno = ENOTSUP;
 	return -rte_errno;
@@ -187,11 +224,10 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 				     (0u << MLX4_SQ_OWNER_BIT));
 #ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
 	sq->uar_mmap_offset = dqp->uar_mmap_offset;
-	sq->qp_sdb = dqp->sdb;
 #else
 	sq->uar_mmap_offset = -1; /* Make mmap() fail. */
-	sq->db = dqp->sdb;
 #endif
+	sq->db = dqp->sdb;
 	sq->doorbell_qpn = dqp->doorbell_qpn;
 	cq->buf = dcq->buf.buf;
 	cq->cqe_cnt = dcq->cqe_cnt;
@@ -314,6 +350,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	*txq = (struct txq){
 		.priv = priv,
+		.port_id = dev->data->port_id,
 		.stats = {
 			.idx = idx,
 		},
@@ -432,6 +469,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 #endif
 	mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
+	txq_uar_init(txq);
 	/* Save first wqe pointer in the first element. */
 	(&(*txq->elts)[0])->wqe =
 		(volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
-- 
2.11.0


