[dpdk-dev] [PATCHv6 31/33] net/dpaa2: enabling the use of physical addresses

Hemant Agrawal hemant.agrawal at nxp.com
Mon Jan 23 13:00:01 CET 2017


DPAA2 on ARM supports both physical and virtual addressing.
This patch enables compile-time selection of physical
addresses instead of virtual addresses.

Physical addressing is also made the default.

Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
---
 config/common_base                        |  1 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |  1 +
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h   | 66 +++++++++++++++++++++++++++++++
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c    |  4 +-
 drivers/net/dpaa2/dpaa2_rxtx.c            | 16 ++++----
 drivers/pool/dpaa2/dpaa2_hw_mempool.c     | 19 +++++++--
 6 files changed, 95 insertions(+), 12 deletions(-)
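
Note (not part of the patch): a minimal sketch of how the transmit path in
dpaa2_rxtx.c is expected to use the conversion macros added to
dpaa2_hw_pvt.h. The macros and the fd/mbuf fields are the ones used by the
patch; the wrapper name example_mbuf_to_fd() and its parameter list are
illustrative only.

    /* Illustrative sketch -- mirrors the Tx frame-descriptor setup
     * in dpaa2_rxtx.c. */
    static inline void
    example_mbuf_to_fd(struct rte_mbuf *mbuf, struct qbman_fd *fd,
                       uint16_t bpid)
    {
            fd->simple.bpid_offset = 0;
            /* Hardware is handed an IOVA: the buffer's physical address
             * when CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y, its virtual
             * address otherwise. */
            DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
            DPAA2_SET_FD_LEN(fd, mbuf->data_len);
            DPAA2_SET_FD_BPID(fd, bpid);
            DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
    }

The receive path does the inverse: DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
recovers the virtual address before the mbuf is rebuilt.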

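A second sketch (also not part of the patch) of what the two helpers provide
when physical addressing is enabled: translating a DPDK-managed buffer
address to an IOVA and back yields the original pointer. The rte_malloc()
call here is illustrative; any address inside an EAL memseg works, while
addresses outside the memseg table make the helpers return 0/NULL.

    /* Illustrative round-trip check, assuming
     * CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y and DPDK-managed memory. */
    void *buf = rte_malloc(NULL, 64, 0);        /* inside an EAL memseg */
    phys_addr_t pa = DPAA2_VADDR_TO_IOVA(buf);  /* dpaa2_mem_vtop() */
    void *va = DPAA2_IOVA_TO_VADDR(pa);         /* dpaa2_mem_ptov() */
    /* va == buf on success. Both lookups walk
     * rte_eal_get_physmem_layout(), which is why the todo in
     * dpaa2_mem_ptov() flags the conversion as costly for the fast path. */
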
diff --git a/config/common_base b/config/common_base
index dd3de11..6f1787a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -291,6 +291,7 @@ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
 # Compile Support Libraries for NXP DPAA2
 #
 CONFIG_RTE_LIBRTE_DPAA2_POOL=n
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
 
 #
 # Compile NXP DPAA2 FSL-MC Bus
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 3cdb31b..29a56c7 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
 #
 CONFIG_RTE_LIBRTE_DPAA2_POOL=n
 CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
 
 #
 # Compile NXP DPAA2 FSL-MC Bus
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index c26360d3..ad8a22f 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -175,6 +175,72 @@ struct qbman_fle {
  */
 #define DPAA2_EQ_RESP_ALWAYS		1
 
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
+	/* todo - this is costly, need to write a fast conversion routine */
+static void *dpaa2_mem_ptov(phys_addr_t paddr)
+{
+	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+	int i;
+
+	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+		if (paddr >= memseg[i].phys_addr &&
+		   (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+			return (void *)(memseg[i].addr_64
+				+ (paddr - memseg[i].phys_addr));
+	}
+	return NULL;
+}
+
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+{
+	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+	int i;
+
+	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+		if (vaddr >= memseg[i].addr_64 &&
+		    vaddr < memseg[i].addr_64 + memseg[i].len)
+			return memseg[i].phys_addr
+				+ (vaddr - memseg[i].addr_64);
+	}
+	return (phys_addr_t)(NULL);
+}
+
+/**
+ * When physical addresses are used as IO virtual addresses, the
+ * conversion routines dpaa2_mem_vtop and dpaa2_mem_ptov must be
+ * called wherever required.
+ * These routines are invoked through the macros below.
+ */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+
+/**
+ * macro to convert Virtual address to IOVA
+ */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+
+/**
+ * macro to convert IOVA to Virtual address
+ */
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+
+/**
+ * macro to convert, in place, memory containing an IOVA to a virtual address
+ */
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+	{_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
+
+#else	/* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+
+#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
 struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
 void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
 
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 08f53b3..3dc60cc 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -76,7 +76,7 @@
 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
 
 	dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
-	tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
 	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
 	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
 
@@ -119,7 +119,7 @@ int dpaa2_remove_flow_dist(
 	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
 
-	tc_cfg.key_cfg_iova = (uint64_t)(p_params);
+	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
 	tc_cfg.dist_size = 0;
 	tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
 
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index a94761c..49b4558 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -136,7 +136,7 @@ static inline struct rte_mbuf *__attribute__((hot))
 eth_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
-			DPAA2_GET_FD_ADDR(fd),
+		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
 	/* need to repopulated some of the fields,
@@ -151,10 +151,11 @@ static inline struct rte_mbuf *__attribute__((hot))
 	/* Parse the packet */
 	/* parse results are after the private - sw annotation area */
 	mbuf->packet_type = dpaa2_dev_rx_parse(
-			(uint64_t)(DPAA2_GET_FD_ADDR(fd))
+			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
 			 + DPAA2_FD_PTA_SIZE);
 
-	dpaa2_dev_rx_offload((uint64_t)(DPAA2_GET_FD_ADDR(fd)) +
+	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+			     DPAA2_GET_FD_ADDR(fd)) +
 			     DPAA2_FD_PTA_SIZE, mbuf);
 
 	mbuf->next = NULL;
@@ -177,7 +178,7 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 	/*Resetting the buffer pool id and offset field*/
 	fd->simple.bpid_offset = 0;
 
-	DPAA2_SET_FD_ADDR(fd, (mbuf->buf_addr));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
 	DPAA2_SET_FD_BPID(fd, bpid);
 	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
@@ -219,7 +220,7 @@ static inline int __attribute__((hot))
 	/*Resetting the buffer pool id and offset field*/
 	fd->simple.bpid_offset = 0;
 
-	DPAA2_SET_FD_ADDR(fd, (m->buf_addr));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
 	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
 	DPAA2_SET_FD_BPID(fd, bpid);
 	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
@@ -271,7 +272,7 @@ static inline int __attribute__((hot))
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	/* todo optimization - we can have dq_storage_phys available*/
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(dma_addr_t)(dq_storage), 1);
+			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 
 	/*Issue a volatile dequeue command. */
 	while (1) {
@@ -312,7 +313,8 @@ static inline int __attribute__((hot))
 		}
 
 		fd = qbman_result_DQ_fd(dq_storage);
-		mbuf = (struct rte_mbuf *)(DPAA2_GET_FD_ADDR(fd)
+		mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
+		   DPAA2_GET_FD_ADDR(fd)
 		   - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 		/* Prefeth mbuf */
 		rte_prefetch0(mbuf);
diff --git a/drivers/pool/dpaa2/dpaa2_hw_mempool.c b/drivers/pool/dpaa2/dpaa2_hw_mempool.c
index 0c8de51..ca42418 100644
--- a/drivers/pool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/pool/dpaa2/dpaa2_hw_mempool.c
@@ -203,9 +203,14 @@ void rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
 	n = count % DPAA2_MBUF_MAX_ACQ_REL;
 
 	/* convert mbuf to buffers  for the remainder*/
-	for (i = 0; i < n ; i++)
+	for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+				+ meta_data_size;
+#else
 		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
-
+#endif
+	}
 	/* feed them to bman*/
 	do {
 		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
@@ -214,8 +219,15 @@ void rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
 	/* if there are more buffers to free */
 	while (n < count) {
 		/* convert mbuf to buffers */
-		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++)
+		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+			bufs[i] = (uint64_t)
+				rte_mempool_virt2phy(pool, obj_table[n + i])
+					+ meta_data_size;
+#else
 			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+		}
 
 		do {
 			ret = qbman_swp_release(swp, &releasedesc, bufs,
@@ -288,6 +300,7 @@ int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 			 * i.e. first buffer is valid,
 			 * remaining 6 buffers may be null
 			 */
+			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
 			obj_table[n] = (struct rte_mbuf *)(bufs[i] - mbuf_size);
 			rte_mbuf_refcnt_set((struct rte_mbuf *)obj_table[n], 0);
 			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
-- 
1.9.1


