[dpdk-dev] [PATCH v3 17/19] mempool/dpaa2: support saving context of buffer pool

Shreyansh Jain shreyansh.jain@nxp.com
Fri Jan 11 13:25:02 CET 2019


The initial design kept the buffer pool information per process: a
global static array stored the bpids. For secondary processes, however,
this prevented the I/O threads from translating the bpid carried in
received packets, because the static array is private to each process.

This patch moves the array into an rte_malloc'd area, which is visible
across processes. If an Rx thread does not hold a valid reference to
the array, the reference is rebuilt from the handle available in the
dpaa2_queue.
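
As an illustration, here is a minimal C sketch of the allocation side.
It mirrors the dpaa2_hw_mempool.c hunk below; bp_info_sketch,
MAX_BPID_SKETCH and bpid_table_init() are illustrative stand-ins for
the driver's dpaa2_bp_info, MAX_BPID and rte_hw_mbuf_create_pool(),
not the driver code itself:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#define MAX_BPID_SKETCH 32 /* stand-in for the driver's MAX_BPID */

struct bp_info_sketch {
	uint16_t bpid;           /* hardware buffer pool id */
	uint32_t meta_data_size; /* per-buffer metadata size */
};

/* Lives in rte_malloc'd hugepage memory, which secondary processes map
 * at the same virtual address; a plain static array would be private
 * to each process.
 */
static struct bp_info_sketch *bpid_table;

static int
bpid_table_init(void)
{
	if (bpid_table != NULL)
		return 0; /* already created by an earlier pool */

	bpid_table = rte_malloc(NULL,
			sizeof(*bpid_table) * MAX_BPID_SKETCH,
			RTE_CACHE_LINE_SIZE);
	if (bpid_table == NULL)
		return -ENOMEM;
	memset(bpid_table, 0,
	       sizeof(*bpid_table) * MAX_BPID_SKETCH);
	return 0;
}

(rte_zmalloc() would fold the memset() into the allocation; the patch
uses rte_malloc() followed by an explicit memset(), which is
equivalent.)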

Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h  |  1 +
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 12 +++++++++++-
 drivers/mempool/dpaa2/dpaa2_hw_mempool.h |  2 +-
 drivers/net/dpaa2/dpaa2_ethdev.c         |  1 +
 drivers/net/dpaa2/dpaa2_rxtx.c           |  5 +++++
 5 files changed, 19 insertions(+), 2 deletions(-)
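
Continuing the sketch above, the receive-side fallback looks roughly
like this. queue_sketch is a stand-in for struct dpaa2_queue, reduced
to the one field this patch adds:

#include <rte_branch_prediction.h>
#include <rte_eal.h>

struct queue_sketch {
	/* Set by the primary at Rx queue setup time, pointing at the
	 * shared bpid table (see dpaa2_dev_rx_queue_setup() below).
	 */
	struct bp_info_sketch *bp_array;
};

/* Called on the Rx path: if this (secondary) process has not resolved
 * the bpid table yet, pick up the handle the primary stored in the
 * queue. A primary never takes this branch, since it allocated the
 * table in bpid_table_init() before any Rx could happen.
 */
static inline void
recover_bpid_table(const struct queue_sketch *q)
{
	if (unlikely(bpid_table == NULL &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		bpid_table = q->bp_array;
}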

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index efbeebef9..20c606dbe 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -141,6 +141,7 @@ struct dpaa2_queue {
 	};
 	struct rte_event ev;
 	dpaa2_queue_cb_dqrr_t *cb;
+	struct dpaa2_bp_info *bp_array;
 };
 
 struct swp_active_dqs {
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 790cded80..335eae40e 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -32,7 +32,7 @@
 
 #include <dpaax_iova_table.h>
 
-struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+struct dpaa2_bp_info *rte_dpaa2_bpid_info;
 static struct dpaa2_bp_list *h_bp_list;
 
 /* Dynamic logging identified for mempool */
@@ -50,6 +50,16 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
 
 	avail_dpbp = dpaa2_alloc_dpbp_dev();
 
+	if (rte_dpaa2_bpid_info == NULL) {
+		rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+				      sizeof(struct dpaa2_bp_info) * MAX_BPID,
+				      RTE_CACHE_LINE_SIZE);
+		if (rte_dpaa2_bpid_info == NULL)
+			return -ENOMEM;
+		memset(rte_dpaa2_bpid_info, 0,
+		       sizeof(struct dpaa2_bp_info) * MAX_BPID);
+	}
+
 	if (!avail_dpbp) {
 		DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
 		return -ENOENT;
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index 4d3468746..93694616e 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -59,7 +59,7 @@ struct dpaa2_bp_info {
 #define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
 #define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
 
-extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+extern struct dpaa2_bp_info *rte_dpaa2_bpid_info;
 
 int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 		       void **obj_table, unsigned int count);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 861fbcd90..3a20158da 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -485,6 +485,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
+	dpaa2_q->bp_array = rte_dpaa2_bpid_info;
 
 	/*Get the flow id from given VQ id*/
 	flow_id = rx_queue_id % priv->nb_rx_queues;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 816ea00fd..6e2e8abd7 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -518,6 +518,11 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			return 0;
 		}
 	}
+
+	if (unlikely(!rte_dpaa2_bpid_info &&
+		     rte_eal_process_type() == RTE_PROC_SECONDARY))
+		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
+
 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
 	if (unlikely(!q_storage->active_dqs)) {
-- 
2.17.1


