[dpdk-dev] [PATCH v4 10/11] event/octeontx: add option to use fpavf as chunk pool

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Mon Apr 9 23:00:34 CEST 2018


Add a compile-time configurable option to force TIMvf to use the Octeontx
FPAvf pool manager as its chunk pool.
When FPAvf is used as the pool manager, the TIMvf hardware automatically
frees the chunks back to FPAvf through the gpool-id.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
---
 drivers/event/octeontx/timvf_evdev.c  | 22 +++++++++++++++++++++-
 drivers/event/octeontx/timvf_evdev.h  |  3 ++-
 drivers/event/octeontx/timvf_worker.c |  7 +++++--
 drivers/event/octeontx/timvf_worker.h | 22 ++++++++++++++++++++++
 4 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index d6a8bb355..b20a2f1f5 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -125,7 +125,9 @@ static int
 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 {
 	int ret;
+	uint8_t use_fpa = 0;
 	uint64_t interval;
+	uintptr_t pool;
 	struct timvf_ctrl_reg rctrl;
 	struct timvf_mbox_dev_info dinfo;
 	struct timvf_ring *timr = adptr->data->adapter_priv;
@@ -155,6 +157,9 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 		return -EINVAL;
 	}
 
+	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+		use_fpa = 1;
+
 	/*CTRL0 register.*/
 	rctrl.rctrl0 = interval;
 
@@ -167,9 +172,24 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 
 	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
 
+	if (use_fpa) {
+		pool = (uintptr_t)((struct rte_mempool *)
+				timr->chunk_pool)->pool_id;
+		ret = octeontx_fpa_bufpool_gpool(pool);
+		if (ret < 0) {
+			timvf_log_dbg("Unable to get gaura id");
+			ret = -ENOMEM;
+			goto error;
+		}
+		timvf_write64((uint64_t)ret,
+				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+	} else {
+		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+	}
+
 	timvf_write64((uintptr_t)timr->bkt,
 			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
-	timvf_set_chunk_refill(timr);
+	timvf_set_chunk_refill(timr, use_fpa);
 	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
 		ret = -EACCES;
 		goto error;
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index b3fc343af..b1b2a8464 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -25,6 +25,7 @@
 #include <rte_reciprocal.h>
 
 #include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
 
 #define timvf_log(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
@@ -220,6 +221,6 @@ uint16_t timvf_timer_arm_tmo_brst_stats(
 		const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint64_t timeout_tick,
 		const uint16_t nb_timers);
-void timvf_set_chunk_refill(struct timvf_ring * const timr);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
 
 #endif /* __TIMVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c
index 02e17b6f5..e681bc6b8 100644
--- a/drivers/event/octeontx/timvf_worker.c
+++ b/drivers/event/octeontx/timvf_worker.c
@@ -191,7 +191,10 @@ timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
 }
 
 void
-timvf_set_chunk_refill(struct timvf_ring * const timr)
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
 {
-	timr->refill_chunk = timvf_refill_chunk_generic;
+	if (use_fpa)
+		timr->refill_chunk = timvf_refill_chunk_fpa;
+	else
+		timr->refill_chunk = timvf_refill_chunk_generic;
 }
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index 93254cd39..dede1a4a4 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -213,6 +213,28 @@ timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
 	return chunk;
 }
 
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+		struct timvf_ring * const timr)
+{
+	struct tim_mem_entry *chunk;
+
+	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+		return NULL;
+
+	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
+	if (bkt->nb_entry) {
+		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+				bkt->current_chunk) +
+				nb_chunk_slots) =
+			(uintptr_t) chunk;
+	} else {
+		bkt->first_chunk = (uintptr_t) chunk;
+	}
+
+	return chunk;
+}
+
 static inline struct tim_mem_bucket *
 timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
 {
-- 
2.17.0



More information about the dev mailing list