[dpdk-dev] [PATCH v3 10/12] event/octeontx: add option to use fpavf as chunk pool

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Tue Apr 3 17:05:12 CEST 2018


Add a compile-time configurable option to force TIMvf to use the Octeontx
FPAvf pool manager as its chunk pool.
When FPAvf is used as the pool manager, TIMvf automatically frees the
chunks back to FPAvf through the gpool id.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
---
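Note (not part of the commit message): the FPAvf path is selected at ring
start by comparing rte_mbuf_best_mempool_ops() against "octeontx_fpavf"
(see timvf_ring_start() in the diff below). The following is a minimal
sketch of how an application could pin that ops name before creating any
pools; the helper name is hypothetical, and on Octeontx platforms the
mempool PMD may already register itself as the platform ops, making the
explicit call unnecessary.

#include <string.h>
#include <rte_mbuf_pool_ops.h>

/* Hypothetical helper: request the "octeontx_fpavf" mempool ops so that
 * TIMvf programs the gaura into TIM_VRING_AURA and lets hardware free
 * the chunks. Call after rte_eal_init() and before creating any pools.
 * Roughly equivalent to the EAL option --mbuf-pool-ops-name=octeontx_fpavf.
 */
static int
pin_fpavf_mempool_ops(void)
{
	if (rte_mbuf_set_user_mempool_ops("octeontx_fpavf") < 0)
		return -1;

	/* Same check the driver performs in timvf_ring_start(). */
	return strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf") == 0 ?
		0 : -1;
}
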
 drivers/event/octeontx/timvf_evdev.c  | 22 +++++++++++++++++++++-
 drivers/event/octeontx/timvf_evdev.h  |  3 ++-
 drivers/event/octeontx/timvf_worker.c |  7 +++++--
 drivers/event/octeontx/timvf_worker.h | 23 +++++++++++++++++++++++
 4 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index d1f42a3bd..75dd5cd65 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -125,7 +125,9 @@ static int
 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 {
 	int ret;
+	uint8_t use_fpa = 0;
 	uint64_t interval = 0;
+	uintptr_t pool;
 	struct timvf_ctrl_reg rctrl = {0};
 	struct timvf_mbox_dev_info dinfo;
 	struct timvf_ring *timr = adptr->data->adapter_priv;
@@ -151,6 +153,9 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 		break;
 	}
 
+	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+		use_fpa = 1;
+
 	/*CTRL0 register.*/
 	rctrl.rctrl0 = interval;
 
@@ -163,9 +168,24 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 
 	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
 
+	if (use_fpa) {
+		pool = (uintptr_t)((struct rte_mempool *)
+				timr->meta.chunk_pool)->pool_id;
+		ret = octeontx_fpa_bufpool_gpool(pool);
+		if (ret < 0) {
+			timvf_log_dbg("Unable to get gaura id");
+			ret = -ENOMEM;
+			goto error;
+		}
+		timvf_write64((uint64_t)ret,
+				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+	} else {
+		rctrl.rctrl1 |= 1ull << 43;
+	}
+
 	timvf_write64((uint64_t)timr->meta.bkt,
 			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
-	timvf_set_chunk_refill(timr);
+	timvf_set_chunk_refill(timr, use_fpa);
 	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
 		ret = -EACCES;
 		goto error;
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 22c8c2266..96fd33ee2 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -25,6 +25,7 @@
 #include <rte_reciprocal.h>
 
 #include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
 
 #define timvf_log(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
@@ -209,6 +210,6 @@ uint16_t timvf_timer_reg_burst_mp(const struct rte_event_timer_adapter *adptr,
 uint16_t timvf_timer_reg_brst(const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint64_t timeout_tick,
 		const uint16_t nb_timers);
-void timvf_set_chunk_refill(struct timvf_ring * const timr);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
 
 #endif /* __TIMVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c
index f4f40d150..26ec4cc74 100644
--- a/drivers/event/octeontx/timvf_worker.c
+++ b/drivers/event/octeontx/timvf_worker.c
@@ -152,7 +152,10 @@ timvf_timer_reg_brst(const struct rte_event_timer_adapter *adptr,
 }
 
 void
-timvf_set_chunk_refill(struct timvf_ring * const timr)
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
 {
-	timr->meta.refill_chunk = timvf_refill_chunk_generic;
+	if (use_fpa)
+		timr->meta.refill_chunk = timvf_refill_chunk_fpa;
+	else
+		timr->meta.refill_chunk = timvf_refill_chunk_generic;
 }
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index 9dad5c769..39ce4535c 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -201,6 +201,7 @@ timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
 					nb_chunk_slots) =
 				(uint64_t) chunk;
 		} else {
+
 			bkt->first_chunk = (uint64_t) chunk;
 		}
 	} else {
@@ -212,6 +213,28 @@ timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
 	return chunk;
 }
 
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+		struct timvf_ring * const timr)
+{
+	struct tim_mem_entry *chunk;
+
+	if (unlikely(rte_mempool_get(timr->meta.chunk_pool, (void **)&chunk)))
+		return NULL;
+
+	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
+	if (bkt->nb_entry) {
+		*(uint64_t *)((struct tim_mem_entry *)
+				bkt->current_chunk +
+				nb_chunk_slots) =
+			(uint64_t) chunk;
+	} else {
+		bkt->first_chunk = (uint64_t) chunk;
+	}
+
+	return chunk;
+}
+
 static inline struct tim_mem_bucket *
 timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
 {
-- 
2.16.3
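
A short summary sketch of the two chunk-free modes set up above (outside the
patch itself). TIM_VRING_AURA, octeontx_fpa_bufpool_gpool(), timvf_write64()
and the rctrl1 flag are taken from the diff; the macro name below is my
shorthand for the "1ull << 43" bit, whose hardware field name is not shown in
this diff, and the helper is a hypothetical consolidation of the hunk in
timvf_ring_start().

/* Assumed meaning of the CTRL1 bit set on the non-FPA path: TIM must not
 * free chunks to an aura itself; the driver recycles them in software. */
#define TIMVF_CTRL1_SW_CHUNK_FREE	(1ULL << 43)

static int
timvf_setup_chunk_free_mode(struct timvf_ring *timr,
		struct timvf_ctrl_reg *rctrl, uint8_t use_fpa)
{
	if (use_fpa) {
		/* Hardware free: tell TIM which gaura owns the chunks so it
		 * can return them to FPAvf directly. */
		int gpool = octeontx_fpa_bufpool_gpool((uintptr_t)
			((struct rte_mempool *)timr->meta.chunk_pool)->pool_id);
		if (gpool < 0)
			return -ENOMEM;
		timvf_write64((uint64_t)gpool,
				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
	} else {
		/* Software free: keep chunk recycling in the driver. */
		rctrl->rctrl1 |= TIMVF_CTRL1_SW_CHUNK_FREE;
	}
	return 0;
}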


