[PATCH v7 4/6] event/cnxk: add pre-schedule support
pbhagavatula at marvell.com
pbhagavatula at marvell.com
Sat Oct 5 09:59:59 CEST 2024
From: Pavan Nikhilesh <pbhagavatula at marvell.com>
Add device level and port level pre-schedule
support for cnxk eventdev.
Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
doc/guides/eventdevs/cnxk.rst | 10 ----------
doc/guides/eventdevs/features/cnxk.ini | 1 +
drivers/event/cnxk/cn10k_eventdev.c | 19 +++++++++++++++++--
drivers/event/cnxk/cn10k_worker.c | 21 +++++++++++++++++++++
drivers/event/cnxk/cn10k_worker.h | 2 ++
drivers/event/cnxk/cnxk_eventdev.c | 2 --
drivers/event/cnxk/cnxk_eventdev.h | 1 -
7 files changed, 41 insertions(+), 15 deletions(-)
diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index d038930594..e21846f4e0 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -78,16 +78,6 @@ Runtime Config Options
-a 0002:0e:00.0,single_ws=1
-- ``CN10K Getwork mode``
-
- CN10K supports three getwork prefetch modes no prefetch[0], prefetch
- immediately[1] and delayed prefetch on forward progress event[2].
- The default getwork mode is 2.
-
- For example::
-
- -a 0002:0e:00.0,gw_mode=1
-
- ``Event Group QoS support``
SSO GGRPs i.e. queue uses DRAM & SRAM buffers to hold in-flight
diff --git a/doc/guides/eventdevs/features/cnxk.ini b/doc/guides/eventdevs/features/cnxk.ini
index d1516372fa..5ba528f086 100644
--- a/doc/guides/eventdevs/features/cnxk.ini
+++ b/doc/guides/eventdevs/features/cnxk.ini
@@ -17,6 +17,7 @@ carry_flow_id = Y
maintenance_free = Y
runtime_queue_attr = Y
profile_links = Y
+preschedule = Y
[Eth Rx adapter Features]
internal_port = Y
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2d7b169974..5bd779990e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -527,6 +527,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
event_dev->profile_switch = cn10k_sso_hws_profile_switch;
+ event_dev->preschedule_modify = cn10k_sso_hws_preschedule_modify;
#else
RTE_SET_USED(event_dev);
#endif
@@ -541,6 +542,9 @@ cn10k_sso_info_get(struct rte_eventdev *event_dev,
dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
cnxk_sso_info_get(dev, dev_info);
dev_info->max_event_port_enqueue_depth = UINT32_MAX;
+ dev_info->event_dev_cap |= RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE |
+ RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE |
+ RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE;
}
static int
@@ -566,6 +570,19 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
if (rc < 0)
goto cnxk_rsrc_fini;
+ switch (event_dev->data->dev_conf.preschedule_type) {
+ default:
+ case RTE_EVENT_PRESCHEDULE_NONE:
+ dev->gw_mode = CN10K_GW_MODE_NONE;
+ break;
+ case RTE_EVENT_PRESCHEDULE:
+ dev->gw_mode = CN10K_GW_MODE_PREF;
+ break;
+ case RTE_EVENT_PRESCHEDULE_ADAPTIVE:
+ dev->gw_mode = CN10K_GW_MODE_PREF_WFE;
+ break;
+ }
+
rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
cn10k_sso_hws_setup);
if (rc < 0)
@@ -1199,7 +1216,6 @@ cn10k_sso_init(struct rte_eventdev *event_dev)
return 0;
}
- dev->gw_mode = CN10K_GW_MODE_PREF_WFE;
rc = cnxk_sso_init(event_dev);
if (rc < 0)
return rc;
@@ -1256,7 +1272,6 @@ RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
CNXK_SSO_GGRP_QOS "=<string>"
CNXK_SSO_FORCE_BP "=1"
- CN10K_SSO_GW_MODE "=<int>"
CN10K_SSO_STASH "=<string>"
CNXK_TIM_DISABLE_NPA "=1"
CNXK_TIM_CHNK_SLOTS "=<int>"
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index d59769717e..a0e85face1 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -442,3 +442,24 @@ cn10k_sso_hws_profile_switch(void *port, uint8_t profile)
return 0;
}
+
+int __rte_hot
+cn10k_sso_hws_preschedule_modify(void *port, enum rte_event_dev_preschedule_type type)
+{
+ struct cn10k_sso_hws *ws = port;
+
+ ws->gw_wdata &= ~(BIT(19) | BIT(20));
+ switch (type) {
+ default:
+ case RTE_EVENT_PRESCHEDULE_NONE:
+ break;
+ case RTE_EVENT_PRESCHEDULE:
+ ws->gw_wdata |= BIT(19);
+ break;
+ case RTE_EVENT_PRESCHEDULE_ADAPTIVE:
+ ws->gw_wdata |= BIT(19) | BIT(20);
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index c5026409d7..4785cc6575 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -377,6 +377,8 @@ uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
const struct rte_event ev[],
uint16_t nb_events);
int __rte_hot cn10k_sso_hws_profile_switch(void *port, uint8_t profile);
+int __rte_hot cn10k_sso_hws_preschedule_modify(void *port,
+ enum rte_event_dev_preschedule_type type);
#define R(name, flags) \
uint16_t __rte_hot cn10k_sso_hws_deq_##name( \
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 4b2d6bffa6..c1df481827 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -624,8 +624,6 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
&dev->force_ena_bp);
rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
&single_ws);
- rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,
- &dev->gw_mode);
rte_kvargs_process(kvlist, CN10K_SSO_STASH,
&parse_sso_kvargs_stash_dict, dev);
dev->dual_ws = !single_ws;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index ece49394e7..f147ef3c78 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -30,7 +30,6 @@
#define CNXK_SSO_GGRP_QOS "qos"
#define CNXK_SSO_FORCE_BP "force_rx_bp"
#define CN9K_SSO_SINGLE_WS "single_ws"
-#define CN10K_SSO_GW_MODE "gw_mode"
#define CN10K_SSO_STASH "stash"
#define CNXK_SSO_MAX_PROFILES 2
--
2.25.1
More information about the dev
mailing list