patch 'event/dlb2: fix public symbol namespace' has been queued to stable release 24.11.3
Kevin Traynor
ktraynor at redhat.com
Fri Jul 18 21:31:07 CEST 2025
Hi,
FYI, your patch has been queued to stable release 24.11.3
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/23/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable
This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/5ac3674a4726e8c399840be1e3040dc550f633d3
Thanks.
Kevin
---
>From 5ac3674a4726e8c399840be1e3040dc550f633d3 Mon Sep 17 00:00:00 2001
From: Pravin Pathak <pravin.pathak at intel.com>
Date: Thu, 19 Jun 2025 17:31:50 -0500
Subject: [PATCH] event/dlb2: fix public symbol namespace
[ upstream commit e31d4679575571890974730c989aa286cb516398 ]
Added RTE_PMD_DLB2_ prefix to dlb2 token pop mode enums
to avoid namespace conflict. These enums are passed to public
API rte_pmd_dlb2_set_token_pop_mode().
Fixes: c667583d82f4 ("event/dlb2: add token pop API")
Signed-off-by: Pravin Pathak <pravin.pathak at intel.com>
---
drivers/event/dlb2/dlb2.c | 28 +++++++++++++++-------------
drivers/event/dlb2/dlb2_priv.h | 2 +-
drivers/event/dlb2/dlb2_selftest.c | 6 +++---
drivers/event/dlb2/rte_pmd_dlb2.c | 4 ++--
drivers/event/dlb2/rte_pmd_dlb2.h | 23 ++++++++++++-----------
5 files changed, 33 insertions(+), 30 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 12c045f4e3..07a37d9aad 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1672,5 +1672,5 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
* performance reasons.
*/
- if (qm_port->token_pop_mode == DELAYED_POP) {
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP) {
dlb2->event_dev->enqueue_burst =
dlb2_event_enqueue_burst_delayed;
@@ -1881,5 +1881,5 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
/* Directed ports are auto-pop, by default. */
- qm_port->token_pop_mode = AUTO_POP;
+ qm_port->token_pop_mode = RTE_PMD_DLB2_AUTO_POP;
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
@@ -3133,5 +3133,5 @@ __dlb2_event_enqueue_burst_reorder(void *event_port,
}
- if (use_delayed && qm_port->token_pop_mode == DELAYED_POP &&
+ if (use_delayed && qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP &&
(events[i].op == RTE_EVENT_OP_FORWARD ||
events[i].op == RTE_EVENT_OP_RELEASE) &&
@@ -3238,5 +3238,5 @@ __dlb2_event_enqueue_burst(void *event_port,
if (use_delayed &&
- qm_port->token_pop_mode == DELAYED_POP &&
+ qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP &&
(ev->op == RTE_EVENT_OP_FORWARD ||
ev->op == RTE_EVENT_OP_RELEASE) &&
@@ -3388,5 +3388,5 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
int16_t thresh = qm_port->token_pop_thresh;
- if (qm_port->token_pop_mode == DELAYED_POP &&
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP &&
qm_port->issued_releases >= thresh - 1) {
/* Insert the token pop QE */
@@ -4132,5 +4132,5 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
if (num) {
- if (qm_port->token_pop_mode == AUTO_POP)
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_AUTO_POP)
dlb2_consume_qe_immediate(qm_port, num);
@@ -4260,5 +4260,5 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
if (num) {
- if (qm_port->token_pop_mode == AUTO_POP)
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_AUTO_POP)
dlb2_consume_qe_immediate(qm_port, num);
@@ -4297,5 +4297,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
__dlb2_event_enqueue_burst_reorder(event_port, NULL, 0,
- qm_port->token_pop_mode == DELAYED_POP);
+ qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP);
} else {
dlb2_event_release(dlb2, ev_port->id, out_rels);
@@ -4305,5 +4305,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
}
- if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_DEFERRED_POP && qm_port->owed_tokens)
dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
@@ -4346,5 +4346,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
__dlb2_event_enqueue_burst_reorder(event_port,
release_burst, RTE_DIM(release_burst),
- qm_port->token_pop_mode == DELAYED_POP);
+ qm_port->token_pop_mode ==
+ RTE_PMD_DLB2_DELAYED_POP);
num_releases = 0;
}
@@ -4354,5 +4355,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
if (num_releases)
__dlb2_event_enqueue_burst_reorder(event_port, release_burst
- , num_releases, qm_port->token_pop_mode == DELAYED_POP);
+ , num_releases,
+ qm_port->token_pop_mode == RTE_PMD_DLB2_DELAYED_POP);
} else {
dlb2_event_release(dlb2, ev_port->id, out_rels);
@@ -4363,5 +4365,5 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
}
- if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
+ if (qm_port->token_pop_mode == RTE_PMD_DLB2_DEFERRED_POP && qm_port->owed_tokens)
dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
@@ -4805,5 +4807,5 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
/* Initialize each port's token pop mode */
for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++)
- dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
+ dlb2->ev_ports[i].qm_port.token_pop_mode = RTE_PMD_DLB2_AUTO_POP;
rte_spinlock_init(&dlb2->qm_instance.resource_lock);
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 4118d69465..ea4fdacad8 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,5 +348,5 @@ struct dlb2_port {
uint16_t dir_credits;
uint32_t dequeue_depth;
- enum dlb2_token_pop_mode token_pop_mode;
+ enum rte_pmd_dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
diff --git a/drivers/event/dlb2/dlb2_selftest.c b/drivers/event/dlb2/dlb2_selftest.c
index 62aa11d981..87d98700c9 100644
--- a/drivers/event/dlb2/dlb2_selftest.c
+++ b/drivers/event/dlb2/dlb2_selftest.c
@@ -1106,5 +1106,5 @@ test_deferred_sched(void)
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DEFERRED_POP);
+ ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, RTE_PMD_DLB2_DEFERRED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
@@ -1112,5 +1112,5 @@ test_deferred_sched(void)
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 1, DEFERRED_POP);
+ ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 1, RTE_PMD_DLB2_DEFERRED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
@@ -1258,5 +1258,5 @@ test_delayed_pop(void)
}
- ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, DELAYED_POP);
+ ret = rte_pmd_dlb2_set_token_pop_mode(evdev, 0, RTE_PMD_DLB2_DELAYED_POP);
if (ret < 0) {
printf("%d: Error setting deferred scheduling\n", __LINE__);
diff --git a/drivers/event/dlb2/rte_pmd_dlb2.c b/drivers/event/dlb2/rte_pmd_dlb2.c
index 43990e46ac..20681b25ed 100644
--- a/drivers/event/dlb2/rte_pmd_dlb2.c
+++ b/drivers/event/dlb2/rte_pmd_dlb2.c
@@ -13,5 +13,5 @@ int
rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
uint8_t port_id,
- enum dlb2_token_pop_mode mode)
+ enum rte_pmd_dlb2_token_pop_mode mode)
{
struct dlb2_eventdev *dlb2;
@@ -23,5 +23,5 @@ rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
dlb2 = dlb2_pmd_priv(dev);
- if (mode >= NUM_TOKEN_POP_MODES)
+ if (mode >= RTE_PMD_DLB2_NUM_TOKEN_POP_MODES)
return -EINVAL;
diff --git a/drivers/event/dlb2/rte_pmd_dlb2.h b/drivers/event/dlb2/rte_pmd_dlb2.h
index 207ce6a3fd..31ce20d414 100644
--- a/drivers/event/dlb2/rte_pmd_dlb2.h
+++ b/drivers/event/dlb2/rte_pmd_dlb2.h
@@ -50,16 +50,16 @@ extern "C" {
* Selects the token pop mode for a DLB2 port.
*/
-enum dlb2_token_pop_mode {
+enum rte_pmd_dlb2_token_pop_mode {
/* Pop the CQ tokens immediately after dequeuing. */
- AUTO_POP,
+ RTE_PMD_DLB2_AUTO_POP,
/* Pop CQ tokens after (dequeue_depth - 1) events are released.
* Supported on load-balanced ports only.
*/
- DELAYED_POP,
+ RTE_PMD_DLB2_DELAYED_POP,
/* Pop the CQ tokens during next dequeue operation. */
- DEFERRED_POP,
+ RTE_PMD_DLB2_DEFERRED_POP,
/* NUM_TOKEN_POP_MODES must be last */
- NUM_TOKEN_POP_MODES
+ RTE_PMD_DLB2_NUM_TOKEN_POP_MODES
};
@@ -69,6 +69,7 @@ enum dlb2_token_pop_mode {
*
* Configure the token pop mode for a DLB2 port. By default, all ports use
- * AUTO_POP. This function must be called before calling rte_event_port_setup()
- * for the port, but after calling rte_event_dev_configure().
+ * RTE_PMD_DLB2_AUTO_POP. This function must be called before calling
+ * rte_event_port_setup() for the port, but after calling
+ * rte_event_dev_configure().
*
* @param dev_id
@@ -81,7 +82,7 @@ enum dlb2_token_pop_mode {
* @return
* - 0: Success
- * - EINVAL: Invalid dev_id, port_id, or mode
- * - EINVAL: The DLB2 is not configured, is already running, or the port is
- * already setup
+ * - EINVAL: Invalid parameter dev_id, port_id, or mode
+ * - EINVAL: The DLB2 device is not configured or is already running,
+ * or the port is already setup
*/
@@ -90,5 +91,5 @@ int
rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
uint8_t port_id,
- enum dlb2_token_pop_mode mode);
+ enum rte_pmd_dlb2_token_pop_mode mode);
#ifdef __cplusplus
--
2.50.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-07-18 20:29:15.687719753 +0100
+++ 0133-event-dlb2-fix-public-symbol-namespace.patch 2025-07-18 20:29:11.091907824 +0100
@@ -1 +1 @@
-From e31d4679575571890974730c989aa286cb516398 Mon Sep 17 00:00:00 2001
+From 5ac3674a4726e8c399840be1e3040dc550f633d3 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit e31d4679575571890974730c989aa286cb516398 ]
+
@@ -11 +12,0 @@
-Cc: stable at dpdk.org
@@ -23 +24 @@
-index fd8cc70f3c..084875f1c8 100644
+index 12c045f4e3..07a37d9aad 100644
@@ -26 +27 @@
-@@ -1820,5 +1820,5 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
+@@ -1672,5 +1672,5 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
@@ -33 +34 @@
-@@ -2022,5 +2022,5 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
+@@ -1881,5 +1881,5 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
@@ -40 +41 @@
-@@ -3360,5 +3360,5 @@ __dlb2_event_enqueue_burst_reorder(void *event_port,
+@@ -3133,5 +3133,5 @@ __dlb2_event_enqueue_burst_reorder(void *event_port,
@@ -47 +48 @@
-@@ -3469,5 +3469,5 @@ __dlb2_event_enqueue_burst(void *event_port,
+@@ -3238,5 +3238,5 @@ __dlb2_event_enqueue_burst(void *event_port,
@@ -54 +55 @@
-@@ -3621,5 +3621,5 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
+@@ -3388,5 +3388,5 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
@@ -61 +62 @@
-@@ -4366,5 +4366,5 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
+@@ -4132,5 +4132,5 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
@@ -68 +69 @@
-@@ -4496,5 +4496,5 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
+@@ -4260,5 +4260,5 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
@@ -75 +76 @@
-@@ -4541,5 +4541,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
+@@ -4297,5 +4297,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
@@ -82 +83 @@
-@@ -4549,5 +4549,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
+@@ -4305,5 +4305,5 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
@@ -89 +90 @@
-@@ -4598,5 +4598,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
+@@ -4346,5 +4346,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
@@ -97 +98 @@
-@@ -4606,5 +4607,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
+@@ -4354,5 +4355,6 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
@@ -105 +106 @@
-@@ -4615,5 +4617,5 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
+@@ -4363,5 +4365,5 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
@@ -112 +113 @@
-@@ -5143,5 +5145,5 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
+@@ -4805,5 +4807,5 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
@@ -120 +121 @@
-index 30d1d5b9ae..7a5cbcca1e 100644
+index 4118d69465..ea4fdacad8 100644
@@ -123 +124 @@
-@@ -350,5 +350,5 @@ struct dlb2_port {
+@@ -348,5 +348,5 @@ struct dlb2_port {
@@ -156 +157 @@
-index b75010027d..80186dd07d 100644
+index 43990e46ac..20681b25ed 100644
@@ -159 +160 @@
-@@ -15,5 +15,5 @@ int
+@@ -13,5 +13,5 @@ int
@@ -166 +167 @@
-@@ -25,5 +25,5 @@ rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
+@@ -23,5 +23,5 @@ rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
@@ -174 +175 @@
-index f58ef2168d..33e741261d 100644
+index 207ce6a3fd..31ce20d414 100644
@@ -226 +227 @@
- /** Set inflight threshold for flow migration */
+ #ifdef __cplusplus
More information about the stable
mailing list