patch 'app/bbdev: fix interrupt tests' has been queued to stable release 23.11.2
Xueming Li
xuemingl at nvidia.com
Mon Aug 12 14:48:02 CEST 2024
Hi,
FYI, your patch has been queued to stable release 23.11.2.

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 08/14/24, so please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs. the
patch as applied to the branch. This shows whether any rebasing was needed
to apply the patch to the stable branch. If the rebase required code changes
(i.e. not only metadata diffs), please double-check that it was done
correctly.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=c343cb088f5f741f234eb672f736f31f42df99b2
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From c343cb088f5f741f234eb672f736f31f42df99b2 Mon Sep 17 00:00:00 2001
From: Hernan Vargas <hernan.vargas at intel.com>
Date: Mon, 24 Jun 2024 08:02:31 -0700
Subject: [PATCH] app/bbdev: fix interrupt tests
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit fdcee665c5066cd1300d08b85d159ccabf9f3657 ]
Fix a possible error with the burst size being set from the enqueue
thread: burst_sz must be written before the operations are enqueued,
so that the interrupt callback dequeues the proper number of
descriptors.
Fixes: b2e2aec3239e ("app/bbdev: enhance interrupt test")
Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
app/test-bbdev/test_bbdev_perf.c | 98 ++++++++++++++++----------------
1 file changed, 49 insertions(+), 49 deletions(-)
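
The change is the same in all six throughput_intr_lcore_* functions: the
atomic store that publishes the burst size is moved ahead of the enqueue
loop. A minimal sketch of the corrected ordering (LDPC decoder variant;
tp, ops, enqueued, queue_id and num_to_enq come from the surrounding test
harness, so this is an excerpt, not a standalone program):

    /* Publish burst_sz BEFORE enqueueing: once the device raises the
     * dequeue interrupt, the callback reads tp->burst_sz to know how
     * many descriptors to dequeue, so it must never see a stale value.
     */
    __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);

    /* Then enqueue the batch, retrying until all ops are accepted. */
    enq = 0;
    do {
            enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id, queue_id,
                            &ops[enqueued], num_to_enq);
    } while (unlikely(num_to_enq != enq));
    enqueued += enq;

With the old ordering the enqueue could complete, and the interrupt fire,
before burst_sz was updated for the current batch.
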
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index d9267bb91f..5c1755ae0d 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -3408,15 +3408,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_ldpc_dec_ops(
- tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
- } while (unlikely(num_to_enq != enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3426,6 +3417,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_ldpc_dec_ops(
+ tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(num_to_enq != enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
@@ -3500,14 +3500,6 @@ throughput_intr_lcore_dec(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
- } while (unlikely(num_to_enq != enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3517,6 +3509,14 @@ throughput_intr_lcore_dec(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(num_to_enq != enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
@@ -3586,14 +3586,6 @@ throughput_intr_lcore_enc(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
- } while (unlikely(enq != num_to_enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3603,6 +3595,14 @@ throughput_intr_lcore_enc(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(enq != num_to_enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
@@ -3674,15 +3674,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_ldpc_enc_ops(
- tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
- } while (unlikely(enq != num_to_enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3692,6 +3683,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_ldpc_enc_ops(
+ tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(enq != num_to_enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
@@ -3763,14 +3763,6 @@ throughput_intr_lcore_fft(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
- queue_id, &ops[enqueued],
- num_to_enq);
- } while (unlikely(enq != num_to_enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3780,6 +3772,14 @@ throughput_intr_lcore_fft(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
+ queue_id, &ops[enqueued],
+ num_to_enq);
+ } while (unlikely(enq != num_to_enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
@@ -3846,13 +3846,6 @@ throughput_intr_lcore_mldts(void *arg)
if (unlikely(num_to_process - enqueued < num_to_enq))
num_to_enq = num_to_process - enqueued;
- enq = 0;
- do {
- enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
- queue_id, &ops[enqueued], num_to_enq);
- } while (unlikely(enq != num_to_enq));
- enqueued += enq;
-
/* Write to thread burst_sz current number of enqueued
* descriptors. It ensures that proper number of
* descriptors will be dequeued in callback
@@ -3862,6 +3855,13 @@ throughput_intr_lcore_mldts(void *arg)
*/
__atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ enq = 0;
+ do {
+ enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+ queue_id, &ops[enqueued], num_to_enq);
+ } while (unlikely(enq != num_to_enq));
+ enqueued += enq;
+
/* Wait until processing of previous batch is
* completed
*/
--
2.34.1
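
For reference, the consumer of burst_sz is the dequeue interrupt callback
that the test registers for RTE_BBDEV_EVENT_DEQUEUE. Below is a simplified
sketch of that side, modeled on the test's dequeue_event_callback; the
thread_params field names (queue_id, dec_ops) and the omitted error
handling and statistics are assumptions, not the exact upstream code:

    /* Hypothetical, simplified callback: dequeue exactly the batch
     * whose size the enqueue thread published in tp->burst_sz. */
    static void
    dequeue_event_callback(uint16_t dev_id, enum rte_bbdev_event_type event,
                    void *cb_arg, void *ret_param)
    {
            struct thread_params *tp = cb_arg; /* test-local type */
            uint16_t burst_sz, deq = 0;

            (void)ret_param; /* unused in this sketch */

            if (event != RTE_BBDEV_EVENT_DEQUEUE)
                    return;

            /* Must match the batch currently being enqueued; this is
             * why the store is ordered before the enqueue loop. */
            burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);

            do {
                    deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id,
                                    tp->queue_id, &tp->dec_ops[deq],
                                    burst_sz - deq);
            } while (unlikely(deq < burst_sz));
    }

If the store happened only after the enqueue (the old ordering), the
interrupt could fire while burst_sz still held the previous batch's value,
making the callback dequeue the wrong number of descriptors.
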
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2024-08-12 20:44:02.816614136 +0800
+++ 0005-app-bbdev-fix-interrupt-tests.patch 2024-08-12 20:44:01.885069253 +0800
@@ -1 +1 @@
-From fdcee665c5066cd1300d08b85d159ccabf9f3657 Mon Sep 17 00:00:00 2001
+From c343cb088f5f741f234eb672f736f31f42df99b2 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit fdcee665c5066cd1300d08b85d159ccabf9f3657 ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org
@@ -19 +21 @@
-index 9841464922..20cd8df19b 100644
+index d9267bb91f..5c1755ae0d 100644
@@ -22 +24 @@
-@@ -3419,15 +3419,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
+@@ -3408,15 +3408,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
@@ -38,3 +40,3 @@
-@@ -3438,6 +3429,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3426,6 +3417,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
@@ -54 +56 @@
-@@ -3514,14 +3514,6 @@ throughput_intr_lcore_dec(void *arg)
+@@ -3500,14 +3500,6 @@ throughput_intr_lcore_dec(void *arg)
@@ -69,3 +71,3 @@
-@@ -3532,6 +3524,14 @@ throughput_intr_lcore_dec(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3517,6 +3509,14 @@ throughput_intr_lcore_dec(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
@@ -84 +86 @@
-@@ -3603,14 +3603,6 @@ throughput_intr_lcore_enc(void *arg)
+@@ -3586,14 +3586,6 @@ throughput_intr_lcore_enc(void *arg)
@@ -99,3 +101,3 @@
-@@ -3621,6 +3613,14 @@ throughput_intr_lcore_enc(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3603,6 +3595,14 @@ throughput_intr_lcore_enc(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
@@ -114 +116 @@
-@@ -3694,15 +3694,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
+@@ -3674,15 +3674,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
@@ -130,3 +132,3 @@
-@@ -3713,6 +3704,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3692,6 +3683,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
@@ -146 +148 @@
-@@ -3786,14 +3786,6 @@ throughput_intr_lcore_fft(void *arg)
+@@ -3763,14 +3763,6 @@ throughput_intr_lcore_fft(void *arg)
@@ -161,3 +163,3 @@
-@@ -3804,6 +3796,14 @@ throughput_intr_lcore_fft(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3780,6 +3772,14 @@ throughput_intr_lcore_fft(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
@@ -176 +178 @@
-@@ -3872,13 +3872,6 @@ throughput_intr_lcore_mldts(void *arg)
+@@ -3846,13 +3846,6 @@ throughput_intr_lcore_mldts(void *arg)
@@ -190,3 +192,3 @@
-@@ -3889,6 +3882,13 @@ throughput_intr_lcore_mldts(void *arg)
- rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
- rte_memory_order_relaxed);
+@@ -3862,6 +3855,13 @@ throughput_intr_lcore_mldts(void *arg)
+ */
+ __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);