[PATCH 24.11 2/3] [PATCH 24.11] app/dma-perf: fix on-flight DMA when verifying data
Shani Peretz
shperetz at nvidia.com
Tue Dec 30 11:53:44 CET 2025
> -----Original Message-----
> From: Chengwen Feng <fengchengwen at huawei.com>
> Sent: Tuesday, 25 November 2025 3:51
> To: stable at dpdk.org; ktraynor at redhat.com
> Cc: david.marchand at redhat.com
> Subject: [PATCH 24.11 2/3] [PATCH 24.11] app/dma-perf: fix on-flight DMA
> when verifying data
>
> External email: Use caution opening links or attachments
>
>
> [ upstream commit d1b3b669674a17c58eabf3d631b21aaad7232403 ]
>
> There may be on-flight DMA when verify_data() runs because the DMA device may
> still be working when the worker exits.
>
> This commit adds a wait-for-DMA-completion stage before the worker exits.
>
> Fixes: 623dc9364dc6 ("app/dma-perf: introduce DMA performance test")
> Cc: stable at dpdk.org
>
> Signed-off-by: Chengwen Feng <fengchengwen at huawei.com>
> ---
> app/test-dma-perf/benchmark.c | 55 +++++++++++++++++++++++------------
> 1 file changed, 36 insertions(+), 19 deletions(-)
>
> diff --git a/app/test-dma-perf/benchmark.c b/app/test-dma-perf/benchmark.c
> index c08c5c8dc6..fc583725c4 100644
> --- a/app/test-dma-perf/benchmark.c
> +++ b/app/test-dma-perf/benchmark.c
> @@ -19,7 +19,6 @@
> #define MAX_DMA_CPL_NB 255
>
> #define TEST_WAIT_U_SECOND 10000
> -#define POLL_MAX 1000
>
> #define CSV_LINE_DMA_FMT "Scenario %u,%u,%s,%u,%u,%u,%u,%.2lf,%"
> PRIu64 ",%.3lf,%.3lf\n"
> #define CSV_LINE_CPU_FMT "Scenario %u,%u,NA,NA,NA,%u,%u,%.2lf,%"
> PRIu64 ",%.3lf,%.3lf\n"
> @@ -282,6 +281,40 @@ do_dma_submit_and_poll(uint16_t dev_id, uint64_t
> *async_cnt,
> worker_info->total_cpl += nr_cpl; }
>
> +static int
> +do_dma_submit_and_wait_cpl(uint16_t dev_id, uint64_t async_cnt) {
> +#define MAX_WAIT_MSEC 1000
> +#define MAX_POLL 1000
> +#define DEQ_SZ 64
> + enum rte_dma_vchan_status st;
> + uint32_t poll_cnt = 0;
> + uint32_t wait_ms = 0;
> + uint16_t nr_cpl;
> +
> + rte_dma_submit(dev_id, 0);
> +
> + if (rte_dma_vchan_status(dev_id, 0, &st) < 0) {
> + rte_delay_ms(MAX_WAIT_MSEC);
> + goto wait_cpl;
> + }
> +
> + while (st == RTE_DMA_VCHAN_ACTIVE && wait_ms++ <
> MAX_WAIT_MSEC) {
> + rte_delay_ms(1);
> + rte_dma_vchan_status(dev_id, 0, &st);
> + }
> +
> +wait_cpl:
> + while ((async_cnt > 0) && (poll_cnt++ < MAX_POLL)) {
> + nr_cpl = rte_dma_completed(dev_id, 0, MAX_DMA_CPL_NB, NULL,
> NULL);
> + async_cnt -= nr_cpl;
> + }
> + if (async_cnt > 0)
> + PRINT_ERR("Error: wait DMA %u failed!\n", dev_id);
> +
> + return async_cnt == 0 ? 0 : -1;
> +}
> +
> static inline int
> do_dma_plain_mem_copy(void *p)
> {
> @@ -293,10 +326,8 @@ do_dma_plain_mem_copy(void *p)
> const uint32_t buf_size = para->buf_size;
> struct rte_mbuf **srcs = para->srcs;
> struct rte_mbuf **dsts = para->dsts;
> - uint16_t nr_cpl;
> uint64_t async_cnt = 0;
> uint32_t i;
> - uint32_t poll_cnt = 0;
> int ret;
>
> worker_info->stop_flag = false;
> @@ -327,13 +358,7 @@ do_dma_plain_mem_copy(void *p)
> break;
> }
>
> - rte_dma_submit(dev_id, 0);
> - while ((async_cnt > 0) && (poll_cnt++ < POLL_MAX)) {
> - nr_cpl = rte_dma_completed(dev_id, 0, MAX_DMA_CPL_NB, NULL,
> NULL);
> - async_cnt -= nr_cpl;
> - }
> -
> - return 0;
> + return do_dma_submit_and_wait_cpl(dev_id, async_cnt);
> }
>
> static inline int
> @@ -349,8 +374,6 @@ do_dma_sg_mem_copy(void *p)
> const uint16_t dev_id = para->dev_id;
> uint32_t nr_buf = para->nr_buf;
> uint64_t async_cnt = 0;
> - uint32_t poll_cnt = 0;
> - uint16_t nr_cpl;
> uint32_t i, j;
> int ret;
>
> @@ -386,13 +409,7 @@ do_dma_sg_mem_copy(void *p)
> break;
> }
>
> - rte_dma_submit(dev_id, 0);
> - while ((async_cnt > 0) && (poll_cnt++ < POLL_MAX)) {
> - nr_cpl = rte_dma_completed(dev_id, 0, MAX_DMA_CPL_NB, NULL,
> NULL);
> - async_cnt -= nr_cpl;
> - }
> -
> - return 0;
> + return do_dma_submit_and_wait_cpl(dev_id, async_cnt);
> }
>
> static inline int
> --
> 2.17.1
Hey,
This patch has also been applied to 23.11.
Thanks,
Shani
More information about the stable
mailing list