[v6 14/15] dma/dpaa: add DMA error checks
Gagandeep Singh
g.singh at nxp.com
Mon Oct 14 11:36:38 CEST 2024
From: Jun Yang <jun.yang at nxp.com>
Add user-configurable DMA error checks.
Signed-off-by: Jun Yang <jun.yang at nxp.com>
Signed-off-by: Gagandeep Singh <g.singh at nxp.com>
---
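A minimal usage sketch of how an application could observe the errors
surfaced by this check through the generic dmadev API; the device ID,
vchan and batch size below are illustrative assumptions, not part of
this patch:

#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>
#include <rte_dmadev.h>

/* Poll completions on one vchan and report errors detected by the driver
 * when the dpaa_dma_err_check=1 devargs option is enabled.
 */
static void
poll_dma_errors(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_stats stats;
	uint16_t last_idx = 0;
	uint16_t nb_done;
	bool has_error = false;

	/* With the error check enabled, the driver reads the qDMA error
	 * registers on each dequeue and reports a failure via has_error.
	 */
	nb_done = rte_dma_completed(dev_id, vchan, 32, &last_idx, &has_error);
	if (has_error)
		printf("DMA error among %u completions on vchan %u\n",
		       nb_done, vchan);

	/* Detected errors are also accumulated in the vchan statistics. */
	if (rte_dma_stats_get(dev_id, vchan, &stats) == 0)
		printf("errors so far: %" PRIu64 "\n", stats.errors);
}

When the devargs option is not set, the fast path skips the error
register reads, so the check adds no dequeue overhead by default.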
doc/guides/dmadevs/dpaa.rst | 6 ++
drivers/dma/dpaa/dpaa_qdma.c | 135 ++++++++++++++++++++++++++++++-
drivers/dma/dpaa/dpaa_qdma.h | 42 ++++++++++
drivers/net/dpaa2/dpaa2_ethdev.c | 2 +-
4 files changed, 183 insertions(+), 2 deletions(-)
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 8a7c0befc3..a60457229a 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -69,3 +69,9 @@ Platform Requirement
DPAA DMA driver for DPDK can only work on NXP SoCs
as listed in the `Supported DPAA SoCs`_.
+
+Device Arguments
+----------------
+
+Use the devargs option ``dpaa_dma_err_check=1`` to enable DMA error checks
+at the driver level. Usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1``
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 7c199b6dd0..7b9d893cbe 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,11 +4,15 @@
#include <bus_dpaa_driver.h>
#include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
#include "dpaa_qdma.h"
#include "dpaa_qdma_logs.h"
static uint32_t s_sg_max_entry_sz = 2000;
+static bool s_hw_err_check;
+
+#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
static inline void
qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -638,7 +642,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
check_num = 0;
overflow_check:
- if (fsl_qdma->is_silent) {
+ if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
reg = qdma_readl_be(block +
FSL_QDMA_BCQSR(fsl_queue->queue_id));
overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -1076,13 +1080,81 @@ dpaa_qdma_copy_sg(void *dev_private,
return ret;
}
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+ struct fsl_qdma_err_reg local;
+ size_t i, offset = 0;
+ char err_msg[512];
+
+ local.dedr_be = rte_read32(&reg->dedr_be);
+ if (!local.dedr_be)
+ return 0;
+ offset = sprintf(err_msg, "ERR detected:");
+ if (local.dedr.ere) {
+ offset += sprintf(&err_msg[offset],
+ " ere(Enqueue rejection error)");
+ }
+ if (local.dedr.dde) {
+ offset += sprintf(&err_msg[offset],
+ " dde(Destination descriptor error)");
+ }
+ if (local.dedr.sde) {
+ offset += sprintf(&err_msg[offset],
+ " sde(Source descriptor error)");
+ }
+ if (local.dedr.cde) {
+ offset += sprintf(&err_msg[offset],
+ " cde(Command descriptor error)");
+ }
+ if (local.dedr.wte) {
+ offset += sprintf(&err_msg[offset],
+ " wte(Write transaction error)");
+ }
+ if (local.dedr.rte) {
+ offset += sprintf(&err_msg[offset],
+ " rte(Read transaction error)");
+ }
+ if (local.dedr.me) {
+ offset += sprintf(&err_msg[offset],
+ " me(Multiple errors of the same type)");
+ }
+ DPAA_QDMA_ERR("%s", err_msg);
+ for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+ local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+ QDMA_IN(&reg->deccd_le[i]);
+ }
+ local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+ local.decbr = rte_read32(&reg->decbr);
+
+ offset = sprintf(err_msg, "ERR command:");
+ offset += sprintf(&err_msg[offset],
+ " status: %02x, ser: %d, offset:%d, fmt: %02x",
+ local.err_cmd.status, local.err_cmd.ser,
+ local.err_cmd.offset, local.err_cmd.format);
+ offset += sprintf(&err_msg[offset],
+ " address: 0x%"PRIx64", queue: %d, dd: %02x",
+ (uint64_t)local.err_cmd.addr_hi << 32 |
+ local.err_cmd.addr_lo,
+ local.err_cmd.queue, local.err_cmd.dd);
+ DPAA_QDMA_ERR("%s", err_msg);
+ DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
+ local.deccqidr.block, local.deccqidr.queue);
+
+ rte_write32(local.dedr_be, &reg->dedr_be);
+
+ return -EIO;
+}
+
static uint16_t
dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
const uint16_t nb_cpls, uint16_t *last_idx,
enum rte_dma_status_code *st)
{
struct fsl_qdma_engine *fsl_qdma = dev_private;
+ int err;
struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+ void *status = fsl_qdma->status_base;
struct fsl_qdma_desc *desc_complete[nb_cpls];
uint16_t i, dq_num;
@@ -1107,6 +1179,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
st[i] = RTE_DMA_STATUS_SUCCESSFUL;
}
+ if (s_hw_err_check) {
+ err = dpaa_qdma_err_handle(status +
+ FSL_QDMA_ERR_REG_STATUS_OFFSET);
+ if (err)
+ fsl_queue->stats.errors++;
+ }
return dq_num;
}
@@ -1117,7 +1195,9 @@ dpaa_qdma_dequeue(void *dev_private,
uint16_t *last_idx, bool *has_error)
{
struct fsl_qdma_engine *fsl_qdma = dev_private;
+ int err;
struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+ void *status = fsl_qdma->status_base;
struct fsl_qdma_desc *desc_complete[nb_cpls];
uint16_t i, dq_num;
@@ -1138,6 +1218,16 @@ dpaa_qdma_dequeue(void *dev_private,
for (i = 0; i < dq_num; i++)
last_idx[i] = desc_complete[i]->flag;
+ if (s_hw_err_check) {
+ err = dpaa_qdma_err_handle(status +
+ FSL_QDMA_ERR_REG_STATUS_OFFSET);
+ if (err) {
+ if (has_error)
+ *has_error = true;
+ fsl_queue->stats.errors++;
+ }
+ }
+
return dq_num;
}
@@ -1189,6 +1279,43 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = {
.stats_reset = dpaa_qdma_stats_reset,
};
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
static int
dpaa_qdma_init(struct rte_dma_dev *dmadev)
{
@@ -1199,6 +1326,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
int ret;
uint32_t i, j, k;
+ if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
+ s_hw_err_check = true;
+ DPAA_QDMA_INFO("Enable DMA error checks");
+ }
+
fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
fsl_qdma->num_blocks = QDMA_BLOCKS;
fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -1340,4 +1472,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
};
RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 1e820d0207..91eaf1455a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -199,6 +199,48 @@ struct fsl_qdma_cmpd_ft {
uint64_t phy_df;
} __rte_packed;
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+ uint32_t me:1;
+ uint32_t rsv0:1;
+ uint32_t rte:1;
+ uint32_t wte:1;
+ uint32_t cde:1;
+ uint32_t sde:1;
+ uint32_t dde:1;
+ uint32_t ere:1;
+ uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+ uint32_t rsv:27;
+ uint32_t block:2;
+ uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+ (sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+ uint32_t deier;
+ union {
+ rte_be32_t dedr_be;
+ struct fsl_qdma_dedr_reg dedr;
+ };
+ uint32_t rsv0[2];
+ union {
+ rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+ struct fsl_qdma_comp_cmd_desc err_cmd;
+ };
+ uint32_t rsv1[4];
+ union {
+ rte_be32_t deccqidr_be;
+ struct fsl_qdma_deccqidr_reg deccqidr;
+ };
+ rte_be32_t decbr;
+};
+
#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 93b88acef8..408418f032 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2685,7 +2685,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
dpaa2_enable_err_queue = 1;
- DPAA2_PMD_INFO("Enable error queue");
+ DPAA2_PMD_INFO("Enable DMA error checks");
}
/* Allocate memory for hardware structure for queues */
--
2.25.1