[dpdk-dev] [PATCH v2 07/12] raw/ioat: allow perform operations function to return error

Bruce Richardson bruce.richardson at intel.com
Mon Apr 26 11:52:54 CEST 2021


From: Kevin Laatz <kevin.laatz at intel.com>

Change the return type of the rte_ioat_perform_ops() function from void to
int to allow the possibility of returning an error code in the future,
should it become necessary.

Signed-off-by: Kevin Laatz <kevin.laatz at intel.com>
---
 drivers/raw/ioat/rte_ioat_rawdev.h     |  4 +++-
 drivers/raw/ioat/rte_ioat_rawdev_fns.h | 11 +++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/raw/ioat/rte_ioat_rawdev.h b/drivers/raw/ioat/rte_ioat_rawdev.h
index f9e8425a7f..e5a22a0799 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev.h
@@ -124,8 +124,10 @@ rte_ioat_fence(int dev_id);
  *
  * @param dev_id
  *   The rawdev device id of the ioat instance
+ * @return
+ *   0 on success. Non-zero return on error.
  */
-static inline void
+static inline int
 __rte_experimental
 rte_ioat_perform_ops(int dev_id);
 
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
index e96edc9053..477c1b7b41 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -291,7 +291,7 @@ __ioat_fence(int dev_id)
 /*
  * Trigger hardware to begin performing enqueued operations
  */
-static __rte_always_inline void
+static __rte_always_inline int
 __ioat_perform_ops(int dev_id)
 {
 	struct rte_ioat_rawdev *ioat =
@@ -301,6 +301,8 @@ __ioat_perform_ops(int dev_id)
 	rte_compiler_barrier();
 	*ioat->doorbell = ioat->next_write;
 	ioat->xstats.started = ioat->xstats.enqueued;
+
+	return 0;
 }
 
 /**
@@ -462,7 +464,7 @@ __idxd_movdir64b(volatile void *dst, const void *src)
 			: "a" (dst), "d" (src));
 }
 
-static __rte_always_inline void
+static __rte_always_inline int
 __idxd_perform_ops(int dev_id)
 {
 	struct rte_idxd_rawdev *idxd =
@@ -470,7 +472,7 @@ __idxd_perform_ops(int dev_id)
 	struct rte_idxd_desc_batch *b = &idxd->batch_ring[idxd->next_batch];
 
 	if (b->submitted || b->op_count == 0)
-		return;
+		return 0;
 	b->hdl_end = idxd->next_free_hdl;
 	b->comp.status = 0;
 	b->submitted = 1;
@@ -480,6 +482,7 @@ __idxd_perform_ops(int dev_id)
 	if (++idxd->next_batch == idxd->batch_ring_sz)
 		idxd->next_batch = 0;
 	idxd->xstats.started = idxd->xstats.enqueued;
+	return 0;
 }
 
 static __rte_always_inline int
@@ -558,7 +561,7 @@ rte_ioat_fence(int dev_id)
 		return __ioat_fence(dev_id);
 }
 
-static inline void
+static inline int
 rte_ioat_perform_ops(int dev_id)
 {
 	enum rte_ioat_dev_type *type =
-- 
2.30.2



More information about the dev mailing list