[dpdk-dev] [PATCH v5 06/25] raw/ioat: split header for readability

Bruce Richardson bruce.richardson at intel.com
Wed Oct 7 18:30:04 CEST 2020


Rather than having a single long, complicated header file for general use,
split it in two: one header carrying all the publicly needed information -
data structures and function prototypes - while the internal implementation
details live in a separate file. This makes the APIs easier to read,
understand and use.

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz at intel.com>
Acked-by: Radu Nicolau <radu.nicolau at intel.com>
---
 drivers/raw/ioat/meson.build           |   1 +
 drivers/raw/ioat/rte_ioat_rawdev.h     | 147 +---------------------
 drivers/raw/ioat/rte_ioat_rawdev_fns.h | 168 +++++++++++++++++++++++++
 3 files changed, 175 insertions(+), 141 deletions(-)
 create mode 100644 drivers/raw/ioat/rte_ioat_rawdev_fns.h

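For reference, a minimal usage sketch of the public API that remains in
rte_ioat_rawdev.h (illustrative only, not part of the patch; it assumes
dev_id names an ioat rawdev instance that has already been configured and
started, and that src/dst are IOVA addresses of DMA-safe buffers obtained
elsewhere):

#include <rte_ioat_rawdev.h>
#include <rte_errno.h>

static int
do_one_copy(int dev_id, phys_addr_t src, phys_addr_t dst, unsigned int len)
{
	uintptr_t src_hdls[1], dst_hdls[1];
	int ret;

	/* returns 1 on success, 0 if the descriptor ring is full */
	if (rte_ioat_enqueue_copy(dev_id, src, dst, len,
			(uintptr_t)src, (uintptr_t)dst, 0 /* no fence */) != 1)
		return -1;

	rte_ioat_do_copies(dev_id);	/* trigger the hardware */

	/* poll; returns -1 and sets rte_errno to EIO on device error */
	do {
		ret = rte_ioat_completed_copies(dev_id, 1,
				src_hdls, dst_hdls);
	} while (ret == 0);

	return ret;	/* 1 on success, -1 on error */
}
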
diff --git a/drivers/raw/ioat/meson.build b/drivers/raw/ioat/meson.build
index 0878418ae..f66e9b605 100644
--- a/drivers/raw/ioat/meson.build
+++ b/drivers/raw/ioat/meson.build
@@ -8,4 +8,5 @@ sources = files('ioat_rawdev.c',
 deps += ['rawdev', 'bus_pci', 'mbuf']
 
 install_headers('rte_ioat_rawdev.h',
+		'rte_ioat_rawdev_fns.h',
 		'rte_ioat_spec.h')
diff --git a/drivers/raw/ioat/rte_ioat_rawdev.h b/drivers/raw/ioat/rte_ioat_rawdev.h
index 28ce95cc9..7067b352f 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev.h
@@ -18,12 +18,7 @@ extern "C" {
  * @b EXPERIMENTAL: these structures and APIs may change without prior notice
  */
 
-#include <x86intrin.h>
-#include <rte_atomic.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_prefetch.h>
-#include "rte_ioat_spec.h"
+#include <rte_common.h>
 
 /** Name of the device driver */
 #define IOAT_PMD_RAWDEV_NAME rawdev_ioat
@@ -42,38 +37,6 @@ struct rte_ioat_rawdev_config {
 	bool hdls_disable;    /**< if set, ignore user-supplied handle params */
 };
 
-/**
- * @internal
- * Structure representing a device instance
- */
-struct rte_ioat_rawdev {
-	struct rte_rawdev *rawdev;
-	const struct rte_memzone *mz;
-	const struct rte_memzone *desc_mz;
-
-	volatile struct rte_ioat_registers *regs;
-	phys_addr_t status_addr;
-	phys_addr_t ring_addr;
-
-	unsigned short ring_size;
-	struct rte_ioat_generic_hw_desc *desc_ring;
-	bool hdls_disable;
-	__m128i *hdls; /* completion handles for returning to user */
-
-
-	unsigned short next_read;
-	unsigned short next_write;
-
-	/* some statistics for tracking, if added/changed update xstats fns*/
-	uint64_t enqueue_failed __rte_cache_aligned;
-	uint64_t enqueued;
-	uint64_t started;
-	uint64_t completed;
-
-	/* to report completions, the device will write status back here */
-	volatile uint64_t status __rte_cache_aligned;
-};
-
 /**
  * Enqueue a copy operation onto the ioat device
  *
@@ -108,39 +71,7 @@ struct rte_ioat_rawdev {
 static inline int
 rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
 		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
-		int fence)
-{
-	struct rte_ioat_rawdev *ioat =
-			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
-	unsigned short read = ioat->next_read;
-	unsigned short write = ioat->next_write;
-	unsigned short mask = ioat->ring_size - 1;
-	unsigned short space = mask + read - write;
-	struct rte_ioat_generic_hw_desc *desc;
-
-	if (space == 0) {
-		ioat->enqueue_failed++;
-		return 0;
-	}
-
-	ioat->next_write = write + 1;
-	write &= mask;
-
-	desc = &ioat->desc_ring[write];
-	desc->size = length;
-	/* set descriptor write-back every 16th descriptor */
-	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
-	desc->src_addr = src;
-	desc->dest_addr = dst;
-	if (!ioat->hdls_disable)
-		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
-					(int64_t)src_hdl);
-
-	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
-
-	ioat->enqueued++;
-	return 1;
-}
+		int fence);
 
 /**
  * Trigger hardware to begin performing enqueued copy operations
@@ -152,32 +83,7 @@ rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
  *   The rawdev device id of the ioat instance
  */
 static inline void
-rte_ioat_do_copies(int dev_id)
-{
-	struct rte_ioat_rawdev *ioat =
-			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
-	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
-			.control.completion_update = 1;
-	rte_compiler_barrier();
-	ioat->regs->dmacount = ioat->next_write;
-	ioat->started = ioat->enqueued;
-}
-
-/**
- * @internal
- * Returns the index of the last completed operation.
- */
-static inline int
-rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
-{
-	uint64_t status = ioat->status;
-
-	/* lower 3 bits indicate "transfer status" : active, idle, halted.
-	 * We can ignore bit 0.
-	 */
-	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
-	return (status - ioat->ring_addr) >> 6;
-}
+rte_ioat_do_copies(int dev_id);
 
 /**
  * Returns details of copy operations that have been completed
@@ -212,51 +118,10 @@ rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
  */
 static inline int
 rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
-		uintptr_t *src_hdls, uintptr_t *dst_hdls)
-{
-	struct rte_ioat_rawdev *ioat =
-			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
-	unsigned short mask = (ioat->ring_size - 1);
-	unsigned short read = ioat->next_read;
-	unsigned short end_read, count;
-	int error;
-	int i = 0;
-
-	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
-	count = (end_read - (read & mask)) & mask;
-
-	if (error) {
-		rte_errno = EIO;
-		return -1;
-	}
-
-	if (ioat->hdls_disable) {
-		read += count;
-		goto end;
-	}
+		uintptr_t *src_hdls, uintptr_t *dst_hdls);
 
-	if (count > max_copies)
-		count = max_copies;
-
-	for (; i < count - 1; i += 2, read += 2) {
-		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
-		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
-
-		_mm_storeu_si128((__m128i *)&src_hdls[i],
-				_mm_unpacklo_epi64(hdls0, hdls1));
-		_mm_storeu_si128((__m128i *)&dst_hdls[i],
-				_mm_unpackhi_epi64(hdls0, hdls1));
-	}
-	for (; i < count; i++, read++) {
-		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
-		src_hdls[i] = hdls[0];
-		dst_hdls[i] = hdls[1];
-	}
-end:
-	ioat->next_read = read;
-	ioat->completed += count;
-	return count;
-}
+/* include the implementation details from a separate file */
+#include "rte_ioat_rawdev_fns.h"
 
 #ifdef __cplusplus
 }
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
new file mode 100644
index 000000000..4b7bdb8e2
--- /dev/null
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+#ifndef _RTE_IOAT_RAWDEV_FNS_H_
+#define _RTE_IOAT_RAWDEV_FNS_H_
+
+#include <x86intrin.h>
+#include <rte_rawdev.h>
+#include <rte_memzone.h>
+#include <rte_prefetch.h>
+#include "rte_ioat_spec.h"
+
+/**
+ * @internal
+ * Structure representing a device instance
+ */
+struct rte_ioat_rawdev {
+	struct rte_rawdev *rawdev;
+	const struct rte_memzone *mz;
+	const struct rte_memzone *desc_mz;
+
+	volatile struct rte_ioat_registers *regs;
+	phys_addr_t status_addr;
+	phys_addr_t ring_addr;
+
+	unsigned short ring_size;
+	bool hdls_disable;
+	struct rte_ioat_generic_hw_desc *desc_ring;
+	__m128i *hdls; /* completion handles for returning to user */
+
+
+	unsigned short next_read;
+	unsigned short next_write;
+
+	/* some statistics for tracking; if added/changed, update xstats fns */
+	uint64_t enqueue_failed __rte_cache_aligned;
+	uint64_t enqueued;
+	uint64_t started;
+	uint64_t completed;
+
+	/* to report completions, the device will write status back here */
+	volatile uint64_t status __rte_cache_aligned;
+};
+
+/*
+ * Enqueue a copy operation onto the ioat device
+ */
+static inline int
+rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
+		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
+		int fence)
+{
+	struct rte_ioat_rawdev *ioat =
+			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+	unsigned short read = ioat->next_read;
+	unsigned short write = ioat->next_write;
+	unsigned short mask = ioat->ring_size - 1;
+	unsigned short space = mask + read - write;
+	struct rte_ioat_generic_hw_desc *desc;
+
+	if (space == 0) {
+		ioat->enqueue_failed++;
+		return 0;
+	}
+
+	ioat->next_write = write + 1;
+	write &= mask;
+
+	desc = &ioat->desc_ring[write];
+	desc->size = length;
+	/* set fence (bit 4) and completion write-back every 16th descriptor (bit 3) */
+	desc->u.control_raw = (uint32_t)((!!fence << 4) | (!(write & 0xF)) << 3);
+	desc->src_addr = src;
+	desc->dest_addr = dst;
+
+	if (!ioat->hdls_disable)
+		ioat->hdls[write] = _mm_set_epi64x((int64_t)dst_hdl,
+					(int64_t)src_hdl);
+	rte_prefetch0(&ioat->desc_ring[ioat->next_write & mask]);
+
+	ioat->enqueued++;
+	return 1;
+}
+
+/*
+ * Trigger hardware to begin performing enqueued copy operations
+ */
+static inline void
+rte_ioat_do_copies(int dev_id)
+{
+	struct rte_ioat_rawdev *ioat =
+			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+	ioat->desc_ring[(ioat->next_write - 1) & (ioat->ring_size - 1)].u
+			.control.completion_update = 1;
+	rte_compiler_barrier();
+	ioat->regs->dmacount = ioat->next_write;
+	ioat->started = ioat->enqueued;
+}
+
+/**
+ * @internal
+ * Returns the index of the last completed operation.
+ */
+static inline int
+rte_ioat_get_last_completed(struct rte_ioat_rawdev *ioat, int *error)
+{
+	uint64_t status = ioat->status;
+
+	/* lower 3 bits indicate "transfer status": active, idle, halted.
+	 * We can ignore bit 0.
+	 */
+	*error = status & (RTE_IOAT_CHANSTS_SUSPENDED | RTE_IOAT_CHANSTS_ARMED);
+	return (status - ioat->ring_addr) >> 6;
+}
+
+/*
+ * Returns details of copy operations that have been completed
+ */
+static inline int
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+		uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+	struct rte_ioat_rawdev *ioat =
+			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+	unsigned short mask = (ioat->ring_size - 1);
+	unsigned short read = ioat->next_read;
+	unsigned short end_read, count;
+	int error;
+	int i = 0;
+
+	end_read = (rte_ioat_get_last_completed(ioat, &error) + 1) & mask;
+	count = (end_read - (read & mask)) & mask;
+
+	if (error) {
+		rte_errno = EIO;
+		return -1;
+	}
+
+	if (ioat->hdls_disable) {
+		read += count;
+		goto end;
+	}
+
+	if (count > max_copies)
+		count = max_copies;
+
+	for (; i < count - 1; i += 2, read += 2) {
+		__m128i hdls0 = _mm_load_si128(&ioat->hdls[read & mask]);
+		__m128i hdls1 = _mm_load_si128(&ioat->hdls[(read + 1) & mask]);
+
+		_mm_storeu_si128((__m128i *)&src_hdls[i],
+				_mm_unpacklo_epi64(hdls0, hdls1));
+		_mm_storeu_si128((__m128i *)&dst_hdls[i],
+				_mm_unpackhi_epi64(hdls0, hdls1));
+	}
+	for (; i < count; i++, read++) {
+		uintptr_t *hdls = (uintptr_t *)&ioat->hdls[read & mask];
+		src_hdls[i] = hdls[0];
+		dst_hdls[i] = hdls[1];
+	}
+
+end:
+	ioat->next_read = read;
+	ioat->completed += count;
+	return count;
+}
+
+#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
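
A few notes on the arithmetic in the functions above, based on a reading of
the code rather than anything normative in the patch:

- the ring size is a power of two, so ring_size - 1 serves as the index mask
  and space = mask + read - write keeps one slot permanently unused to
  distinguish a full ring from an empty one. For example, with ring_size = 8
  (mask = 7): read == write gives space == 7 (empty, 7 usable slots), while
  write == read + 7 gives space == 0 (full). The 16-bit unsigned indices wrap
  around harmlessly because ring_size divides 65536.

- the hardware writes the physical address of the most recently completed
  descriptor back into the status word, and each generic hw descriptor is
  64 bytes, so (status - ring_addr) >> 6 recovers the ring index of the last
  completion in rte_ioat_get_last_completed().

- each __m128i in hdls[] packs one (src_hdl, dst_hdl) pair with the source
  handle in the low lane, so _mm_unpacklo_epi64/_mm_unpackhi_epi64 gather two
  source and two destination handles per loop iteration when returning them
  to the caller.
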
-- 
2.25.1


