[dpdk-dev] [PATCH v7 13/16] dma/idxd: add burst capacity API

Kevin Laatz kevin.laatz at intel.com
Wed Oct 13 18:30:50 CEST 2021


Add support for the burst capacity API. This API provides the calling
application with the remaining capacity of the current burst (limited by
the max HW batch size).
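
For context, below is a minimal sketch of how an application might use
this capability through the dmadev library, assuming the matching
rte_dma_burst_capacity() API on the application side (the helper name
and the dev_id/vchan/buffer parameters are illustrative only):

    #include <rte_common.h>
    #include <rte_dmadev.h>

    /* Hypothetical helper: enqueue at most the reported burst capacity */
    static uint16_t
    enqueue_up_to_capacity(int16_t dev_id, uint16_t vchan,
                    const rte_iova_t *srcs, const rte_iova_t *dsts,
                    const uint32_t *lens, uint16_t nb_ops)
    {
            uint16_t cap = rte_dma_burst_capacity(dev_id, vchan);
            uint16_t n = RTE_MIN(cap, nb_ops);
            uint16_t i;

            for (i = 0; i < n; i++)
                    if (rte_dma_copy(dev_id, vchan, srcs[i], dsts[i],
                                    lens[i], 0) < 0)
                            break;
            /* make the enqueued copies visible to the hardware */
            rte_dma_submit(dev_id, vchan);
            return i;
    }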

Signed-off-by: Kevin Laatz <kevin.laatz at intel.com>
Reviewed-by: Conor Walsh <conor.walsh at intel.com>
Reviewed-by: Bruce Richardson <bruce.richardson at intel.com>
---
 drivers/dma/idxd/idxd_common.c   | 21 +++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  1 +
 drivers/dma/idxd/idxd_pci.c      |  1 +
 3 files changed, 23 insertions(+)

diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index a2edc8a91f..fcad437275 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -468,6 +468,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
 	return 0;
 }
 
+uint16_t
+idxd_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
+{
+	const struct idxd_dmadev *idxd = dev_private;
+	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+	uint16_t used_space;
+
+	/* Check for space in the batch ring */
+	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+		return 0;
+
+	/* For descriptors, check for wrap-around on write but not read */
+	if (idxd->ids_returned > write_idx)
+		write_idx += idxd->desc_ring_mask + 1;
+	used_space = write_idx - idxd->ids_returned;
+
+	return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size);
+}
+
 int
 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
 		uint32_t conf_sz)
@@ -553,6 +573,7 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
 	dmadev->fp_obj->submit = idxd_submit;
 	dmadev->fp_obj->completed = idxd_completed;
 	dmadev->fp_obj->completed_status = idxd_completed_status;
+	dmadev->fp_obj->burst_capacity = idxd_burst_capacity;
 
 	idxd = dmadev->data->dev_private;
 	*idxd = *base_idxd; /* copy over the main fields already passed in */
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 2b16a358e3..67ee4afc7b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -103,5 +103,6 @@ int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
 int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
 int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
 		enum rte_dma_vchan_status *status);
+uint16_t idxd_burst_capacity(const void *dev_private, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 5abf2ad55b..916af296c2 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -254,6 +254,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
 
 	idxd->u.pci = pci;
 	idxd->max_batches = wq_size;
+	idxd->max_batch_size = 1 << lg2_max_batch;
 
 	/* enable the device itself */
 	err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
-- 
2.30.2


