[PATCH v15 06/21] common/zsda: add functions to operate hardware queue

Hanxiao Li li.hanxiao at zte.com.cn
Fri Oct 25 16:10:29 CEST 2024


Add functions to operate the hardware queues of the zsda device: read the
number of queue pairs in use, query each queue's configuration through the
admin queue, and start, stop and clear the admin and I/O queues.

Signed-off-by: Hanxiao Li <li.hanxiao at zte.com.cn>
---
 drivers/common/zsda/meson.build |   1 +
 drivers/common/zsda/zsda_qp.c   | 272 ++++++++++++++++++++++++++++++++
 drivers/common/zsda/zsda_qp.h   |  15 ++
 3 files changed, 288 insertions(+)
 create mode 100644 drivers/common/zsda/zsda_qp.c
 create mode 100644 drivers/common/zsda/zsda_qp.h

diff --git a/drivers/common/zsda/meson.build b/drivers/common/zsda/meson.build
index 376876f4ed..a4859523e2 100644
--- a/drivers/common/zsda/meson.build
+++ b/drivers/common/zsda/meson.build
@@ -12,4 +12,5 @@ sources += files(
 		'zsda_logs.c',
 		'zsda_common.c',
 		'zsda_device.c',
+		'zsda_qp.c',
 		)
diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c
new file mode 100644
index 0000000000..95f4ca88d4
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.c
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "zsda_common.h"
+#include "zsda_logs.h"
+#include "zsda_device.h"
+#include "zsda_qp.h"
+
+#define RING_DIR_TX 0
+#define RING_DIR_RX 1
+
+struct ring_size {
+	uint16_t tx_msg_size;
+	uint16_t rx_msg_size;
+};
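+
+/* Number of hardware queue pairs in use, as reported by the device. */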
+uint8_t zsda_num_used_qps;
+
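+/* Per-service TX/RX message sizes used when setting up the hardware queue pairs. */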
+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {
+	[ZSDA_SERVICE_SYMMETRIC_ENCRYPT] = {128, 16},
+	[ZSDA_SERVICE_SYMMETRIC_DECRYPT] = {128, 16},
+	[ZSDA_SERVICE_COMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_DECOMPRESSION] = {32, 16},
+	[ZSDA_SERVICE_HASH_ENCODE] = {32, 16},
+};
+
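+/* Initialize the head/tail doorbell registers of queue @qid. */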
+static void
+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid)
+{
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+
+	ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),
+			 SET_HEAD_INTI);
+}
+
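+/* Query the configuration of queue @qid from the device through the admin queue. */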
+static int
+zsda_get_queue_cfg_by_id(const struct zsda_pci_device *zsda_pci_dev,
+			 const uint8_t qid, struct qinfo *qcfg)
+{
+	struct zsda_admin_req_qcfg req = {0};
+	struct zsda_admin_resp_qcfg resp = {0};
+	int ret;
+	struct rte_pci_device *pci_dev =
+		zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;
+
+	if (qid >= MAX_QPS_ON_FUNCTION) {
+		ZSDA_LOG(ERR, "qid beyond limit!");
+		return ZSDA_FAILED;
+	}
+
+	zsda_admin_msg_init(pci_dev);
+	req.msg_type = ZSDA_ADMIN_QUEUE_CFG_REQ;
+	req.qid = qid;
+
+	ret = zsda_send_admin_msg(pci_dev, &req, sizeof(req));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Send msg");
+		return ret;
+	}
+
+	ret = zsda_recv_admin_msg(pci_dev, &resp, sizeof(resp));
+	if (ret) {
+		ZSDA_LOG(ERR, "Failed! Receive msg");
+		return ret;
+	}
+
+	*qcfg = resp.qcfg;
+
+	return ZSDA_SUCCESS;
+}
+
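+/* Fetch the configuration of each used queue and fill the per-service queue-pair table. */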
+int
+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)
+{
+	uint8_t i;
+	uint32_t index;
+	enum zsda_service_type type;
+	struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;
+	struct qinfo qcfg = {0};
+	int ret = ZSDA_SUCCESS;
+
+	for (i = 0; i < zsda_num_used_qps; i++) {
+		zsda_set_queue_head_tail(zsda_pci_dev, i);
+		ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);
+		if (ret) {
+			ZSDA_LOG(ERR, "Failed! Get queue cfg");
+			return ret;
+		}
+		type = qcfg.q_type;
+		if (type >= ZSDA_SERVICE_INVALID)
+			continue;
+
+		index = zsda_pci_dev->zsda_qp_hw_num[type];
+		zsda_hw_qps[type].data[index].used = true;
+		zsda_hw_qps[type].data[index].tx_ring_num = i;
+		zsda_hw_qps[type].data[index].rx_ring_num = i;
+		zsda_hw_qps[type].data[index].tx_msg_size =
+			zsda_qp_hw_ring_size[type].tx_msg_size;
+		zsda_hw_qps[type].data[index].rx_msg_size =
+			zsda_qp_hw_ring_size[type].rx_msg_size;
+
+		zsda_pci_dev->zsda_qp_hw_num[type]++;
+	}
+
+	return ret;
+}
+
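+/* Read the number of queue pairs in use from the first byte of the device MMIO space. */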
+static uint8_t
+zsda_get_num_used_qps(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t num_used_qps;
+
+	num_used_qps = ZSDA_CSR_READ8(mmio_base + 0);
+
+	return num_used_qps;
+}
+
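+/* Poll a CSR until it reads back the expected value or the retry budget expires. */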
+static int
+zsda_check_write(uint8_t *addr, const uint32_t dst_value)
+{
+	int times = ZSDA_TIME_NUM;
+	uint32_t val;
+
+	val = ZSDA_CSR_READ32(addr);
+
+	while ((val != dst_value) && times--) {
+		val = ZSDA_CSR_READ32(addr);
+		rte_delay_us_sleep(ZSDA_TIME_SLEEP_US);
+	}
+	if (val == dst_value)
+		return ZSDA_SUCCESS;
+	else
+		return ZSDA_FAILED;
+}
+
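+/* Admin queue control: write the command CSR, then wait for the device to acknowledge. */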
+static int
+zsda_admin_q_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, 0);
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_START, ZSDA_Q_START);
+
+	return ret;
+}
+
+static int
+zsda_admin_q_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_STOP, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_STOP_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(ERR, "Failed! zsda_admin q stop");
+
+	return ret;
+}
+
+static int
+zsda_admin_q_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	int ret;
+
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR_RESP, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(mmio_base + ZSDA_ADMIN_Q_CLR, ZSDA_RESP_VALID);
+
+	ret = zsda_check_write(mmio_base + ZSDA_ADMIN_Q_CLR_RESP,
+			       ZSDA_RESP_VALID);
+
+	if (ret)
+		ZSDA_LOG(ERR, "Failed! zsda_admin q clear");
+
+	return ret;
+}
+
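+/* Per-queue start/stop/clear: each I/O queue owns a 4-byte slot in the command and response register arrays. */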
+static int
+zsda_queue_start_single(uint8_t *mmio_base, const uint8_t id)
+{
+	uint8_t *addr_start = mmio_base + ZSDA_IO_Q_START + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_start, ZSDA_Q_START);
+	return zsda_check_write(addr_start, ZSDA_Q_START);
+}
+
+static int
+zsda_queue_stop_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret;
+	uint8_t *addr_stop = mmio_base + ZSDA_IO_Q_STOP + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_STOP_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_stop, ZSDA_Q_STOP);
+
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+
+	return ret;
+}
+
+static int
+zsda_queue_clear_single(uint8_t *mmio_base, const uint8_t id)
+{
+	int ret;
+	uint8_t *addr_clear = mmio_base + ZSDA_IO_Q_CLR + (4 * id);
+	uint8_t *addr_resp = mmio_base + ZSDA_IO_Q_CLR_RESP + (4 * id);
+
+	ZSDA_CSR_WRITE32(addr_resp, ZSDA_RESP_INVALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_VALID);
+	ret = zsda_check_write(addr_resp, ZSDA_RESP_VALID);
+	ZSDA_CSR_WRITE32(addr_clear, ZSDA_CLEAR_INVALID);
+
+	return ret;
+}
+
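+/* Start all queues that are in use. */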
+int
+zsda_queue_start(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = ZSDA_SUCCESS;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_start_single(mmio_base, id);
+
+	return ret;
+}
+
+int
+zsda_queue_stop(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = ZSDA_SUCCESS;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_stop_single(mmio_base, id);
+
+	return ret;
+}
+
+static int
+zsda_queue_clear(const struct rte_pci_device *pci_dev)
+{
+	uint8_t *mmio_base = pci_dev->mem_resource[0].addr;
+	uint8_t id;
+	int ret = ZSDA_SUCCESS;
+
+	for (id = 0; id < zsda_num_used_qps; id++)
+		ret |= zsda_queue_clear_single(mmio_base, id);
+
+	return ret;
+}
diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h
new file mode 100644
index 0000000000..00b1137ba3
--- /dev/null
+++ b/drivers/common/zsda/zsda_qp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef _ZSDA_QP_H_
+#define _ZSDA_QP_H_
+
+int zsda_queue_start(const struct rte_pci_device *pci_dev);
+int zsda_queue_stop(const struct rte_pci_device *pci_dev);
+
+int zsda_queue_init(struct zsda_pci_device *zsda_pci_dev);
+
+int zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);
+
+#endif /* _ZSDA_QP_H_ */
-- 
2.27.0