[dpdk-dev] [PATCH v4 4/8] crypto/bcmfs: add HW queue pair operations

Vikas Gupta vikas.gupta at broadcom.com
Wed Oct 7 18:45:35 CEST 2020


Add queue pair operations exported by supported devices.

Signed-off-by: Vikas Gupta <vikas.gupta at broadcom.com>
Signed-off-by: Raveendra Padasalagi <raveendra.padasalagi at broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
---
 drivers/crypto/bcmfs/bcmfs_dev_msg.h      |  29 +
 drivers/crypto/bcmfs/bcmfs_device.c       |  51 ++
 drivers/crypto/bcmfs/bcmfs_device.h       |  16 +
 drivers/crypto/bcmfs/bcmfs_qp.c           |   1 +
 drivers/crypto/bcmfs/bcmfs_qp.h           |   4 +
 drivers/crypto/bcmfs/hw/bcmfs4_rm.c       | 743 ++++++++++++++++++++++
 drivers/crypto/bcmfs/hw/bcmfs5_rm.c       | 677 ++++++++++++++++++++
 drivers/crypto/bcmfs/hw/bcmfs_rm_common.c |  82 +++
 drivers/crypto/bcmfs/hw/bcmfs_rm_common.h |  51 ++
 drivers/crypto/bcmfs/meson.build          |   5 +-
 10 files changed, 1658 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/bcmfs/bcmfs_dev_msg.h
 create mode 100644 drivers/crypto/bcmfs/hw/bcmfs4_rm.c
 create mode 100644 drivers/crypto/bcmfs/hw/bcmfs5_rm.c
 create mode 100644 drivers/crypto/bcmfs/hw/bcmfs_rm_common.c
 create mode 100644 drivers/crypto/bcmfs/hw/bcmfs_rm_common.h

diff --git a/drivers/crypto/bcmfs/bcmfs_dev_msg.h b/drivers/crypto/bcmfs/bcmfs_dev_msg.h
new file mode 100644
index 0000000000..5b50bde35a
--- /dev/null
+++ b/drivers/crypto/bcmfs/bcmfs_dev_msg.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BCMFS_DEV_MSG_H_
+#define _BCMFS_DEV_MSG_H_
+
+#define MAX_SRC_ADDR_BUFFERS    8
+#define MAX_DST_ADDR_BUFFERS    3
+
+/* A request message handed to a HW queue pair for processing */
+struct bcmfs_qp_message {
+	/** Physical address of each source */
+	uint64_t srcs_addr[MAX_SRC_ADDR_BUFFERS];
+	/** Length in bytes of each source */
+	uint32_t srcs_len[MAX_SRC_ADDR_BUFFERS];
+	/** Total number of sources */
+	unsigned int srcs_count;
+	/** Physical address of each destination */
+	uint64_t dsts_addr[MAX_DST_ADDR_BUFFERS];
+	/** Length in bytes of each destination */
+	uint32_t dsts_len[MAX_DST_ADDR_BUFFERS];
+	/** Total number of destinations */
+	unsigned int dsts_count;
+
+	/** Opaque caller context carried with the message (not interpreted here) */
+	void *ctx;
+};
+
+#endif /* _BCMFS_DEV_MSG_H_ */
diff --git a/drivers/crypto/bcmfs/bcmfs_device.c b/drivers/crypto/bcmfs/bcmfs_device.c
index a01a5c79d5..07423d3cc1 100644
--- a/drivers/crypto/bcmfs/bcmfs_device.c
+++ b/drivers/crypto/bcmfs/bcmfs_device.c
@@ -44,6 +44,47 @@ static struct bcmfs_device_attr dev_table[] = {
 	}
 };
 
+/* Global registry of HW queue pair ops; tl guards num_ops/qp_ops updates */
+struct bcmfs_hw_queue_pair_ops_table bcmfs_hw_queue_pair_ops_table = {
+	.tl =  RTE_SPINLOCK_INITIALIZER,
+	.num_ops = 0
+};
+
+/*
+ * Register a set of HW queue pair callbacks in the global ops table.
+ * All callbacks are mandatory. Returns the assigned table index on
+ * success, or a negative errno on failure.
+ */
+int bcmfs_hw_queue_pair_register_ops(const struct bcmfs_hw_queue_pair_ops *h)
+{
+	struct bcmfs_hw_queue_pair_ops *ops;
+	int16_t ops_index;
+
+	rte_spinlock_lock(&bcmfs_hw_queue_pair_ops_table.tl);
+
+	/* Every callback must be provided */
+	if (h->enq_one_req == NULL || h->dequeue == NULL ||
+	    h->ring_db == NULL || h->startq == NULL || h->stopq == NULL) {
+		rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
+		BCMFS_LOG(ERR,
+			  "Missing callback while registering device ops");
+		return -EINVAL;
+	}
+
+	if (strlen(h->name) >= sizeof(ops->name) - 1) {
+		rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
+		BCMFS_LOG(ERR, "%s(): fs device_ops <%s>: name too long",
+				__func__, h->name);
+		return -EEXIST;
+	}
+
+	/* Fix: reject registration once the fixed-size table is full,
+	 * otherwise num_ops++ below would index past qp_ops[].
+	 */
+	if (bcmfs_hw_queue_pair_ops_table.num_ops >= BCMFS_MAX_NODES) {
+		rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
+		BCMFS_LOG(ERR, "Device ops table is full");
+		return -ENOSPC;
+	}
+
+	ops_index = bcmfs_hw_queue_pair_ops_table.num_ops++;
+	ops = &bcmfs_hw_queue_pair_ops_table.qp_ops[ops_index];
+	strlcpy(ops->name, h->name, sizeof(ops->name));
+	ops->enq_one_req = h->enq_one_req;
+	ops->dequeue = h->dequeue;
+	ops->ring_db = h->ring_db;
+	ops->startq = h->startq;
+	ops->stopq = h->stopq;
+
+	rte_spinlock_unlock(&bcmfs_hw_queue_pair_ops_table.tl);
+
+	return ops_index;
+}
+
 TAILQ_HEAD(fsdev_list, bcmfs_device);
 static struct fsdev_list fsdev_list = TAILQ_HEAD_INITIALIZER(fsdev_list);
 
@@ -54,6 +95,7 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev,
 		       enum bcmfs_device_type dev_type __rte_unused)
 {
 	struct bcmfs_device *fsdev;
+	uint32_t i;
 
 	fsdev = rte_calloc(__func__, 1, sizeof(*fsdev), 0);
 	if (!fsdev)
@@ -69,6 +111,15 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev,
 		goto cleanup;
 	}
 
+	/* check if registered ops name is present in directory path */
+	for (i = 0; i < bcmfs_hw_queue_pair_ops_table.num_ops; i++)
+		if (strstr(dirpath,
+			   bcmfs_hw_queue_pair_ops_table.qp_ops[i].name))
+			fsdev->sym_hw_qp_ops =
+				&bcmfs_hw_queue_pair_ops_table.qp_ops[i];
+	if (!fsdev->sym_hw_qp_ops)
+		goto cleanup;
+
 	strcpy(fsdev->dirname, dirpath);
 	strcpy(fsdev->name, devname);
 
diff --git a/drivers/crypto/bcmfs/bcmfs_device.h b/drivers/crypto/bcmfs/bcmfs_device.h
index dede5b82dc..2fb8eed143 100644
--- a/drivers/crypto/bcmfs/bcmfs_device.h
+++ b/drivers/crypto/bcmfs/bcmfs_device.h
@@ -8,6 +8,7 @@
 
 #include <sys/queue.h>
 
+#include <rte_spinlock.h>
 #include <rte_bus_vdev.h>
 
 #include "bcmfs_logs.h"
@@ -31,6 +32,19 @@ enum bcmfs_device_type {
 	BCMFS_UNKNOWN
 };
 
+/* A table to store registered queue pair operations */
+struct bcmfs_hw_queue_pair_ops_table {
+	/* Lock protecting num_ops and qp_ops[] during registration */
+	rte_spinlock_t tl;
+	/* Number of used ops structs in the table. */
+	uint32_t num_ops;
+	/* Storage for all possible ops structs. */
+	struct bcmfs_hw_queue_pair_ops qp_ops[BCMFS_MAX_NODES];
+};
+
+/* HW queue pair ops register function */
+int
+bcmfs_hw_queue_pair_register_ops(const struct bcmfs_hw_queue_pair_ops *qp_ops);
+
 struct bcmfs_device {
 	TAILQ_ENTRY(bcmfs_device) next;
 	/* Directory path for vfio */
@@ -49,6 +63,8 @@ struct bcmfs_device {
 	uint16_t max_hw_qps;
 	/* current qpairs in use */
 	struct bcmfs_qp *qps_in_use[BCMFS_MAX_HW_QUEUES];
+	/* queue pair ops exported by symmetric crypto hw */
+	struct bcmfs_hw_queue_pair_ops *sym_hw_qp_ops;
 };
 
 #endif /* _BCMFS_DEVICE_H_ */
diff --git a/drivers/crypto/bcmfs/bcmfs_qp.c b/drivers/crypto/bcmfs/bcmfs_qp.c
index 864e7bb746..ec1327b780 100644
--- a/drivers/crypto/bcmfs/bcmfs_qp.c
+++ b/drivers/crypto/bcmfs/bcmfs_qp.c
@@ -227,6 +227,7 @@ bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
 	qp->qpair_id = queue_pair_id;
 	qp->ioreg = qp_conf->iobase;
 	qp->nb_descriptors = nb_descriptors;
+	qp->ops = qp_conf->ops;
 
 	qp->stats.enqueued_count = 0;
 	qp->stats.dequeued_count = 0;
diff --git a/drivers/crypto/bcmfs/bcmfs_qp.h b/drivers/crypto/bcmfs/bcmfs_qp.h
index 52c487956e..59785865b0 100644
--- a/drivers/crypto/bcmfs/bcmfs_qp.h
+++ b/drivers/crypto/bcmfs/bcmfs_qp.h
@@ -44,6 +44,8 @@ struct bcmfs_qp_config {
 	uint16_t nb_descriptors;
 	/* Maximum number of h/w descriptors needed by a request */
 	uint16_t max_descs_req;
+	/* h/w ops associated with qp */
+	struct bcmfs_hw_queue_pair_ops *ops;
 };
 
 struct bcmfs_queue {
@@ -61,6 +63,8 @@ struct bcmfs_queue {
 		/* s/w pointer for completion h/w queue*/
 		uint32_t cmpl_read_ptr;
 	};
+	/* number of inflight descriptors accumulated before the next doorbell ring */
+	uint16_t descs_inflight;
 	/* Memzone name */
 	char memz_name[RTE_MEMZONE_NAMESIZE];
 };
diff --git a/drivers/crypto/bcmfs/hw/bcmfs4_rm.c b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c
new file mode 100644
index 0000000000..aec1089637
--- /dev/null
+++ b/drivers/crypto/bcmfs/hw/bcmfs4_rm.c
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+
+#include <rte_bitmap.h>
+
+#include "bcmfs_device.h"
+#include "bcmfs_dev_msg.h"
+#include "bcmfs_hw_defs.h"
+#include "bcmfs_logs.h"
+#include "bcmfs_qp.h"
+#include "bcmfs_rm_common.h"
+
+/* FS4 configuration */
+#define RING_BD_TOGGLE_INVALID(offset)			\
+			(((offset) >> FS_RING_BD_ALIGN_ORDER) & 0x1)
+#define RING_BD_TOGGLE_VALID(offset)			\
+			(!RING_BD_TOGGLE_INVALID(offset))
+
+#define RING_VER_MAGIC					0x76303031
+
+/* Per-Ring register offsets */
+#define RING_VER					0x000
+#define RING_BD_START_ADDR				0x004
+#define RING_BD_READ_PTR				0x008
+#define RING_BD_WRITE_PTR				0x00c
+#define RING_BD_READ_PTR_DDR_LS				0x010
+#define RING_BD_READ_PTR_DDR_MS				0x014
+#define RING_CMPL_START_ADDR				0x018
+#define RING_CMPL_WRITE_PTR				0x01c
+#define RING_NUM_REQ_RECV_LS				0x020
+#define RING_NUM_REQ_RECV_MS				0x024
+#define RING_NUM_REQ_TRANS_LS				0x028
+#define RING_NUM_REQ_TRANS_MS				0x02c
+#define RING_NUM_REQ_OUTSTAND				0x030
+#define RING_CONTROL					0x034
+#define RING_FLUSH_DONE					0x038
+#define RING_MSI_ADDR_LS				0x03c
+#define RING_MSI_ADDR_MS				0x040
+#define RING_MSI_CONTROL				0x048
+#define RING_BD_READ_PTR_DDR_CONTROL			0x04c
+#define RING_MSI_DATA_VALUE				0x064
+
+/* Register RING_BD_START_ADDR fields */
+#define BD_LAST_UPDATE_HW_SHIFT				28
+#define BD_LAST_UPDATE_HW_MASK				0x1
+#define BD_START_ADDR_VALUE(pa)				\
+	((uint32_t)((((uint64_t)(pa)) >> FS_RING_BD_ALIGN_ORDER) & 0x0fffffff))
+#define BD_START_ADDR_DECODE(val)			\
+	((uint64_t)((val) & 0x0fffffff) << FS_RING_BD_ALIGN_ORDER)
+
+/* Register RING_CMPL_START_ADDR fields */
+#define CMPL_START_ADDR_VALUE(pa)			\
+	((uint32_t)((((uint64_t)(pa)) >> FS_RING_CMPL_ALIGN_ORDER) & 0x7ffffff))
+
+/* Register RING_CONTROL fields */
+#define CONTROL_MASK_DISABLE_CONTROL			12
+#define CONTROL_FLUSH_SHIFT				5
+#define CONTROL_ACTIVE_SHIFT				4
+#define CONTROL_RATE_ADAPT_MASK				0xf
+#define CONTROL_RATE_DYNAMIC				0x0
+#define CONTROL_RATE_FAST				0x8
+#define CONTROL_RATE_MEDIUM				0x9
+#define CONTROL_RATE_SLOW				0xa
+#define CONTROL_RATE_IDLE				0xb
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK					0x1
+
+/* Register RING_MSI_CONTROL fields */
+#define MSI_TIMER_VAL_SHIFT				16
+#define MSI_TIMER_VAL_MASK				0xffff
+#define MSI_ENABLE_SHIFT				15
+#define MSI_ENABLE_MASK					0x1
+#define MSI_COUNT_SHIFT					0
+#define MSI_COUNT_MASK					0x3ff
+
+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT			16
+#define BD_READ_PTR_DDR_TIMER_VAL_MASK			0xffff
+#define BD_READ_PTR_DDR_ENABLE_SHIFT			15
+#define BD_READ_PTR_DDR_ENABLE_MASK			0x1
+
+/* ====== Broadcom FS4-RM ring descriptor defines ===== */
+
+
+/* General descriptor format */
+#define DESC_TYPE_SHIFT				60
+#define DESC_TYPE_MASK				0xf
+#define DESC_PAYLOAD_SHIFT			0
+#define DESC_PAYLOAD_MASK			0x0fffffffffffffff
+
+/* Null descriptor format  */
+#define NULL_TYPE				0
+#define NULL_TOGGLE_SHIFT			58
+#define NULL_TOGGLE_MASK			0x1
+
+/* Header descriptor format */
+#define HEADER_TYPE				1
+#define HEADER_TOGGLE_SHIFT			58
+#define HEADER_TOGGLE_MASK			0x1
+#define HEADER_ENDPKT_SHIFT			57
+#define HEADER_ENDPKT_MASK			0x1
+#define HEADER_STARTPKT_SHIFT			56
+#define HEADER_STARTPKT_MASK			0x1
+#define HEADER_BDCOUNT_SHIFT			36
+#define HEADER_BDCOUNT_MASK			0x1f
+#define HEADER_BDCOUNT_MAX			HEADER_BDCOUNT_MASK
+#define HEADER_FLAGS_SHIFT			16
+#define HEADER_FLAGS_MASK			0xffff
+#define HEADER_OPAQUE_SHIFT			0
+#define HEADER_OPAQUE_MASK			0xffff
+
+/* Source (SRC) descriptor format */
+#define SRC_TYPE				2
+#define SRC_LENGTH_SHIFT			44
+#define SRC_LENGTH_MASK				0xffff
+#define SRC_ADDR_SHIFT				0
+#define SRC_ADDR_MASK				0x00000fffffffffff
+
+/* Destination (DST) descriptor format */
+#define DST_TYPE				3
+#define DST_LENGTH_SHIFT			44
+#define DST_LENGTH_MASK				0xffff
+#define DST_ADDR_SHIFT				0
+#define DST_ADDR_MASK				0x00000fffffffffff
+
+/* Next pointer (NPTR) descriptor format */
+#define NPTR_TYPE				5
+#define NPTR_TOGGLE_SHIFT			58
+#define NPTR_TOGGLE_MASK			0x1
+#define NPTR_ADDR_SHIFT				0
+#define NPTR_ADDR_MASK				0x00000fffffffffff
+
+/* Mega source (MSRC) descriptor format */
+#define MSRC_TYPE				6
+#define MSRC_LENGTH_SHIFT			44
+#define MSRC_LENGTH_MASK			0xffff
+#define MSRC_ADDR_SHIFT				0
+#define MSRC_ADDR_MASK				0x00000fffffffffff
+
+/* Mega destination (MDST) descriptor format */
+#define MDST_TYPE				7
+#define MDST_LENGTH_SHIFT			44
+#define MDST_LENGTH_MASK			0xffff
+#define MDST_ADDR_SHIFT				0
+#define MDST_ADDR_MASK				0x00000fffffffffff
+
+/* Return true when the descriptor at desc_ptr is a next-table (NPTR) entry */
+static uint8_t
+bcmfs4_is_next_table_desc(void *desc_ptr)
+{
+	uint32_t dtype;
+
+	dtype = FS_DESC_DEC(rm_read_desc(desc_ptr),
+			    DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+
+	return dtype == NPTR_TYPE;
+}
+
+/* Build an NPTR descriptor pointing at the next BD table page */
+static uint64_t
+bcmfs4_next_table_desc(uint32_t toggle, uint64_t next_addr)
+{
+	uint64_t d;
+
+	d = rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
+	d |= rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a NULL descriptor with the given toggle state */
+static uint64_t
+bcmfs4_null_desc(uint32_t toggle)
+{
+	uint64_t d;
+
+	d = rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
+
+	return d;
+}
+
+/* Invert the TOGGLE bit of the HEADER descriptor at desc_ptr in place */
+static void
+bcmfs4_flip_header_toggle(void *desc_ptr)
+{
+	uint64_t desc = rm_read_desc(desc_ptr);
+
+	/* XOR flips the bit regardless of its current state */
+	desc ^= ((uint64_t)0x1 << HEADER_TOGGLE_SHIFT);
+
+	rm_write_desc(desc_ptr, desc);
+}
+
+/* Build a HEADER descriptor from its individual fields */
+static uint64_t
+bcmfs4_header_desc(uint32_t toggle, uint32_t startpkt,
+		   uint32_t endpkt, uint32_t bdcount,
+		   uint32_t flags, uint32_t opaque)
+{
+	uint64_t d;
+
+	d = rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
+	d |= rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
+			   HEADER_STARTPKT_MASK);
+	d |= rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
+	d |= rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT,
+			   HEADER_BDCOUNT_MASK);
+	d |= rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
+	d |= rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
+
+	return d;
+}
+
+/*
+ * Write one non-HEADER descriptor into the ring, emitting a HEADER
+ * descriptor first whenever a new header group starts. Advances
+ * *desc_ptr (wrapping at end_desc) and maintains *toggle across
+ * next-table descriptors that are skipped.
+ */
+static void
+bcmfs4_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
+		    uint32_t reqid, uint64_t desc,
+		    void **desc_ptr, uint32_t *toggle,
+		    void *start_desc, void *end_desc)
+{
+	uint64_t d;
+	uint32_t nhavail, _toggle, _startpkt, _endpkt, _bdcount;
+
+	/*
+	 * Each request or packet start with a HEADER descriptor followed
+	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
+	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
+	 * following a HEADER descriptor is represented by BDCOUNT field
+	 * of HEADER descriptor. The max value of BDCOUNT field is 31 which
+	 * means we can only have 31 non-HEADER descriptors following one
+	 * HEADER descriptor.
+	 *
+	 * In general use, number of non-HEADER descriptors can easily go
+	 * beyond 31. To tackle this situation, we have packet (or request)
+	 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
+	 *
+	 * To use packet extension, the first HEADER descriptor of request
+	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
+	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
+	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
+	 * TOGGLE bit of the first HEADER will be set to invalid state to
+	 * ensure that FlexDMA engine does not start fetching descriptors
+	 * till all descriptors are enqueued. The user of this function
+	 * will flip the TOGGLE bit of first HEADER after all descriptors
+	 * are enqueued.
+	 */
+
+	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
+		/* Prepare the header descriptor */
+		nhavail = (nhcnt - nhpos);
+		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
+		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
+		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
+		/* Fix: _bdcount was computed twice (ternary immediately
+		 * overwritten by an equivalent if/else); keep one form.
+		 */
+		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
+				nhavail : HEADER_BDCOUNT_MAX;
+		d = bcmfs4_header_desc(_toggle, _startpkt, _endpkt,
+					_bdcount, 0x0, reqid);
+
+		/* Write header descriptor */
+		rm_write_desc(*desc_ptr, d);
+
+		/* Point to next descriptor */
+		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+		if (*desc_ptr == end_desc)
+			*desc_ptr = start_desc;
+
+		/* Skip next pointer descriptors */
+		while (bcmfs4_is_next_table_desc(*desc_ptr)) {
+			*toggle = (*toggle) ? 0 : 1;
+			*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+			if (*desc_ptr == end_desc)
+				*desc_ptr = start_desc;
+		}
+	}
+
+	/* Write desired descriptor */
+	rm_write_desc(*desc_ptr, desc);
+
+	/* Point to next descriptor */
+	*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+	if (*desc_ptr == end_desc)
+		*desc_ptr = start_desc;
+
+	/* Skip next pointer descriptors */
+	while (bcmfs4_is_next_table_desc(*desc_ptr)) {
+		*toggle = (*toggle) ? 0 : 1;
+		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+		if (*desc_ptr == end_desc)
+			*desc_ptr = start_desc;
+	}
+}
+
+/* Build a SRC descriptor for a source buffer of 'length' bytes */
+static uint64_t
+bcmfs4_src_desc(uint64_t addr, unsigned int length)
+{
+	uint64_t d;
+
+	d = rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
+	d |= rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a mega-source (MSRC) descriptor; length is given in 16B units */
+static uint64_t
+bcmfs4_msrc_desc(uint64_t addr, unsigned int length_div_16)
+{
+	uint64_t d;
+
+	d = rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(length_div_16, MSRC_LENGTH_SHIFT,
+			   MSRC_LENGTH_MASK);
+	d |= rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a DST descriptor for a destination buffer of 'length' bytes */
+static uint64_t
+bcmfs4_dst_desc(uint64_t addr, unsigned int length)
+{
+	uint64_t d;
+
+	d = rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
+	d |= rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a mega-destination (MDST) descriptor; length is given in 16B units */
+static uint64_t
+bcmfs4_mdst_desc(uint64_t addr, unsigned int length_div_16)
+{
+	uint64_t d;
+
+	d = rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(length_div_16, MDST_LENGTH_SHIFT,
+			   MDST_LENGTH_MASK);
+	d |= rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
+
+	return d;
+}
+
+/*
+ * Validate that every source/destination buffer length fits the
+ * descriptor that will carry it: SRC/DST for lengths that are not a
+ * multiple of 16 bytes, MSRC/MDST (16B-unit length field) otherwise.
+ */
+static bool
+bcmfs4_sanity_check(struct bcmfs_qp_message *msg)
+{
+	unsigned int idx;
+	uint32_t len;
+
+	if (msg == NULL)
+		return false;
+
+	for (idx = 0; idx < msg->srcs_count; idx++) {
+		len = msg->srcs_len[idx];
+		if (len & 0xf) {
+			if (len > SRC_LENGTH_MASK)
+				return false;
+		} else if (len > (MSRC_LENGTH_MASK * 16)) {
+			return false;
+		}
+	}
+
+	for (idx = 0; idx < msg->dsts_count; idx++) {
+		len = msg->dsts_len[idx];
+		if (len & 0xf) {
+			if (len > DST_LENGTH_MASK)
+				return false;
+		} else if (len > (MDST_LENGTH_MASK * 16)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Count the non-HEADER descriptors (SRC/MSRC/DST/MDST) a message will
+ * need. Mirrors the src/dst interleaving in bcmfs4_enqueue_msg(): one
+ * descriptor per source, plus one per destination consumed while
+ * covering that source's length.
+ */
+static uint32_t
+estimate_nonheader_desc_count(struct bcmfs_qp_message *msg)
+{
+	uint32_t cnt = 0;
+	unsigned int src = 0;
+	unsigned int dst = 0;
+	unsigned int dst_target = 0;
+
+	while (src < msg->srcs_count ||
+	       dst < msg->dsts_count) {
+		if (src < msg->srcs_count) {
+			cnt++;
+			dst_target = msg->srcs_len[src];
+			src++;
+		} else {
+			/* Sources exhausted: count all remaining dsts */
+			dst_target = UINT_MAX;
+		}
+		while (dst_target && dst < msg->dsts_count) {
+			cnt++;
+			if (msg->dsts_len[dst] < dst_target)
+				dst_target -= msg->dsts_len[dst];
+			else
+				dst_target = 0;
+			dst++;
+		}
+	}
+
+	return cnt;
+}
+
+/*
+ * Write all descriptors for one message into the TX ring starting at
+ * desc_ptr, interleaving SRC/MSRC and DST/MDST descriptors, then
+ * terminate with a NULL descriptor and flip the first HEADER's toggle
+ * bit to release the request to HW. Returns the ring position after
+ * the last written descriptor, or NULL on invalid pointers.
+ */
+static void *
+bcmfs4_enqueue_msg(struct bcmfs_qp_message *msg,
+		   uint32_t nhcnt, uint32_t reqid,
+		   void *desc_ptr, uint32_t toggle,
+		   void *start_desc, void *end_desc)
+{
+	uint64_t d;
+	uint32_t nhpos = 0;
+	unsigned int src = 0;
+	unsigned int dst = 0;
+	unsigned int dst_target = 0;
+	/* First slot holds the first HEADER; kept to flip its toggle last */
+	void *orig_desc_ptr = desc_ptr;
+
+	if (!desc_ptr || !start_desc || !end_desc)
+		return NULL;
+
+	if (desc_ptr < start_desc || end_desc <= desc_ptr)
+		return NULL;
+
+	while (src < msg->srcs_count ||	dst < msg->dsts_count) {
+		if (src < msg->srcs_count) {
+			/* 16B-aligned lengths use the mega (MSRC) form */
+			if (msg->srcs_len[src] & 0xf) {
+				d = bcmfs4_src_desc(msg->srcs_addr[src],
+						    msg->srcs_len[src]);
+			} else {
+				d = bcmfs4_msrc_desc(msg->srcs_addr[src],
+						     msg->srcs_len[src] / 16);
+			}
+			bcmfs4_enqueue_desc(nhpos, nhcnt, reqid,
+					    d, &desc_ptr, &toggle,
+					    start_desc, end_desc);
+			nhpos++;
+			dst_target = msg->srcs_len[src];
+			src++;
+		} else {
+			/* No sources left: flush all remaining dsts */
+			dst_target = UINT_MAX;
+		}
+
+		while (dst_target && (dst < msg->dsts_count)) {
+			if (msg->dsts_len[dst] & 0xf) {
+				d = bcmfs4_dst_desc(msg->dsts_addr[dst],
+						    msg->dsts_len[dst]);
+			} else {
+				d = bcmfs4_mdst_desc(msg->dsts_addr[dst],
+						     msg->dsts_len[dst] / 16);
+			}
+			bcmfs4_enqueue_desc(nhpos, nhcnt, reqid,
+					    d, &desc_ptr, &toggle,
+					    start_desc, end_desc);
+			nhpos++;
+			if (msg->dsts_len[dst] < dst_target)
+				dst_target -= msg->dsts_len[dst];
+			else
+				dst_target = 0;
+			dst++; /* for next buffer */
+		}
+	}
+
+	/* Null descriptor with invalid toggle bit */
+	rm_write_desc(desc_ptr, bcmfs4_null_desc(!toggle));
+
+	/* Ensure that descriptors have been written to memory */
+	rte_smp_wmb();
+
+	/* Releasing the request: HW starts fetching once toggle is valid */
+	bcmfs4_flip_header_toggle(orig_desc_ptr);
+
+	return desc_ptr;
+}
+
+/*
+ * Enqueue one request message on the queue pair's TX ring.
+ * Allocates a request id from the context bitmap, writes the
+ * descriptors, and records the message for retrieval on completion.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+bcmfs4_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
+{
+	int reqid;
+	void *next;
+	uint32_t nhcnt;
+	int ret = 0;
+	uint32_t pos = 0;
+	uint64_t slab = 0;
+	uint8_t exit_cleanup = false;
+	struct bcmfs_queue *txq = &qp->tx_q;
+	struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;
+
+	/* Do sanity check on message */
+	if (!bcmfs4_sanity_check(msg)) {
+		BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
+		return -EIO;
+	}
+
+	/* Scan from the beginning */
+	__rte_bitmap_scan_init(qp->ctx_bmp);
+	/* Scan bitmap to get the free pool */
+	ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
+	if (ret == 0) {
+		BCMFS_DP_LOG(ERR, "BD memory exhausted");
+		return -ERANGE;
+	}
+
+	/* reqid = slab base + index of first set bit within the slab */
+	reqid = pos + __builtin_ctzll(slab);
+	rte_bitmap_clear(qp->ctx_bmp, reqid);
+	/* Remember the message so dequeue can map reqid back to it */
+	qp->ctx_pool[reqid] = (unsigned long)msg;
+
+	/*
+	 * Number required descriptors = number of non-header descriptors +
+	 *				 number of header descriptors +
+	 *				 1x null descriptor
+	 */
+	nhcnt = estimate_nonheader_desc_count(msg);
+
+	/* Write descriptors to ring */
+	next = bcmfs4_enqueue_msg(msg, nhcnt, reqid,
+				  (uint8_t *)txq->base_addr + txq->tx_write_ptr,
+				  RING_BD_TOGGLE_VALID(txq->tx_write_ptr),
+				  txq->base_addr,
+				  (uint8_t *)txq->base_addr + txq->queue_size);
+	if (next == NULL) {
+		BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
+			     qp->qpair_id);
+		ret = -EINVAL;
+		exit_cleanup = true;
+		goto exit;
+	}
+
+	/* Save ring BD write offset */
+	txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
+				       (uint8_t *)txq->base_addr);
+
+	qp->nb_pending_requests++;
+
+	return 0;
+
+exit:
+	/* Cleanup if we failed */
+	if (exit_cleanup)
+		rte_bitmap_set(qp->ctx_bmp, reqid);
+
+	return ret;
+}
+
+/*
+ * FS4 rings have no doorbell register; the enqueue path releases work
+ * to HW by flipping the first HEADER descriptor's toggle bit instead.
+ */
+static void
+bcmfs4_ring_doorbell_qp(struct bcmfs_qp *qp __rte_unused)
+{
+	/* no door bell method supported */
+}
+
+/*
+ * Reap up to 'budget' completed requests from the completion ring.
+ * Stores the original message pointers into ops[] and recycles their
+ * request ids. Returns the number of completions processed.
+ */
+static uint16_t
+bcmfs4_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
+{
+	int err;
+	uint16_t reqid;
+	uint64_t desc;
+	uint16_t count = 0;
+	unsigned long context = 0;
+	struct bcmfs_queue *hwq = &qp->cmpl_q;
+	uint32_t cmpl_read_offset, cmpl_write_offset;
+
+	/*
+	 * Check whether budget is valid, else set the budget to maximum
+	 * so that all the available completions will be processed.
+	 */
+	if (budget > qp->nb_pending_requests)
+		budget =  qp->nb_pending_requests;
+
+	/*
+	 * Get current completion read and write offset
+	 * Note: We should read completion write pointer at least once
+	 * after we get a MSI interrupt because HW maintains internal
+	 * MSI status which will allow next MSI interrupt only after
+	 * completion write pointer is read.
+	 */
+	cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg +
+					   RING_CMPL_WRITE_PTR);
+	cmpl_write_offset *= FS_RING_DESC_SIZE;
+	cmpl_read_offset = hwq->cmpl_read_ptr;
+
+	/* Ensure completion pointer is read before proceeding */
+	rte_io_rmb();
+
+	/* For each completed request notify mailbox clients */
+	reqid = 0;
+	while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
+		/* Dequeue next completion descriptor */
+		desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
+				       cmpl_read_offset));
+
+		/* Next read offset (wraps at end of completion ring) */
+		cmpl_read_offset += FS_RING_DESC_SIZE;
+		if (cmpl_read_offset == FS_RING_CMPL_SIZE)
+			cmpl_read_offset = 0;
+
+		/* Decode error from completion descriptor */
+		err = rm_cmpl_desc_to_error(desc);
+		if (err < 0)
+			BCMFS_DP_LOG(ERR, "error desc rcvd");
+
+		/* Determine request id from completion descriptor */
+		reqid = rm_cmpl_desc_to_reqid(desc);
+
+		/* Determine message pointer based on reqid */
+		context = qp->ctx_pool[reqid];
+		if (context == 0)
+			BCMFS_DP_LOG(ERR, "HW error detected");
+
+		/* Release reqid for recycling */
+		qp->ctx_pool[reqid] = 0;
+		rte_bitmap_set(qp->ctx_bmp, reqid);
+
+		*ops = (void *)context;
+
+		/* Increment number of completions processed */
+		count++;
+		budget--;
+		ops++;
+	}
+
+	hwq->cmpl_read_ptr = cmpl_read_offset;
+
+	qp->nb_pending_requests -= count;
+
+	return count;
+}
+
+/*
+ * Initialize and activate one FS4 ring: lay out next-table descriptors
+ * in BD memory, flush the ring to reset HW state, program BD and
+ * completion base addresses, sync SW pointers with HW, set up MSI with
+ * a dummy location, and finally activate the ring. Returns 0.
+ */
+static int
+bcmfs4_start_qp(struct bcmfs_qp *qp)
+{
+	int timeout;
+	uint32_t val, off;
+	uint64_t d, next_addr, msi;
+	struct bcmfs_queue *tx_queue = &qp->tx_q;
+	struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;
+
+	/* Disable/deactivate ring */
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+
+	/* Configure next table pointer entries in BD memory */
+	for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
+		next_addr = off + FS_RING_DESC_SIZE;
+		if (next_addr == tx_queue->queue_size)
+			next_addr = 0;
+		next_addr += (uint64_t)tx_queue->base_phys_addr;
+		if (FS_RING_BD_ALIGN_CHECK(next_addr))
+			d = bcmfs4_next_table_desc(RING_BD_TOGGLE_VALID(off),
+						    next_addr);
+		else
+			d = bcmfs4_null_desc(RING_BD_TOGGLE_INVALID(off));
+		rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
+	}
+
+	/*
+	 * If user interrupt the test in between the run(Ctrl+C), then all
+	 * subsequent test run will fail because sw cmpl_read_offset and hw
+	 * cmpl_write_offset will be pointing at different completion BD. To
+	 * handle this we should flush all the rings in the startup instead
+	 * of shutdown function.
+	 * Ring flush will reset hw cmpl_write_offset.
+	 */
+
+	/* Set ring flush state */
+	timeout = 1000;
+	FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
+			(uint8_t *)qp->ioreg + RING_CONTROL);
+	do {
+		/*
+		 * If previous test is stopped in between the run, then
+		 * sw has to read cmpl_write_offset else DME/AE will be not
+		 * come out of flush state.
+		 */
+		FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
+
+		if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
+				FLUSH_DONE_MASK)
+			break;
+		usleep(1000);
+	} while (--timeout);
+	if (!timeout) {
+		BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
+			     qp->qpair_id);
+	}
+
+	/* Clear ring flush state */
+	timeout = 1000;
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+	do {
+		if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
+				  FLUSH_DONE_MASK))
+			break;
+		usleep(1000);
+	} while (--timeout);
+	if (!timeout) {
+		BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
+			     qp->qpair_id);
+	}
+
+	/* Program BD start address */
+	val = BD_START_ADDR_VALUE(tx_queue->base_phys_addr);
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_BD_START_ADDR);
+
+	/* BD write pointer will be same as HW write pointer */
+	tx_queue->tx_write_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
+						RING_BD_WRITE_PTR);
+	tx_queue->tx_write_ptr *= FS_RING_DESC_SIZE;
+
+	/* Zero out the completion ring before handing it to HW */
+	for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
+		rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);
+
+	/* Program completion start address */
+	val = CMPL_START_ADDR_VALUE(cmpl_queue->base_phys_addr);
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CMPL_START_ADDR);
+
+	/* Completion read pointer will be same as HW write pointer */
+	cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
+						   RING_CMPL_WRITE_PTR);
+	cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;
+
+	/* Read ring Tx, Rx, and Outstanding counts to clear */
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);
+
+	/* Configure per-Ring MSI registers with dummy location */
+	/* We leave 1k * FS_RING_DESC_SIZE size from base phys for MSI */
+	msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
+	FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
+			(uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
+	FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
+			(uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
+	FS_MMIO_WRITE32(qp->qpair_id,
+			(uint8_t *)qp->ioreg + RING_MSI_DATA_VALUE);
+
+	/* Configure RING_MSI_CONTROL */
+	val = 0;
+	val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
+	val |= BIT(MSI_ENABLE_SHIFT);
+	val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);
+
+	/* Enable/activate ring */
+	val = BIT(CONTROL_ACTIVE_SHIFT);
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);
+
+	return 0;
+}
+
+/* Stop a queue pair by deactivating its ring control register */
+static void
+bcmfs4_shutdown_qp(struct bcmfs_qp *qp)
+{
+	/* Disable/deactivate ring */
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+}
+
+/* FS4 queue pair callbacks; matched to a device by the "fs4" name */
+struct bcmfs_hw_queue_pair_ops bcmfs4_qp_ops = {
+	.name = "fs4",
+	.enq_one_req = bcmfs4_enqueue_single_request_qp,
+	.ring_db = bcmfs4_ring_doorbell_qp,
+	.dequeue = bcmfs4_dequeue_qp,
+	.startq = bcmfs4_start_qp,
+	.stopq = bcmfs4_shutdown_qp,
+};
+
+/* Register the FS4 ops in the global table at DPDK constructor time */
+RTE_INIT(bcmfs4_register_qp_ops)
+{
+	 bcmfs_hw_queue_pair_register_ops(&bcmfs4_qp_ops);
+}
diff --git a/drivers/crypto/bcmfs/hw/bcmfs5_rm.c b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
new file mode 100644
index 0000000000..86e53051dd
--- /dev/null
+++ b/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
@@ -0,0 +1,677 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+
+#include <rte_bitmap.h>
+
+#include "bcmfs_qp.h"
+#include "bcmfs_logs.h"
+#include "bcmfs_dev_msg.h"
+#include "bcmfs_device.h"
+#include "bcmfs_hw_defs.h"
+#include "bcmfs_rm_common.h"
+
+/* Ring version */
+#define RING_VER_MAGIC					0x76303032
+
+/* Per-Ring register offsets */
+#define RING_VER					0x000
+#define RING_BD_START_ADDRESS_LSB			0x004
+#define RING_BD_READ_PTR				0x008
+#define RING_BD_WRITE_PTR				0x00c
+#define RING_BD_READ_PTR_DDR_LS				0x010
+#define RING_BD_READ_PTR_DDR_MS				0x014
+#define RING_CMPL_START_ADDR_LSB			0x018
+#define RING_CMPL_WRITE_PTR				0x01c
+#define RING_NUM_REQ_RECV_LS				0x020
+#define RING_NUM_REQ_RECV_MS				0x024
+#define RING_NUM_REQ_TRANS_LS				0x028
+#define RING_NUM_REQ_TRANS_MS				0x02c
+#define RING_NUM_REQ_OUTSTAND				0x030
+#define RING_CONTROL					0x034
+#define RING_FLUSH_DONE					0x038
+#define RING_MSI_ADDR_LS				0x03c
+#define RING_MSI_ADDR_MS				0x040
+#define RING_MSI_CONTROL				0x048
+#define RING_BD_READ_PTR_DDR_CONTROL			0x04c
+#define RING_MSI_DATA_VALUE				0x064
+#define RING_BD_START_ADDRESS_MSB			0x078
+#define RING_CMPL_START_ADDR_MSB			0x07c
+#define RING_DOORBELL_BD_WRITE_COUNT			0x074
+
+/* Register RING_BD_START_ADDR fields */
+#define BD_LAST_UPDATE_HW_SHIFT				28
+#define BD_LAST_UPDATE_HW_MASK				0x1
+#define BD_START_ADDR_VALUE(pa)				\
+	((uint32_t)((((uint64_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
+#define BD_START_ADDR_DECODE(val)			\
+	((uint64_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
+
+/* Register RING_CMPL_START_ADDR fields */
+#define CMPL_START_ADDR_VALUE(pa)			\
+	((uint32_t)((((uint64_t)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
+
+/* Register RING_CONTROL fields */
+#define CONTROL_MASK_DISABLE_CONTROL			12
+#define CONTROL_FLUSH_SHIFT				5
+#define CONTROL_ACTIVE_SHIFT				4
+#define CONTROL_RATE_ADAPT_MASK				0xf
+#define CONTROL_RATE_DYNAMIC				0x0
+#define CONTROL_RATE_FAST				0x8
+#define CONTROL_RATE_MEDIUM				0x9
+#define CONTROL_RATE_SLOW				0xa
+#define CONTROL_RATE_IDLE				0xb
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK					0x1
+
+/* Register RING_MSI_CONTROL fields */
+#define MSI_TIMER_VAL_SHIFT				16
+#define MSI_TIMER_VAL_MASK				0xffff
+#define MSI_ENABLE_SHIFT				15
+#define MSI_ENABLE_MASK					0x1
+#define MSI_COUNT_SHIFT					0
+#define MSI_COUNT_MASK					0x3ff
+
+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT			16
+#define BD_READ_PTR_DDR_TIMER_VAL_MASK			0xffff
+#define BD_READ_PTR_DDR_ENABLE_SHIFT			15
+#define BD_READ_PTR_DDR_ENABLE_MASK			0x1
+
+/* General descriptor format */
+#define DESC_TYPE_SHIFT					60
+#define DESC_TYPE_MASK					0xf
+#define DESC_PAYLOAD_SHIFT				0
+#define DESC_PAYLOAD_MASK				0x0fffffffffffffff
+
+/* Null descriptor format  */
+#define NULL_TYPE					0
+#define NULL_TOGGLE_SHIFT				59
+#define NULL_TOGGLE_MASK				0x1
+
+/* Header descriptor format */
+#define HEADER_TYPE					1
+#define HEADER_TOGGLE_SHIFT				59
+#define HEADER_TOGGLE_MASK				0x1
+#define HEADER_ENDPKT_SHIFT				57
+#define HEADER_ENDPKT_MASK				0x1
+#define HEADER_STARTPKT_SHIFT				56
+#define HEADER_STARTPKT_MASK				0x1
+#define HEADER_BDCOUNT_SHIFT				36
+#define HEADER_BDCOUNT_MASK				0x1f
+#define HEADER_BDCOUNT_MAX				HEADER_BDCOUNT_MASK
+#define HEADER_FLAGS_SHIFT				16
+#define HEADER_FLAGS_MASK				0xffff
+#define HEADER_OPAQUE_SHIFT				0
+#define HEADER_OPAQUE_MASK				0xffff
+
+/* Source (SRC) descriptor format */
+
+#define SRC_TYPE					2
+#define SRC_LENGTH_SHIFT				44
+#define SRC_LENGTH_MASK					0xffff
+#define SRC_ADDR_SHIFT					0
+#define SRC_ADDR_MASK					0x00000fffffffffff
+
+/* Destination (DST) descriptor format */
+#define DST_TYPE					3
+#define DST_LENGTH_SHIFT				44
+#define DST_LENGTH_MASK					0xffff
+#define DST_ADDR_SHIFT					0
+#define DST_ADDR_MASK					0x00000fffffffffff
+
+/* Next pointer (NPTR) descriptor format */
+#define NPTR_TYPE					5
+#define NPTR_TOGGLE_SHIFT				59
+#define NPTR_TOGGLE_MASK				0x1
+#define NPTR_ADDR_SHIFT					0
+#define NPTR_ADDR_MASK					0x00000fffffffffff
+
+/* Mega source (MSRC) descriptor format */
+#define MSRC_TYPE					6
+#define MSRC_LENGTH_SHIFT				44
+#define MSRC_LENGTH_MASK				0xffff
+#define MSRC_ADDR_SHIFT					0
+#define MSRC_ADDR_MASK					0x00000fffffffffff
+
+/* Mega destination (MDST) descriptor format */
+#define MDST_TYPE					7
+#define MDST_LENGTH_SHIFT				44
+#define MDST_LENGTH_MASK				0xffff
+#define MDST_ADDR_SHIFT					0
+#define MDST_ADDR_MASK					0x00000fffffffffff
+
+/* Return non-zero if the descriptor at @desc_ptr is a next-table (NPTR) one. */
+static uint8_t
+bcmfs5_is_next_table_desc(void *desc_ptr)
+{
+	uint64_t desc = rm_read_desc(desc_ptr);
+	uint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+
+	/* The comparison already yields 0/1; no ternary needed */
+	return type == NPTR_TYPE;
+}
+
+/* Build an NPTR descriptor pointing at the next BD table page. */
+static uint64_t
+bcmfs5_next_table_desc(uint64_t next_addr)
+{
+	uint64_t d;
+
+	d = rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a NULL descriptor (NULL_TYPE is 0, payload bits all clear). */
+static uint64_t
+bcmfs5_null_desc(void)
+{
+	return rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+}
+
+/*
+ * Build a HEADER descriptor.
+ * startpkt/endpkt mark the first/last header of a (possibly extended)
+ * request, bdcount is the number of non-HEADER descriptors that follow,
+ * and opaque carries the request id echoed back in the completion.
+ */
+static uint64_t
+bcmfs5_header_desc(uint32_t startpkt, uint32_t endpkt,
+		   uint32_t bdcount, uint32_t flags,
+		   uint32_t opaque)
+{
+	uint64_t d;
+
+	d = rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
+			   HEADER_STARTPKT_MASK);
+	d |= rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
+	d |= rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
+	d |= rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
+	d |= rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
+
+	return d;
+}
+
+/*
+ * Write one descriptor into the ring, inserting a HEADER descriptor at
+ * every HEADER_BDCOUNT_MAX boundary and transparently skipping NPTR
+ * (next table pointer) descriptors.
+ *
+ * @nhpos:      index of this descriptor among the request's non-HEADER
+ *              descriptors
+ * @nhcnt:      total number of non-HEADER descriptors in the request
+ * @reqid:      request id carried in the HEADER opaque field
+ * @desc:       descriptor to enqueue
+ * @desc_ptr:   in/out ring cursor
+ * @start_desc: first slot of the ring (wrap target)
+ * @end_desc:   one past the last slot
+ *
+ * Returns 1 if an NPTR descriptor was skipped (an extra ring slot was
+ * consumed), otherwise 0.
+ */
+static int
+bcmfs5_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
+		    uint32_t reqid, uint64_t desc,
+		    void **desc_ptr, void *start_desc,
+		    void *end_desc)
+{
+	uint64_t d;
+	uint32_t nhavail, _startpkt, _endpkt, _bdcount;
+	int is_nxt_page = 0;
+
+	/*
+	 * Each request or packet start with a HEADER descriptor followed
+	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
+	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
+	 * following a HEADER descriptor is represented by BDCOUNT field
+	 * of HEADER descriptor. The max value of BDCOUNT field is 31 which
+	 * means we can only have 31 non-HEADER descriptors following one
+	 * HEADER descriptor.
+	 *
+	 * In general use, number of non-HEADER descriptors can easily go
+	 * beyond 31. To tackle this situation, we have packet (or request)
+	 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
+	 *
+	 * To use packet extension, the first HEADER descriptor of request
+	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
+	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
+	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1.
+	 */
+
+	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
+		/* Prepare the header descriptor */
+		nhavail = (nhcnt - nhpos);
+		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
+		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
+		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
+				nhavail : HEADER_BDCOUNT_MAX;
+		d = bcmfs5_header_desc(_startpkt, _endpkt,
+				       _bdcount, 0x0, reqid);
+
+		/* Write header descriptor */
+		rm_write_desc(*desc_ptr, d);
+
+		/* Point to next descriptor */
+		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+		if (*desc_ptr == end_desc)
+			*desc_ptr = start_desc;
+
+		/* Skip next pointer descriptors */
+		while (bcmfs5_is_next_table_desc(*desc_ptr)) {
+			is_nxt_page = 1;
+			*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+			if (*desc_ptr == end_desc)
+				*desc_ptr = start_desc;
+		}
+	}
+
+	/* Write desired descriptor */
+	rm_write_desc(*desc_ptr, desc);
+
+	/* Point to next descriptor */
+	*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+	if (*desc_ptr == end_desc)
+		*desc_ptr = start_desc;
+
+	/* Skip next pointer descriptors */
+	while (bcmfs5_is_next_table_desc(*desc_ptr)) {
+		is_nxt_page = 1;
+		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
+		if (*desc_ptr == end_desc)
+			*desc_ptr = start_desc;
+	}
+
+	return is_nxt_page;
+}
+
+/* Build a SRC descriptor: buffer address plus its byte length. */
+static uint64_t
+bcmfs5_src_desc(uint64_t addr, unsigned int len)
+{
+	uint64_t d;
+
+	d = rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(len, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
+	d |= rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a mega-source (MSRC) descriptor; length is given in 16-byte units. */
+static uint64_t
+bcmfs5_msrc_desc(uint64_t addr, unsigned int len_div_16)
+{
+	uint64_t d;
+
+	d = rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(len_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
+	d |= rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a DST descriptor: buffer address plus its byte length. */
+static uint64_t
+bcmfs5_dst_desc(uint64_t addr, unsigned int len)
+{
+	uint64_t d;
+
+	d = rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(len, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
+	d |= rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
+
+	return d;
+}
+
+/* Build a mega-destination (MDST) descriptor; length is in 16-byte units. */
+static uint64_t
+bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)
+{
+	uint64_t d;
+
+	d = rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+	d |= rm_build_desc(len_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
+	d |= rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
+
+	return d;
+}
+
+/*
+ * Validate that every source/destination buffer length fits in the
+ * length field of the descriptor type it will be encoded with.
+ */
+static bool
+bcmfs5_sanity_check(struct bcmfs_qp_message *msg)
+{
+	unsigned int i;
+
+	if (msg == NULL)
+		return false;
+
+	/*
+	 * Lengths that are not a multiple of 16 bytes use SRC/DST
+	 * descriptors; 16-byte multiples use MSRC/MDST, whose length
+	 * field counts 16-byte units.
+	 */
+	for (i = 0; i < msg->srcs_count; i++) {
+		if (msg->srcs_len[i] & 0xf) {
+			if (msg->srcs_len[i] > SRC_LENGTH_MASK)
+				return false;
+		} else if (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16)) {
+			return false;
+		}
+	}
+
+	for (i = 0; i < msg->dsts_count; i++) {
+		if (msg->dsts_len[i] & 0xf) {
+			if (msg->dsts_len[i] > DST_LENGTH_MASK)
+				return false;
+		} else if (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16)) {
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/*
+ * Encode @msg into BD-ring descriptors starting at @desc_ptr.
+ *
+ * Returns the updated ring cursor on success, or NULL if the cursor
+ * arguments are inconsistent.  Advances txq->descs_inflight by the
+ * number of descriptors written so the doorbell write can report it to
+ * HW (srcs + dsts + one HEADER; with the bcmfs_qp_message limits of
+ * 8 sources and 3 destinations, a request fits in a single HEADER).
+ */
+static void *
+bcmfs5_enqueue_msg(struct bcmfs_queue *txq,
+		   struct bcmfs_qp_message *msg,
+		   uint32_t reqid, void *desc_ptr,
+		   void *start_desc, void *end_desc)
+{
+	uint64_t d;
+	unsigned int src, dst;
+	uint32_t nhpos = 0;
+	int nxt_page = 0;
+	uint32_t nhcnt = msg->srcs_count + msg->dsts_count;
+
+	if (desc_ptr == NULL || start_desc == NULL || end_desc == NULL)
+		return NULL;
+
+	/* The cursor must lie inside the ring bounds */
+	if (desc_ptr < start_desc || end_desc <= desc_ptr)
+		return NULL;
+
+	/* Sources: 16-byte-multiple lengths use MSRC (length / 16) */
+	for (src = 0; src < msg->srcs_count; src++) {
+		if (msg->srcs_len[src] & 0xf)
+			d = bcmfs5_src_desc(msg->srcs_addr[src],
+					    msg->srcs_len[src]);
+		else
+			d = bcmfs5_msrc_desc(msg->srcs_addr[src],
+					     msg->srcs_len[src] / 16);
+
+		nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
+					       d, &desc_ptr, start_desc,
+					       end_desc);
+		/* An NPTR slot was consumed while crossing a page */
+		if (nxt_page)
+			txq->descs_inflight++;
+		nhpos++;
+	}
+
+	/* Destinations: same MDST/DST split as the sources above */
+	for (dst = 0; dst < msg->dsts_count; dst++) {
+		if (msg->dsts_len[dst] & 0xf)
+			d = bcmfs5_dst_desc(msg->dsts_addr[dst],
+					    msg->dsts_len[dst]);
+		else
+			d = bcmfs5_mdst_desc(msg->dsts_addr[dst],
+					     msg->dsts_len[dst] / 16);
+
+		nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
+					       d, &desc_ptr, start_desc,
+					       end_desc);
+		if (nxt_page)
+			txq->descs_inflight++;
+		nhpos++;
+	}
+
+	/* nhcnt non-HEADER descriptors plus the HEADER itself */
+	txq->descs_inflight += nhcnt + 1;
+
+	return desc_ptr;
+}
+
+/*
+ * Enqueue one request on the TX ring.
+ *
+ * Allocates a free request id from the qp context bitmap, records the
+ * message pointer for completion-time lookup, and encodes the BD chain
+ * at the current TX write offset.  The doorbell is rung separately via
+ * the ring_db op (bcmfs5_write_doorbell).
+ *
+ * Returns 0 on success, -EIO for an invalid message, -ERANGE when no
+ * request id is free, -EINVAL when descriptor encoding fails.
+ */
+static int
+bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
+{
+	void *next;
+	int reqid;
+	int ret = 0;
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+	uint8_t exit_cleanup = false;
+	struct bcmfs_queue *txq = &qp->tx_q;
+	struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;
+
+	/* Do sanity check on message */
+	if (!bcmfs5_sanity_check(msg)) {
+		BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
+		return -EIO;
+	}
+
+	/* Scan from the beginning */
+	__rte_bitmap_scan_init(qp->ctx_bmp);
+	/* Scan bitmap to get the free pool */
+	ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
+	if (ret == 0) {
+		BCMFS_DP_LOG(ERR, "BD memory exhausted");
+		return -ERANGE;
+	}
+
+	/* First set bit within the returned slab is the free request id */
+	reqid = pos + __builtin_ctzll(slab);
+	rte_bitmap_clear(qp->ctx_bmp, reqid);
+	qp->ctx_pool[reqid] = (unsigned long)msg;
+
+	/* Write descriptors to ring */
+	next = bcmfs5_enqueue_msg(txq, msg, reqid,
+				  (uint8_t *)txq->base_addr + txq->tx_write_ptr,
+				  txq->base_addr,
+				  (uint8_t *)txq->base_addr + txq->queue_size);
+	if (next == NULL) {
+		BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
+			     qp->qpair_id);
+		ret = -EINVAL;
+		exit_cleanup = true;
+		goto exit;
+	}
+
+	/* Save ring BD write offset */
+	txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
+				       (uint8_t *)txq->base_addr);
+
+	qp->nb_pending_requests++;
+
+	return 0;
+
+exit:
+	/* Cleanup if we failed: return the request id to the bitmap */
+	if (exit_cleanup)
+		rte_bitmap_set(qp->ctx_bmp, reqid);
+
+	return ret;
+}
+
+/* Ring the TX doorbell with the number of descriptors written so far. */
+static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
+{
+	struct bcmfs_queue *txq = &qp->tx_q;
+
+	/* Ensure BD writes are visible before ringing the door-bell */
+	rte_wmb();
+
+	FS_MMIO_WRITE32(txq->descs_inflight,
+			(uint8_t *)qp->ioreg + RING_DOORBELL_BD_WRITE_COUNT);
+
+	/* reset the count */
+	txq->descs_inflight = 0;
+}
+
+/*
+ * Reap up to @budget completed requests from the completion ring and
+ * store their saved contexts (the original op pointers) into @ops.
+ * Returns the number of completions harvested.
+ */
+static uint16_t
+bcmfs5_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
+{
+	int err;
+	uint16_t reqid;
+	uint64_t desc;
+	uint16_t count = 0;
+	unsigned long context = 0;
+	struct bcmfs_queue *hwq = &qp->cmpl_q;
+	uint32_t cmpl_read_offset, cmpl_write_offset;
+
+	/*
+	 * Never process more completions than there are requests
+	 * outstanding on this queue pair.
+	 */
+	if (budget > qp->nb_pending_requests)
+		budget =  qp->nb_pending_requests;
+
+	/*
+	 * Get current completion read and write offset
+	 *
+	 * Note: We should read completion write pointer at least once
+	 * after we get a MSI interrupt because HW maintains internal
+	 * MSI status which will allow next MSI interrupt only after
+	 * completion write pointer is read.
+	 */
+	cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
+	cmpl_write_offset *= FS_RING_DESC_SIZE;
+	cmpl_read_offset = hwq->cmpl_read_ptr;
+
+	/* read the ring cmpl write ptr before cmpl read offset */
+	rte_io_rmb();
+
+	/* For each completed request notify mailbox clients */
+	reqid = 0;
+	while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
+		/* Dequeue next completion descriptor */
+		desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
+				      cmpl_read_offset));
+
+		/* Next read offset, wrapping at the ring end */
+		cmpl_read_offset += FS_RING_DESC_SIZE;
+		if (cmpl_read_offset == FS_RING_CMPL_SIZE)
+			cmpl_read_offset = 0;
+
+		/* Decode error from completion descriptor */
+		err = rm_cmpl_desc_to_error(desc);
+		if (err < 0)
+			BCMFS_DP_LOG(ERR, "error desc rcvd");
+
+		/* Determine request id from completion descriptor */
+		reqid = rm_cmpl_desc_to_reqid(desc);
+
+		/* Retrieve context saved at enqueue time */
+		context = qp->ctx_pool[reqid];
+		if (context == 0)
+			BCMFS_DP_LOG(ERR, "HW error detected");
+
+		/* Release reqid for recycling */
+		qp->ctx_pool[reqid] = 0;
+		rte_bitmap_set(qp->ctx_bmp, reqid);
+
+		*ops = (void *)context;
+
+		/* Increment number of completions processed */
+		count++;
+		budget--;
+		ops++;
+	}
+
+	hwq->cmpl_read_ptr = cmpl_read_offset;
+
+	qp->nb_pending_requests -= count;
+
+	return count;
+}
+
+/*
+ * Bring up a queue pair: seed the BD ring with NPTR/NULL descriptors,
+ * flush and re-arm the ring, program BD/CMPL base addresses and MSI,
+ * then activate the ring.  Always returns 0; flush timeouts are only
+ * logged.
+ */
+static int
+bcmfs5_start_qp(struct bcmfs_qp *qp)
+{
+	uint32_t val, off;
+	uint64_t d, next_addr, msi;
+	int timeout;
+	uint32_t bd_high, bd_low, cmpl_high, cmpl_low;
+	struct bcmfs_queue *tx_queue = &qp->tx_q;
+	struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;
+
+	/* Disable/deactivate ring */
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+
+	/* Configure next table pointer entries in BD memory */
+	for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
+		next_addr = off + FS_RING_DESC_SIZE;
+		if (next_addr == tx_queue->queue_size)
+			next_addr = 0;
+		next_addr += (uint64_t)tx_queue->base_phys_addr;
+		if (FS_RING_BD_ALIGN_CHECK(next_addr))
+			d = bcmfs5_next_table_desc(next_addr);
+		else
+			d = bcmfs5_null_desc();
+		rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
+	}
+
+	/*
+	 * If the user interrupts a run midway (Ctrl+C), all subsequent
+	 * runs would fail because the sw cmpl_read_offset and hw
+	 * cmpl_write_offset would point at different completion BDs.
+	 * To handle this, flush all the rings here at startup rather
+	 * than in the shutdown function.
+	 * Ring flush will reset hw cmpl_write_offset.
+	 */
+
+	/* Set ring flush state */
+	timeout = 1000;
+	FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
+			(uint8_t *)qp->ioreg + RING_CONTROL);
+	do {
+		/*
+		 * If a previous run was stopped midway, sw has to read
+		 * cmpl_write_offset, else DME/AE will not come out of
+		 * the flush state.
+		 */
+		FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
+
+		if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
+				   FLUSH_DONE_MASK)
+			break;
+		usleep(1000);
+	} while (--timeout);
+	if (!timeout) {
+		BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
+			     qp->qpair_id);
+	}
+
+	/* Clear ring flush state */
+	timeout = 1000;
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+	do {
+		if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
+				     FLUSH_DONE_MASK))
+			break;
+		usleep(1000);
+	} while (--timeout);
+	if (!timeout) {
+		BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
+			     qp->qpair_id);
+	}
+
+	/* Program BD start address */
+	bd_low = lower_32_bits(tx_queue->base_phys_addr);
+	bd_high = upper_32_bits(tx_queue->base_phys_addr);
+	FS_MMIO_WRITE32(bd_low, (uint8_t *)qp->ioreg +
+				RING_BD_START_ADDRESS_LSB);
+	FS_MMIO_WRITE32(bd_high, (uint8_t *)qp->ioreg +
+				 RING_BD_START_ADDRESS_MSB);
+
+	tx_queue->tx_write_ptr = 0;
+
+	/* Zero out the completion ring */
+	for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
+		rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);
+
+	/* Completion read pointer will be same as HW write pointer */
+	cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
+						   RING_CMPL_WRITE_PTR);
+	/* Program completion start address */
+	cmpl_low = lower_32_bits(cmpl_queue->base_phys_addr);
+	cmpl_high = upper_32_bits(cmpl_queue->base_phys_addr);
+	FS_MMIO_WRITE32(cmpl_low, (uint8_t *)qp->ioreg +
+				RING_CMPL_START_ADDR_LSB);
+	FS_MMIO_WRITE32(cmpl_high, (uint8_t *)qp->ioreg +
+				RING_CMPL_START_ADDR_MSB);
+
+	/* Convert the HW completion index into a byte offset */
+	cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;
+
+	/* Read ring Tx, Rx, and Outstanding counts to clear */
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
+	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);
+
+	/* Configure per-Ring MSI registers with dummy location */
+	msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
+	FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
+			(uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
+	FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
+			(uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
+	FS_MMIO_WRITE32(qp->qpair_id, (uint8_t *)qp->ioreg +
+				      RING_MSI_DATA_VALUE);
+
+	/* Configure RING_MSI_CONTROL */
+	val = 0;
+	val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
+	val |= BIT(MSI_ENABLE_SHIFT);
+	val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);
+
+	/* Enable/activate ring */
+	val = BIT(CONTROL_ACTIVE_SHIFT);
+	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);
+
+	return 0;
+}
+
+/* Quiesce the fs5 ring: writing 0 to RING_CONTROL clears the ACTIVE bit. */
+static void
+bcmfs5_shutdown_qp(struct bcmfs_qp *qp)
+{
+	/* Disable/deactivate ring */
+	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
+}
+
+/*
+ * HW queue pair operations for the fs5 ring manager; handed to the
+ * common layer via bcmfs_hw_queue_pair_register_ops() below.
+ */
+struct bcmfs_hw_queue_pair_ops bcmfs5_qp_ops = {
+	.name = "fs5",
+	.enq_one_req = bcmfs5_enqueue_single_request_qp,
+	.ring_db = bcmfs5_write_doorbell,
+	.dequeue = bcmfs5_dequeue_qp,
+	.startq = bcmfs5_start_qp,
+	.stopq = bcmfs5_shutdown_qp,
+};
+
+/* Constructor: register the fs5 ops before any device probe runs. */
+RTE_INIT(bcmfs5_register_qp_ops)
+{
+	bcmfs_hw_queue_pair_register_ops(&bcmfs5_qp_ops);
+}
diff --git a/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c
new file mode 100644
index 0000000000..9445d28f92
--- /dev/null
+++ b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.c
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Broadcom.
+ * All rights reserved.
+ */
+
+#include "bcmfs_hw_defs.h"
+#include "bcmfs_rm_common.h"
+
+/* Completion descriptor format */
+#define FS_CMPL_OPAQUE_SHIFT			0
+#define FS_CMPL_OPAQUE_MASK			0xffff
+#define FS_CMPL_ENGINE_STATUS_SHIFT		16
+#define FS_CMPL_ENGINE_STATUS_MASK		0xffff
+#define FS_CMPL_DME_STATUS_SHIFT		32
+#define FS_CMPL_DME_STATUS_MASK			0xffff
+#define FS_CMPL_RM_STATUS_SHIFT			48
+#define FS_CMPL_RM_STATUS_MASK			0xffff
+/* Completion RM status code */
+#define FS_RM_STATUS_CODE_SHIFT			0
+#define FS_RM_STATUS_CODE_MASK			0x3ff
+#define FS_RM_STATUS_CODE_GOOD			0x0
+#define FS_RM_STATUS_CODE_AE_TIMEOUT		0x3ff
+
+
+/* Completion DME status code */
+#define FS_DME_STATUS_MEM_COR_ERR		BIT(0)
+#define FS_DME_STATUS_MEM_UCOR_ERR		BIT(1)
+#define FS_DME_STATUS_FIFO_UNDRFLOW		BIT(2)
+#define FS_DME_STATUS_FIFO_OVERFLOW		BIT(3)
+#define FS_DME_STATUS_RRESP_ERR			BIT(4)
+#define FS_DME_STATUS_BRESP_ERR			BIT(5)
+#define FS_DME_STATUS_ERROR_MASK		(FS_DME_STATUS_MEM_COR_ERR | \
+						 FS_DME_STATUS_MEM_UCOR_ERR | \
+						 FS_DME_STATUS_FIFO_UNDRFLOW | \
+						 FS_DME_STATUS_FIFO_OVERFLOW | \
+						 FS_DME_STATUS_RRESP_ERR | \
+						 FS_DME_STATUS_BRESP_ERR)
+
+/* APIs related to ring manager descriptors */
+
+/* Mask @val with @mask and shift it into its descriptor bit-field. */
+uint64_t
+rm_build_desc(uint64_t val, uint32_t shift,
+	   uint64_t mask)
+{
+	/* Space added after "return" (checkpatch style) */
+	return (val & mask) << shift;
+}
+
+/* Read a 64-bit descriptor from ring memory (descriptors are little-endian). */
+uint64_t
+rm_read_desc(void *desc_ptr)
+{
+	return le64_to_cpu(*((uint64_t *)desc_ptr));
+}
+
+/* Write a 64-bit descriptor to ring memory in little-endian byte order. */
+void
+rm_write_desc(void *desc_ptr, uint64_t desc)
+{
+	*((uint64_t *)desc_ptr) = cpu_to_le64(desc);
+}
+
+/*
+ * Extract the request id from a completion descriptor's OPAQUE field
+ * (the field sits at shift 0, so a plain mask suffices).
+ */
+uint32_t
+rm_cmpl_desc_to_reqid(uint64_t cmpl_desc)
+{
+	return (uint32_t)(cmpl_desc & FS_CMPL_OPAQUE_MASK);
+}
+
+/*
+ * Map a completion descriptor's status fields to an errno-style code:
+ * -EIO if any DME error bit is set, -ETIMEDOUT on an AE-timeout RM
+ * status code, 0 otherwise.
+ * NOTE(review): errno constants presumably come via bcmfs_hw_defs.h —
+ * confirm the include chain.
+ */
+int
+rm_cmpl_desc_to_error(uint64_t cmpl_desc)
+{
+	uint32_t status;
+
+	/* DME status: any error bit means the transfer itself failed */
+	status = FS_DESC_DEC(cmpl_desc, FS_CMPL_DME_STATUS_SHIFT,
+			     FS_CMPL_DME_STATUS_MASK);
+	if (status & FS_DME_STATUS_ERROR_MASK)
+		return -EIO;
+
+	/* RM status: only the AE-timeout code is treated as an error */
+	status = FS_DESC_DEC(cmpl_desc, FS_CMPL_RM_STATUS_SHIFT,
+			     FS_CMPL_RM_STATUS_MASK);
+	status &= FS_RM_STATUS_CODE_MASK;
+	if (status == FS_RM_STATUS_CODE_AE_TIMEOUT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
diff --git a/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h
new file mode 100644
index 0000000000..e5d30d75c0
--- /dev/null
+++ b/drivers/crypto/bcmfs/hw/bcmfs_rm_common.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BCMFS_RM_COMMON_H_
+#define _BCMFS_RM_COMMON_H_
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_io.h>
+
+/*
+ * 32-bit MMIO register write.
+ * Relaxed accessor: no memory barrier is implied; callers that need
+ * ordering (e.g. doorbell writes) issue rte_wmb() themselves.
+ */
+#define FS_MMIO_WRITE32(value, addr) rte_write32_relaxed((value), (addr))
+/* 32-bit MMIO register read (relaxed, no implied barrier) */
+#define FS_MMIO_READ32(addr) rte_read32_relaxed((addr))
+
+/* Descriptor helper macros */
+/* Decode the field of descriptor d at shift s with mask m */
+#define FS_DESC_DEC(d, s, m)			(((d) >> (s)) & (m))
+
+/* True when addr is aligned to the ring BD alignment boundary */
+#define FS_RING_BD_ALIGN_CHECK(addr)			\
+			(!((addr) & ((0x1 << FS_RING_BD_ALIGN_ORDER) - 1)))
+
+/* Endianness conversion shorthands (Linux-style names) */
+#define cpu_to_le64     rte_cpu_to_le_64
+#define cpu_to_le32     rte_cpu_to_le_32
+#define cpu_to_le16     rte_cpu_to_le_16
+
+#define le64_to_cpu     rte_le_to_cpu_64
+#define le32_to_cpu     rte_le_to_cpu_32
+#define le16_to_cpu     rte_le_to_cpu_16
+
+/*
+ * Split a 64-bit value into 32-bit halves.  upper_32_bits uses two
+ * 16-bit shifts so the macro stays well-defined when x happens to be a
+ * 32-bit type (a single >> 32 on a 32-bit operand would be undefined
+ * behavior).
+ */
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+uint64_t
+rm_build_desc(uint64_t val, uint32_t shift,
+	   uint64_t mask);
+uint64_t
+rm_read_desc(void *desc_ptr);
+
+void
+rm_write_desc(void *desc_ptr, uint64_t desc);
+
+uint32_t
+rm_cmpl_desc_to_reqid(uint64_t cmpl_desc);
+
+int
+rm_cmpl_desc_to_error(uint64_t cmpl_desc);
+
+#endif /* _BCMFS_RM_COMMON_H_ */
+
diff --git a/drivers/crypto/bcmfs/meson.build b/drivers/crypto/bcmfs/meson.build
index 7e2bcbf14b..cd58bd5e25 100644
--- a/drivers/crypto/bcmfs/meson.build
+++ b/drivers/crypto/bcmfs/meson.build
@@ -8,5 +8,8 @@ sources = files(
 		'bcmfs_logs.c',
 		'bcmfs_device.c',
 		'bcmfs_vfio.c',
-		'bcmfs_qp.c'
+		'bcmfs_qp.c',
+		'hw/bcmfs4_rm.c',
+		'hw/bcmfs5_rm.c',
+		'hw/bcmfs_rm_common.c'
 		)
-- 
2.17.1



More information about the dev mailing list