[dpdk-dev] [PATCH v2 07/27] event/dlb2: add v2.5 create ldb port

Timothy McDaniel timothy.mcdaniel at intel.com
Tue Mar 30 21:35:20 CEST 2021


Update the create load-balanced (ldb) port low-level code to account
for the new register map and hardware access macros. The code moves
from dlb2_resource.c to dlb2_resource_new.c: union bitfield register
accesses are replaced with DLB2_BITS_SET()/DLB2_BIT_SET() operations
on plain u32 values, version-dependent register address macros now
take hw->ver, and dlb2_verify_create_ldb_port_args() selects the
candidate port up front and returns the domain, port, and cos_id
through output parameters so dlb2_hw_create_ldb_port() no longer
repeats those lookups.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel at intel.com>
---
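
Note for reviewers: the conversion replaces the v2.0-style union bitfield
register accesses (e.g. "r0.field.vas = ...") with mask-based macros on
plain u32 values. A minimal, self-contained sketch of the new pattern --
the macro definitions and the field mask value below are simplified
stand-ins for illustration, not the driver's real definitions:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	/* Simplified stand-ins: shift the value to the field mask's
	 * lowest set bit and merge it under the mask.
	 */
	#define DLB2_SHIFT(mask)	__builtin_ctz(mask)
	#define DLB2_BIT_SET(x, mask)	((x) |= (mask))
	#define DLB2_BITS_SET(x, val, mask) \
		((x) = ((x) & ~(mask)) | (((val) << DLB2_SHIFT(mask)) & (mask)))
	#define DLB2_BITS_GET(x, mask) \
		(((x) & (mask)) >> DLB2_SHIFT(mask))

	#define DLB2_SYS_LDB_PP2VAS_VAS 0x0000001F /* illustrative mask */

	int main(void)
	{
		u32 reg = 0;

		/* v2.5 style: compose the register image with masked sets,
		 * then issue a single CSR write (DLB2_CSR_WR in the driver).
		 */
		DLB2_BITS_SET(reg, 7, DLB2_SYS_LDB_PP2VAS_VAS);
		printf("vas = %u (reg = 0x%08x)\n",
		       (unsigned int)DLB2_BITS_GET(reg, DLB2_SYS_LDB_PP2VAS_VAS),
		       (unsigned int)reg);
		return 0;
	}
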
 drivers/event/dlb2/pf/base/dlb2_resource.c    | 490 ------------------
 .../event/dlb2/pf/base/dlb2_resource_new.c    | 471 +++++++++++++++++
 2 files changed, 471 insertions(+), 490 deletions(-)
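
The if/else ladder that programs DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL encodes
token_depth_select as 1 for CQ depths up to 8 and log2(depth) - 2 above
that (valid depths are powers of two from 1 to 1024). An equivalent
computation, as a sketch using a GCC-style builtin:

	#include <stdio.h>

	/* Mirrors the depth ladder in dlb2_ldb_port_configure_cq() */
	static unsigned int token_depth_select(unsigned int cq_depth)
	{
		return (cq_depth <= 8) ?
			1 : (unsigned int)__builtin_ctz(cq_depth) - 2;
	}

	int main(void)
	{
		unsigned int d;

		for (d = 1; d <= 1024; d *= 2)
			printf("cq_depth %4u -> select %u\n",
			       d, token_depth_select(d));
		return 0;
	}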

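Likewise, the 64B-aligned CQ base address is split across two registers:
ADDR_L takes cq_dma_base >> 6 (per the in-code comment, "the DLB only
wants bits [63:6]") and ADDR_U takes cq_dma_base >> 32. A quick
round-trip check of that split -- the 26-bit ADDR_L field width below is
an assumption for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t cq_dma_base = 0x12345678abcdef40ULL; /* 64B aligned */
		/* ADDR_L holds bits 31:6, ADDR_U holds bits 63:32 */
		uint32_t addr_l = (uint32_t)(cq_dma_base >> 6) & 0x03ffffff;
		uint32_t addr_u = (uint32_t)(cq_dma_base >> 32);
		uint64_t rebuilt = ((uint64_t)addr_u << 32) |
				   ((uint64_t)addr_l << 6);

		printf("round-trip ok: %d\n", rebuilt == cq_dma_base);
		return 0;
	}
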
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index f8b85bc57..45d096eec 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -1216,496 +1216,6 @@ int dlb2_set_group_sequence_numbers(struct dlb2_hw *hw,
 	return 0;
 }
 
-static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
-				       struct dlb2_hw_domain *domain,
-				       struct dlb2_ldb_port *port,
-				       bool vdev_req,
-				       unsigned int vdev_id)
-{
-	union dlb2_sys_ldb_pp2vas r0 = { {0} };
-	union dlb2_sys_ldb_pp_v r4 = { {0} };
-
-	r0.field.vas = domain->id.phys_id;
-
-	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), r0.val);
-
-	if (vdev_req) {
-		union dlb2_sys_vf_ldb_vpp2pp r1 = { {0} };
-		union dlb2_sys_ldb_pp2vdev r2 = { {0} };
-		union dlb2_sys_vf_ldb_vpp_v r3 = { {0} };
-		unsigned int offs;
-		u32 virt_id;
-
-		/*
-		 * DLB uses producer port address bits 17:12 to determine the
-		 * producer port ID. In Scalable IOV mode, PP accesses come
-		 * through the PF MMIO window for the physical producer port,
-		 * so for translation purposes the virtual and physical port
-		 * IDs are equal.
-		 */
-		if (hw->virt_mode == DLB2_VIRT_SRIOV)
-			virt_id = port->id.virt_id;
-		else
-			virt_id = port->id.phys_id;
-
-		r1.field.pp = port->id.phys_id;
-
-		offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), r1.val);
-
-		r2.field.vdev = vdev_id;
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
-			    r2.val);
-
-		r3.field.vpp_v = 1;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r3.val);
-	}
-
-	r4.field.pp_v = 1;
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
-		    r4.val);
-}
-
-static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
-				      struct dlb2_hw_domain *domain,
-				      struct dlb2_ldb_port *port,
-				      uintptr_t cq_dma_base,
-				      struct dlb2_create_ldb_port_args *args,
-				      bool vdev_req,
-				      unsigned int vdev_id)
-{
-	union dlb2_sys_ldb_cq_addr_l r0 = { {0} };
-	union dlb2_sys_ldb_cq_addr_u r1 = { {0} };
-	union dlb2_sys_ldb_cq2vf_pf_ro r2 = { {0} };
-	union dlb2_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
-	union dlb2_lsp_cq_ldb_tkn_depth_sel r4 = { {0} };
-	union dlb2_chp_hist_list_lim r5 = { {0} };
-	union dlb2_chp_hist_list_base r6 = { {0} };
-	union dlb2_lsp_cq_ldb_infl_lim r7 = { {0} };
-	union dlb2_chp_hist_list_push_ptr r8 = { {0} };
-	union dlb2_chp_hist_list_pop_ptr r9 = { {0} };
-	union dlb2_sys_ldb_cq_at r10 = { {0} };
-	union dlb2_sys_ldb_cq_pasid r11 = { {0} };
-	union dlb2_chp_ldb_cq2vas r12 = { {0} };
-	union dlb2_lsp_cq2priov r13 = { {0} };
-
-	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
-	r0.field.addr_l = cq_dma_base >> 6;
-
-	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), r0.val);
-
-	r1.field.addr_u = cq_dma_base >> 32;
-
-	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), r1.val);
-
-	/*
-	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
-	 * cache lines out-of-order (but QEs within a cache line are always
-	 * updated in-order).
-	 */
-	r2.field.vf = vdev_id;
-	r2.field.is_pf = !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV);
-	r2.field.ro = 1;
-
-	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), r2.val);
-
-	if (args->cq_depth <= 8) {
-		r3.field.token_depth_select = 1;
-	} else if (args->cq_depth == 16) {
-		r3.field.token_depth_select = 2;
-	} else if (args->cq_depth == 32) {
-		r3.field.token_depth_select = 3;
-	} else if (args->cq_depth == 64) {
-		r3.field.token_depth_select = 4;
-	} else if (args->cq_depth == 128) {
-		r3.field.token_depth_select = 5;
-	} else if (args->cq_depth == 256) {
-		r3.field.token_depth_select = 6;
-	} else if (args->cq_depth == 512) {
-		r3.field.token_depth_select = 7;
-	} else if (args->cq_depth == 1024) {
-		r3.field.token_depth_select = 8;
-	} else {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: invalid CQ depth\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
-		    r3.val);
-
-	/*
-	 * To support CQs with depth less than 8, program the token count
-	 * register with a non-zero initial value. Operations such as domain
-	 * reset must take this initial value into account when quiescing the
-	 * CQ.
-	 */
-	port->init_tkn_cnt = 0;
-
-	if (args->cq_depth < 8) {
-		union dlb2_lsp_cq_ldb_tkn_cnt r14 = { {0} };
-
-		port->init_tkn_cnt = 8 - args->cq_depth;
-
-		r14.field.token_count = port->init_tkn_cnt;
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
-			    r14.val);
-	} else {
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
-			    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
-	}
-
-	r4.field.token_depth_select = r3.field.token_depth_select;
-	r4.field.ignore_depth = 0;
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
-		    r4.val);
-
-	/* Reset the CQ write pointer */
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_WPTR_RST);
-
-	r5.field.limit = port->hist_list_entry_limit - 1;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(port->id.phys_id), r5.val);
-
-	r6.field.base = port->hist_list_entry_base;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_BASE(port->id.phys_id), r6.val);
-
-	/*
-	 * The inflight limit sets a cap on the number of QEs for which this CQ
-	 * can owe completions at one time.
-	 */
-	r7.field.limit = args->cq_history_list_size;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id), r7.val);
-
-	r8.field.push_ptr = r6.field.base;
-	r8.field.generation = 0;
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
-		    r8.val);
-
-	r9.field.pop_ptr = r6.field.base;
-	r9.field.generation = 0;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id), r9.val);
-
-	/*
-	 * Address translation (AT) settings: 0: untranslated, 2: translated
-	 * (see ATS spec regarding Address Type field for more details)
-	 */
-	r10.field.cq_at = 0;
-
-	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), r10.val);
-
-	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
-		r11.field.pasid = hw->pasid[vdev_id];
-		r11.field.fmt2 = 1;
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
-		    r11.val);
-
-	r12.field.cq2vas = domain->id.phys_id;
-
-	DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(port->id.phys_id), r12.val);
-
-	/* Disable the port's QID mappings */
-	r13.field.v = 0;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r13.val);
-
-	return 0;
-}
-
-static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
-				   struct dlb2_hw_domain *domain,
-				   struct dlb2_ldb_port *port,
-				   uintptr_t cq_dma_base,
-				   struct dlb2_create_ldb_port_args *args,
-				   bool vdev_req,
-				   unsigned int vdev_id)
-{
-	int ret, i;
-
-	port->hist_list_entry_base = domain->hist_list_entry_base +
-				     domain->hist_list_entry_offset;
-	port->hist_list_entry_limit = port->hist_list_entry_base +
-				      args->cq_history_list_size;
-
-	domain->hist_list_entry_offset += args->cq_history_list_size;
-	domain->avail_hist_list_entries -= args->cq_history_list_size;
-
-	ret = dlb2_ldb_port_configure_cq(hw,
-					 domain,
-					 port,
-					 cq_dma_base,
-					 args,
-					 vdev_req,
-					 vdev_id);
-	if (ret < 0)
-		return ret;
-
-	dlb2_ldb_port_configure_pp(hw,
-				   domain,
-				   port,
-				   vdev_req,
-				   vdev_id);
-
-	dlb2_ldb_port_cq_enable(hw, port);
-
-	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
-		port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
-	port->num_mappings = 0;
-
-	port->enabled = true;
-
-	port->configured = true;
-
-	return 0;
-}
-
-static void
-dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
-			      u32 domain_id,
-			      uintptr_t cq_dma_base,
-			      struct dlb2_create_ldb_port_args *args,
-			      bool vdev_req,
-			      unsigned int vdev_id)
-{
-	DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
-	if (vdev_req)
-		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
-	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
-		    domain_id);
-	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
-		    args->cq_depth);
-	DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
-		    args->cq_history_list_size);
-	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
-		    cq_dma_base);
-	DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
-	DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
-		    args->cos_strict);
-}
-
-static int
-dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
-				 u32 domain_id,
-				 uintptr_t cq_dma_base,
-				 struct dlb2_create_ldb_port_args *args,
-				 struct dlb2_cmd_response *resp,
-				 bool vdev_req,
-				 unsigned int vdev_id)
-{
-	struct dlb2_hw_domain *domain;
-	int i;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
-
-	if (domain == NULL) {
-		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
-		return -EINVAL;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
-		return -EINVAL;
-	}
-
-	if (domain->started) {
-		resp->status = DLB2_ST_DOMAIN_STARTED;
-		return -EINVAL;
-	}
-
-	if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
-		resp->status = DLB2_ST_INVALID_COS_ID;
-		return -EINVAL;
-	}
-
-	if (args->cos_strict) {
-		if (dlb2_list_empty(&domain->avail_ldb_ports[args->cos_id])) {
-			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
-			return -EINVAL;
-		}
-	} else {
-		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-			if (!dlb2_list_empty(&domain->avail_ldb_ports[i]))
-				break;
-		}
-
-		if (i == DLB2_NUM_COS_DOMAINS) {
-			resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
-			return -EINVAL;
-		}
-	}
-
-	/* Check cache-line alignment */
-	if ((cq_dma_base & 0x3F) != 0) {
-		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
-		return -EINVAL;
-	}
-
-	if (args->cq_depth != 1 &&
-	    args->cq_depth != 2 &&
-	    args->cq_depth != 4 &&
-	    args->cq_depth != 8 &&
-	    args->cq_depth != 16 &&
-	    args->cq_depth != 32 &&
-	    args->cq_depth != 64 &&
-	    args->cq_depth != 128 &&
-	    args->cq_depth != 256 &&
-	    args->cq_depth != 512 &&
-	    args->cq_depth != 1024) {
-		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
-		return -EINVAL;
-	}
-
-	/* The history list size must be >= 1 */
-	if (!args->cq_history_list_size) {
-		resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
-		return -EINVAL;
-	}
-
-	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
-		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-
-/**
- * dlb2_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
- *	its resources.
- * @hw:	Contains the current state of the DLB2 hardware.
- * @domain_id: Domain ID
- * @args: User-provided arguments.
- * @cq_dma_base: Base DMA address for consumer queue memory
- * @resp: Response to user.
- * @vdev_req: Request came from a virtual device.
- * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
-			    u32 domain_id,
-			    struct dlb2_create_ldb_port_args *args,
-			    uintptr_t cq_dma_base,
-			    struct dlb2_cmd_response *resp,
-			    bool vdev_req,
-			    unsigned int vdev_id)
-{
-	struct dlb2_hw_domain *domain;
-	struct dlb2_ldb_port *port;
-	int ret, cos_id, i;
-
-	dlb2_log_create_ldb_port_args(hw,
-				      domain_id,
-				      cq_dma_base,
-				      args,
-				      vdev_req,
-				      vdev_id);
-
-	/*
-	 * Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	ret = dlb2_verify_create_ldb_port_args(hw,
-					       domain_id,
-					       cq_dma_base,
-					       args,
-					       resp,
-					       vdev_req,
-					       vdev_id);
-	if (ret)
-		return ret;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
-	if (domain == NULL) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: domain not found\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (args->cos_strict) {
-		cos_id = args->cos_id;
-
-		port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[cos_id],
-					  typeof(*port));
-	} else {
-		int idx;
-
-		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-			idx = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
-
-			port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[idx],
-						  typeof(*port));
-			if (port)
-				break;
-		}
-
-		cos_id = idx;
-	}
-
-	if (port == NULL) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: no available ldb ports\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	if (port->configured) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
-			    __func__);
-		return -EFAULT;
-	}
-
-	ret = dlb2_configure_ldb_port(hw,
-				      domain,
-				      port,
-				      cq_dma_base,
-				      args,
-				      vdev_req,
-				      vdev_id);
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
-
-	dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
-
-	resp->status = 0;
-	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
-
-	return 0;
-}
-
 static void
 dlb2_log_create_dir_port_args(struct dlb2_hw *hw,
 			      u32 domain_id,
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
index b52d2becd..2eb39e23d 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
@@ -3972,3 +3972,474 @@ int dlb2_hw_create_ldb_queue(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+static void dlb2_ldb_port_configure_pp(struct dlb2_hw *hw,
+				       struct dlb2_hw_domain *domain,
+				       struct dlb2_ldb_port *port,
+				       bool vdev_req,
+				       unsigned int vdev_id)
+{
+	u32 reg = 0;
+
+	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_SYS_LDB_PP2VAS_VAS);
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VAS(port->id.phys_id), reg);
+
+	if (vdev_req) {
+		unsigned int offs;
+		u32 virt_id;
+
+		/*
+		 * DLB uses producer port address bits 17:12 to determine the
+		 * producer port ID. In Scalable IOV mode, PP accesses come
+		 * through the PF MMIO window for the physical producer port,
+		 * so for translation purposes the virtual and physical port
+		 * IDs are equal.
+		 */
+		if (hw->virt_mode == DLB2_VIRT_SRIOV)
+			virt_id = port->id.virt_id;
+		else
+			virt_id = port->id.phys_id;
+
+		reg = 0;
+		DLB2_BITS_SET(reg, port->id.phys_id, DLB2_SYS_VF_LDB_VPP2PP_PP);
+		offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
+		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP2PP(offs), reg);
+
+		reg = 0;
+		DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_PP2VDEV_VDEV);
+		DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP2VDEV(port->id.phys_id), reg);
+
+		reg = 0;
+		DLB2_BIT_SET(reg, DLB2_SYS_VF_LDB_VPP_V_VPP_V);
+		DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), reg);
+	}
+
+	reg = 0;
+	DLB2_BIT_SET(reg, DLB2_SYS_LDB_PP_V_PP_V);
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_PP_V(port->id.phys_id), reg);
+}
+
+static int dlb2_ldb_port_configure_cq(struct dlb2_hw *hw,
+				      struct dlb2_hw_domain *domain,
+				      struct dlb2_ldb_port *port,
+				      uintptr_t cq_dma_base,
+				      struct dlb2_create_ldb_port_args *args,
+				      bool vdev_req,
+				      unsigned int vdev_id)
+{
+	u32 hl_base = 0;
+	u32 reg = 0;
+	u32 ds = 0;
+
+	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
+	DLB2_BITS_SET(reg, cq_dma_base >> 6, DLB2_SYS_LDB_CQ_ADDR_L_ADDR_L);
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id), reg);
+
+	reg = cq_dma_base >> 32;
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id), reg);
+
+	/*
+	 * 'ro' == relaxed ordering. This setting allows DLB2 to write
+	 * cache lines out-of-order (but QEs within a cache line are always
+	 * updated in-order).
+	 */
+	reg = 0;
+	DLB2_BITS_SET(reg, vdev_id, DLB2_SYS_LDB_CQ2VF_PF_RO_VF);
+	DLB2_BITS_SET(reg,
+		 !vdev_req && (hw->virt_mode != DLB2_VIRT_SIOV),
+		 DLB2_SYS_LDB_CQ2VF_PF_RO_IS_PF);
+	DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ2VF_PF_RO_RO);
+
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id), reg);
+
+	port->cq_depth = args->cq_depth;
+
+	if (args->cq_depth <= 8) {
+		ds = 1;
+	} else if (args->cq_depth == 16) {
+		ds = 2;
+	} else if (args->cq_depth == 32) {
+		ds = 3;
+	} else if (args->cq_depth == 64) {
+		ds = 4;
+	} else if (args->cq_depth == 128) {
+		ds = 5;
+	} else if (args->cq_depth == 256) {
+		ds = 6;
+	} else if (args->cq_depth == 512) {
+		ds = 7;
+	} else if (args->cq_depth == 1024) {
+		ds = 8;
+	} else {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: invalid CQ depth\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	reg = 0;
+	DLB2_BITS_SET(reg, ds,
+		      DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT);
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
+		    reg);
+
+	/*
+	 * To support CQs with depth less than 8, program the token count
+	 * register with a non-zero initial value. Operations such as domain
+	 * reset must take this initial value into account when quiescing the
+	 * CQ.
+	 */
+	port->init_tkn_cnt = 0;
+
+	if (args->cq_depth < 8) {
+		reg = 0;
+		port->init_tkn_cnt = 8 - args->cq_depth;
+
+		DLB2_BITS_SET(reg,
+			      port->init_tkn_cnt,
+			      DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT);
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
+			    reg);
+	} else {
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
+			    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
+	}
+
+	reg = 0;
+	DLB2_BITS_SET(reg, ds,
+		      DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_TOKEN_DEPTH_SELECT_V2);
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
+		    reg);
+
+	/* Reset the CQ write pointer */
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_WPTR_RST);
+
+	reg = 0;
+	DLB2_BITS_SET(reg,
+		      port->hist_list_entry_limit - 1,
+		      DLB2_CHP_HIST_LIST_LIM_LIMIT);
+	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id), reg);
+
+	DLB2_BITS_SET(hl_base, port->hist_list_entry_base,
+		      DLB2_CHP_HIST_LIST_BASE_BASE);
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
+		    hl_base);
+
+	/*
+	 * The inflight limit sets a cap on the number of QEs for which this CQ
+	 * can owe completions at one time.
+	 */
+	reg = 0;
+	DLB2_BITS_SET(reg, args->cq_history_list_size,
+		      DLB2_LSP_CQ_LDB_INFL_LIM_LIMIT);
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
+		    reg);
+
+	reg = 0;
+	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
+		      DLB2_CHP_HIST_LIST_PUSH_PTR_PUSH_PTR);
+	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
+		    reg);
+
+	reg = 0;
+	DLB2_BITS_SET(reg, DLB2_BITS_GET(hl_base, DLB2_CHP_HIST_LIST_BASE_BASE),
+		      DLB2_CHP_HIST_LIST_POP_PTR_POP_PTR);
+	DLB2_CSR_WR(hw, DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
+		    reg);
+
+	/*
+	 * Address translation (AT) settings: 0: untranslated, 2: translated
+	 * (see ATS spec regarding Address Type field for more details)
+	 */
+
+	if (hw->ver == DLB2_HW_V2) {
+		reg = 0;
+		DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_AT(port->id.phys_id), reg);
+	}
+
+	if (vdev_req && hw->virt_mode == DLB2_VIRT_SIOV) {
+		reg = 0;
+		DLB2_BITS_SET(reg, hw->pasid[vdev_id],
+			      DLB2_SYS_LDB_CQ_PASID_PASID);
+		DLB2_BIT_SET(reg, DLB2_SYS_LDB_CQ_PASID_FMT2);
+	}
+
+	DLB2_CSR_WR(hw, DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id), reg);
+
+	reg = 0;
+	DLB2_BITS_SET(reg, domain->id.phys_id, DLB2_CHP_LDB_CQ2VAS_CQ2VAS);
+	DLB2_CSR_WR(hw, DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id), reg);
+
+	/* Disable the port's QID mappings */
+	reg = 0;
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id), reg);
+
+	return 0;
+}
+
+static bool
+dlb2_cq_depth_is_valid(u32 depth)
+{
+	if (depth != 1 && depth != 2 &&
+	    depth != 4 && depth != 8 &&
+	    depth != 16 && depth != 32 &&
+	    depth != 64 && depth != 128 &&
+	    depth != 256 && depth != 512 &&
+	    depth != 1024)
+		return false;
+
+	return true;
+}
+
+static int dlb2_configure_ldb_port(struct dlb2_hw *hw,
+				   struct dlb2_hw_domain *domain,
+				   struct dlb2_ldb_port *port,
+				   uintptr_t cq_dma_base,
+				   struct dlb2_create_ldb_port_args *args,
+				   bool vdev_req,
+				   unsigned int vdev_id)
+{
+	int ret, i;
+
+	port->hist_list_entry_base = domain->hist_list_entry_base +
+				     domain->hist_list_entry_offset;
+	port->hist_list_entry_limit = port->hist_list_entry_base +
+				      args->cq_history_list_size;
+
+	domain->hist_list_entry_offset += args->cq_history_list_size;
+	domain->avail_hist_list_entries -= args->cq_history_list_size;
+
+	ret = dlb2_ldb_port_configure_cq(hw,
+					 domain,
+					 port,
+					 cq_dma_base,
+					 args,
+					 vdev_req,
+					 vdev_id);
+	if (ret)
+		return ret;
+
+	dlb2_ldb_port_configure_pp(hw,
+				   domain,
+				   port,
+				   vdev_req,
+				   vdev_id);
+
+	dlb2_ldb_port_cq_enable(hw, port);
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++)
+		port->qid_map[i].state = DLB2_QUEUE_UNMAPPED;
+	port->num_mappings = 0;
+
+	port->enabled = true;
+
+	port->configured = true;
+
+	return 0;
+}
+
+static void
+dlb2_log_create_ldb_port_args(struct dlb2_hw *hw,
+			      u32 domain_id,
+			      uintptr_t cq_dma_base,
+			      struct dlb2_create_ldb_port_args *args,
+			      bool vdev_req,
+			      unsigned int vdev_id)
+{
+	DLB2_HW_DBG(hw, "DLB2 create load-balanced port arguments:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+	DLB2_HW_DBG(hw, "\tDomain ID:                 %d\n",
+		    domain_id);
+	DLB2_HW_DBG(hw, "\tCQ depth:                  %d\n",
+		    args->cq_depth);
+	DLB2_HW_DBG(hw, "\tCQ hist list size:         %d\n",
+		    args->cq_history_list_size);
+	DLB2_HW_DBG(hw, "\tCQ base address:           0x%lx\n",
+		    cq_dma_base);
+	DLB2_HW_DBG(hw, "\tCoS ID:                    %u\n", args->cos_id);
+	DLB2_HW_DBG(hw, "\tStrict CoS allocation:     %u\n",
+		    args->cos_strict);
+}
+
+static int
+dlb2_verify_create_ldb_port_args(struct dlb2_hw *hw,
+				 u32 domain_id,
+				 uintptr_t cq_dma_base,
+				 struct dlb2_create_ldb_port_args *args,
+				 struct dlb2_cmd_response *resp,
+				 bool vdev_req,
+				 unsigned int vdev_id,
+				 struct dlb2_hw_domain **out_domain,
+				 struct dlb2_ldb_port **out_port,
+				 int *out_cos_id)
+{
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_port *port;
+	int i, id;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+	if (!domain) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+		return -EINVAL;
+	}
+
+	if (domain->started) {
+		resp->status = DLB2_ST_DOMAIN_STARTED;
+		return -EINVAL;
+	}
+
+	if (args->cos_id >= DLB2_NUM_COS_DOMAINS) {
+		resp->status = DLB2_ST_INVALID_COS_ID;
+		return -EINVAL;
+	}
+
+	if (args->cos_strict) {
+		id = args->cos_id;
+		port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
+					  typeof(*port));
+	} else {
+		for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+			id = (args->cos_id + i) % DLB2_NUM_COS_DOMAINS;
+
+			port = DLB2_DOM_LIST_HEAD(domain->avail_ldb_ports[id],
+						  typeof(*port));
+			if (port)
+				break;
+		}
+	}
+
+	if (!port) {
+		resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
+		return -EINVAL;
+	}
+
+	/* Check cache-line alignment */
+	if ((cq_dma_base & 0x3F) != 0) {
+		resp->status = DLB2_ST_INVALID_CQ_VIRT_ADDR;
+		return -EINVAL;
+	}
+
+	if (!dlb2_cq_depth_is_valid(args->cq_depth)) {
+		resp->status = DLB2_ST_INVALID_CQ_DEPTH;
+		return -EINVAL;
+	}
+
+	/* The history list size must be >= 1 */
+	if (!args->cq_history_list_size) {
+		resp->status = DLB2_ST_INVALID_HIST_LIST_DEPTH;
+		return -EINVAL;
+	}
+
+	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
+		resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
+		return -EINVAL;
+	}
+
+	*out_domain = domain;
+	*out_port = port;
+	*out_cos_id = id;
+
+	return 0;
+}
+
+/**
+ * dlb2_hw_create_ldb_port() - create a load-balanced port
+ * @hw: dlb2_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: port creation arguments.
+ * @cq_dma_base: base address of the CQ memory. This can be a PA or an IOVA.
+ * @resp: response structure.
+ * @vdev_req: indicates whether this request came from a vdev.
+ * @vdev_id: If vdev_req is true, this contains the vdev's ID.
+ *
+ * This function creates a load-balanced port.
+ *
+ * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
+ * device.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb2_error. If successful, resp->id
+ * contains the port ID.
+ *
+ * resp->id contains a virtual ID if vdev_req is true.
+ *
+ * Errors:
+ * EINVAL - A requested resource is unavailable, a credit setting is invalid, a
+ *	    pointer address is not properly aligned, the domain is not
+ *	    configured, or the domain has already been started.
+ * EFAULT - Internal error (resp->status not set).
+ */
+int dlb2_hw_create_ldb_port(struct dlb2_hw *hw,
+			    u32 domain_id,
+			    struct dlb2_create_ldb_port_args *args,
+			    uintptr_t cq_dma_base,
+			    struct dlb2_cmd_response *resp,
+			    bool vdev_req,
+			    unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_port *port;
+	int ret, cos_id;
+
+	dlb2_log_create_ldb_port_args(hw,
+				      domain_id,
+				      cq_dma_base,
+				      args,
+				      vdev_req,
+				      vdev_id);
+
+	/*
+	 * Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	ret = dlb2_verify_create_ldb_port_args(hw,
+					       domain_id,
+					       cq_dma_base,
+					       args,
+					       resp,
+					       vdev_req,
+					       vdev_id,
+					       &domain,
+					       &port,
+					       &cos_id);
+	if (ret)
+		return ret;
+
+	ret = dlb2_configure_ldb_port(hw,
+				      domain,
+				      port,
+				      cq_dma_base,
+				      args,
+				      vdev_req,
+				      vdev_id);
+	if (ret)
+		return ret;
+
+	/*
+	 * Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list.
+	 */
+	dlb2_list_del(&domain->avail_ldb_ports[cos_id], &port->domain_list);
+
+	dlb2_list_add(&domain->used_ldb_ports[cos_id], &port->domain_list);
+
+	resp->status = 0;
+	resp->id = (vdev_req) ? port->id.virt_id : port->id.phys_id;
+
+	return 0;
+}
-- 
2.23.0


