[dpdk-dev] [PATCH 05/28] bnxt: add support for PF/VF communications

Ajit Khaparde ajit.khaparde at broadcom.com
Tue Mar 28 05:48:40 CEST 2017


Set up interrupts and default completion ring during device init to
allow VF communications.

Request most VF HWRM commands be forwarded to the PF driver.  In the
future, this should be configurable.

Fix reporting of active VFs.

Add a log message when a completion is ignored on the default completion
ring since this will indicate support that needs to be added.

Do not initialize the default completion ring in bnxt_alloc_hwrm_rings(),
but move it out to bnxt_dev_init().

Create a structure bnxt_child_vf_info for the PF to cache VF-specific info.

Signed-off-by: Stephen Hurd <stephen.hurd at broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   9 +-
 drivers/net/bnxt/bnxt_cpr.c    |  88 ++++++++++-------
 drivers/net/bnxt/bnxt_cpr.h    |   1 +
 drivers/net/bnxt/bnxt_ethdev.c |  80 +++++++++++----
 drivers/net/bnxt/bnxt_hwrm.c   | 218 +++++++++++++++++++++++++++++++++--------
 drivers/net/bnxt/bnxt_hwrm.h   |  10 +-
 drivers/net/bnxt/bnxt_irq.c    |   5 +-
 drivers/net/bnxt/bnxt_ring.c   |  15 ---
 8 files changed, 308 insertions(+), 118 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 7fa0d8a..f2887d1 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -54,6 +54,12 @@ enum bnxt_hw_context {
 	HW_CONTEXT_IS_LB    = 3,
 };
 
+struct bnxt_child_vf_info {
+	uint16_t		fid;
+	uint32_t		func_cfg_flags;
+	void			*req_buf;
+};
+
 struct bnxt_pf_info {
 #define BNXT_FIRST_PF_FID	1
 #define BNXT_MAX_VFS(bp)	(bp->pf.max_vfs)
@@ -65,9 +71,10 @@ struct bnxt_pf_info {
 	uint16_t		first_vf_id;
 	uint16_t		active_vfs;
 	uint16_t		max_vfs;
+	uint32_t		func_cfg_flags;
 	void			*vf_req_buf;
-	phys_addr_t		vf_req_buf_dma_addr;
 	uint32_t		vf_req_fwd[8];
+	struct bnxt_child_vf_info	*vf_info;
 };
 
 /* Max wait time is 10 * 100ms = 1s */
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 3aedcb8..5d961f6 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -65,56 +65,76 @@ void bnxt_handle_async_event(struct bnxt *bp,
 
 void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 {
+	struct hwrm_exec_fwd_resp_input *fwreq;
 	struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
 	struct input *fwd_cmd;
-	uint16_t logical_vf_id, error_code;
+	uint16_t vf_id;
+	uint16_t req_len;
 
-	/* Qualify the fwd request */
-	if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
-		RTE_LOG(ERR, PMD,
-			"FWD req's source_id 0x%x > first_vf_id 0x%x\n",
-			fwd_cmpl->source_id, bp->pf.first_vf_id);
-		error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
-		goto reject;
-	} else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
-		   128 - sizeof(struct input)) {
-		RTE_LOG(ERR, PMD,
-		    "FWD req's cmd len 0x%x > 108 bytes allowed\n",
-		    fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
-		error_code = HWRM_ERR_CODE_INVALID_PARAMS;
-		goto reject;
+	if (bp->pf.active_vfs <= 0) {
+		RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+		return;
 	}
 
+	/* Qualify the fwd request */
+	vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
+
+	/*
+	 * TODO: req_len is always 128, is there a way to get the
+	 * actual request length?
+	 */
+	req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
+	   HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
+	if (req_len > sizeof(fwreq->encap_request))
+		req_len = sizeof(fwreq->encap_request);
+
 	/* Locate VF's forwarded command */
-	logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
-	fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
-		   (logical_vf_id * 128));
-
-	/* Provision the request */
-	switch (fwd_cmd->req_type) {
-	case HWRM_CFA_L2_FILTER_ALLOC:
-	case HWRM_CFA_L2_FILTER_FREE:
-	case HWRM_CFA_L2_FILTER_CFG:
-	case HWRM_CFA_L2_SET_RX_MASK:
-		break;
-	default:
-		error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+	fwd_cmd = (struct input *)
+		bp->pf.vf_info[vf_id - bp->pf.first_vf_id].req_buf;
+	/* Force the target ID to the source VF */
+	fwd_cmd->target_id = rte_cpu_to_le_16(vf_id);
+
+	if (vf_id < bp->pf.first_vf_id ||
+	    vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+		RTE_LOG(ERR, PMD,
+			"FWD req src_id 0x%x out of range 0x%x - 0x%x(%d %d)\n",
+			vf_id, bp->pf.first_vf_id,
+			(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
+			bp->pf.first_vf_id, bp->pf.active_vfs);
 		goto reject;
 	}
 
+	/* TODO: Call "mailbox" callback if necessary */
+
 	/* Forward */
-	fwd_cmd->target_id = fwd_cmpl->source_id;
-	bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
+	bnxt_hwrm_exec_fwd_resp(bp, vf_id, fwd_cmd, req_len);
 	return;
 
 reject:
-	/* TODO: Encap the reject error resp into the hwrm_err_iput? */
-	/* Use the error_code for the reject cmd */
-	RTE_LOG(ERR, PMD,
-		"Error 0x%x found in the forward request\n", error_code);
+	bnxt_hwrm_reject_fwd_resp(bp, vf_id, fwd_cmd, req_len);
+	return;
 }
 
 /* For the default completion ring only */
+int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+	int rc;
+
+	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+				  HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+				  0, HWRM_NA_SIGNATURE);
+	if (rc)
+		goto err_out;
+	cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
+	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+	bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+
+err_out:
+	return rc;
+}
+
 void bnxt_free_def_cp_ring(struct bnxt *bp)
 {
 	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 83e5376..8e147a5 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -82,6 +82,7 @@ struct bnxt_cp_ring_info {
 
 
 struct bnxt;
+int bnxt_alloc_def_cp_ring(struct bnxt *bp);
 void bnxt_free_def_cp_ring(struct bnxt *bp);
 int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
 void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 667ba12..b7732e0 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -478,34 +478,17 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
 	bp->dev_stopped = 0;
 
-	rc = bnxt_setup_int(bp);
-	if (rc)
-		goto error;
-
-	rc = bnxt_alloc_mem(bp);
-	if (rc)
-		goto error;
-
-	rc = bnxt_request_int(bp);
-	if (rc)
-		goto error;
-
 	rc = bnxt_init_nic(bp);
 	if (rc)
 		goto error;
 
-	bnxt_enable_int(bp);
-
 	bnxt_link_update_op(eth_dev, 0);
 	return 0;
 
 error:
 	bnxt_shutdown_nic(bp);
-	bnxt_disable_int(bp);
-	bnxt_free_int(bp);
 	bnxt_free_tx_mbufs(bp);
 	bnxt_free_rx_mbufs(bp);
-	bnxt_free_mem(bp);
 	return rc;
 }
 
@@ -537,8 +520,6 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 		eth_dev->data->dev_link.link_status = 0;
 	}
 	bnxt_set_hwrm_link_config(bp, false);
-	bnxt_disable_int(bp);
-	bnxt_free_int(bp);
 	bnxt_shutdown_nic(bp);
 	bp->dev_stopped = 1;
 }
@@ -1063,6 +1044,12 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
 	return rc;
 }
 
+#define ALLOW_FUNC(x)	\
+	{ \
+		typeof(x) arg = (x); \
+		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
+	}
 static int
 bnxt_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -1140,8 +1127,30 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		goto error_free;
 	}
 
-	rc = bnxt_hwrm_func_driver_register(bp, 0,
-					    bp->pf.vf_req_fwd);
+	/* Forward all requests */
+	memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+	/*
+	 * We can't forward commands before the VF driver calls drv_rgtr.
+	 * These are the ones that may be used by drivers.
+	 */
+	ALLOW_FUNC(HWRM_VER_GET);
+	ALLOW_FUNC(HWRM_QUEUE_QPORTCFG);
+	ALLOW_FUNC(HWRM_FUNC_QCFG);
+	ALLOW_FUNC(HWRM_FUNC_QCAPS);
+	ALLOW_FUNC(HWRM_FUNC_DRV_RGTR);
+
+	/*
+	 * The following are used for driver cleanup.  If we disallow these,
+	 * VF drivers can't clean up cleanly.
+	 */
+	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+	ALLOW_FUNC(HWRM_VNIC_FREE);
+	ALLOW_FUNC(HWRM_RING_FREE);
+	ALLOW_FUNC(HWRM_RING_GRP_FREE);
+	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+	rc = bnxt_hwrm_func_driver_register(bp);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
 			"Failed to register driver");
@@ -1183,8 +1192,32 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		}
 	}
 
+	rc = bnxt_setup_int(bp);
+	if (rc)
+		goto error_free;
+
+	rc = bnxt_alloc_mem(bp);
+	if (rc)
+		goto error_free_int;
+
+	rc = bnxt_request_int(bp);
+	if (rc)
+		goto error_free_int;
+
+	rc = bnxt_alloc_def_cp_ring(bp);
+	if (rc)
+		goto error_free_int;
+
+	bnxt_enable_int(bp);
+
 	return 0;
 
+error_free_int:
+	bnxt_disable_int(bp);
+	bnxt_free_def_cp_ring(bp);
+	bnxt_hwrm_func_buf_unrgtr(bp);
+	bnxt_free_int(bp);
+	bnxt_free_mem(bp);
 error_free:
 	eth_dev->driver->eth_dev_uninit(eth_dev);
 error:
@@ -1196,6 +1229,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
 	struct bnxt *bp = eth_dev->data->dev_private;
 	int rc;
 
+	bnxt_disable_int(bp);
+	bnxt_free_int(bp);
+	bnxt_free_mem(bp);
 	if (eth_dev->data->mac_addrs != NULL) {
 		rte_free(eth_dev->data->mac_addrs);
 		eth_dev->data->mac_addrs = NULL;
@@ -1208,6 +1244,8 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
 	bnxt_free_hwrm_resources(bp);
 	if (bp->dev_stopped == 0)
 		bnxt_dev_close_op(eth_dev);
+	if (bp->pf.vf_info)
+		rte_free(bp->pf.vf_info);
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index cb974d5..02e483e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -54,6 +54,31 @@
 
 #define HWRM_CMD_TIMEOUT		2000
 
+static int page_getenum(size_t size)
+{
+	if (size <= 1 << 4)
+		return 4;
+	if (size <= 1 << 12)
+		return 12;
+	if (size <= 1 << 13)
+		return 13;
+	if (size <= 1 << 16)
+		return 16;
+	if (size <= 1 << 21)
+		return 21;
+	if (size <= 1 << 22)
+		return 22;
+	if (size <= 1 << 30)
+		return 30;
+	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+	return sizeof(void *) * 8 - 1;
+}
+
+static int page_roundup(size_t size)
+{
+	return 1 << page_getenum(size);
+}
+
 /*
  * HWRM Functions (sent to HWRM)
  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
@@ -267,29 +292,13 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 	return rc;
 }
 
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
-{
-	int rc;
-	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
-	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
-
-	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
-
-	memcpy(req.encap_request, fwd_cmd,
-	       sizeof(req.encap_request));
-
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-
-	HWRM_CHECK_RESULT;
-
-	return rc;
-}
-
 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	uint16_t new_max_vfs;
+	int i;
 
 	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
 
@@ -303,7 +312,16 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (BNXT_PF(bp)) {
 		bp->pf.port_id = resp->port_id;
 		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-		bp->pf.max_vfs = rte_le_to_cpu_16(resp->max_vfs);
+		new_max_vfs = rte_le_to_cpu_16(resp->max_vfs);
+		if (new_max_vfs != bp->pf.max_vfs) {
+			if (bp->pf.vf_info)
+				rte_free(bp->pf.vf_info);
+			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+			bp->pf.max_vfs = new_max_vfs;
+			for (i = 0; i < new_max_vfs; i++)
+				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
+		}
 	}
 
 	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
@@ -336,8 +354,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
-				   uint32_t *vf_req_fwd)
+int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 {
 	int rc;
 	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
@@ -347,15 +364,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
 		return 0;
 
 	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
-	req.flags = flags;
-	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
-			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD |
-			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD;
+	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
 	req.ver_maj = RTE_VER_YEAR;
 	req.ver_min = RTE_VER_MONTH;
 	req.ver_upd = RTE_VER_MINOR;
 
-	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+	if (BNXT_PF(bp)) {
+		req.enables |=
+		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+		       RTE_MIN(sizeof(req.vf_req_fwd),
+			       sizeof(bp->pf.vf_req_fwd)));
+	}
 
 	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
 
@@ -866,7 +887,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
 				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
 	if (vnic->func_default)
-		req.flags = 1;
+		req.flags |=
+			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
 	if (vnic->vlan_strip)
 		req.flags |=
 		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
@@ -961,6 +983,88 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	return rc;
 }
 
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
+
+	req.req_buf_num_pages = rte_cpu_to_le_16(1);
+	req.req_buf_page_size =
+		rte_cpu_to_le_16(page_getenum(bp->pf.active_vfs *
+					      HWRM_MAX_REQ_LEN));
+	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
+	req.req_buf_page_addr[0] =
+		rte_cpu_to_le_64(rte_malloc_virt2phy(bp->pf.vf_req_buf));
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
+
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
+	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
+
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+			      void *encaped, size_t ec_size)
+{
+	int rc = 0;
+	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
+	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (ec_size > sizeof(req.encap_request))
+		return -1;
+
+	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
+
+	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+	memcpy(req.encap_request, encaped, ec_size);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+			    void *encaped, size_t ec_size)
+{
+	int rc = 0;
+	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
+	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (ec_size > sizeof(req.encap_request))
+		return -1;
+
+	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+
+	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
+	memcpy(req.encap_request, encaped, ec_size);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
+
 /*
  * HWRM utility functions
  */
@@ -1222,7 +1326,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 		return;
 
 	vnic = &bp->vnic_info[0];
-	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
+	if (BNXT_PF(bp))
+		bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
 
 	/* VNIC resources */
 	for (i = 0; i < bp->nr_vnics; i++) {
@@ -1519,8 +1624,6 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 
 	/* Hard Coded.. 0xfff VLAN ID mask */
 	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
-	if (BNXT_PF(bp))
-		bp->pf.active_vfs = rte_le_to_cpu_16(resp->alloc_vfs);
 
 	switch (resp->port_partition_type) {
 	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -1562,7 +1665,7 @@ static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
 	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
 }
 
-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings, bool std_mode)
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 {
 	struct hwrm_func_cfg_input req = {0};
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -1578,8 +1681,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings, bool std_mode)
 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
 			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
-	if (std_mode)
-		req.flags = rte_cpu_to_le_32(
+	req.flags = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE);
 	req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu +
 				ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE);
@@ -1645,7 +1747,7 @@ static void add_random_mac_if_needed(struct bnxt *bp,
 
 	/* Check for zero MAC address */
 	HWRM_PREP(req, FUNC_QCFG, -1, resp);
-	req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 	if (rc) {
 		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
@@ -1675,7 +1777,7 @@ static void reserve_resources_from_vf(struct bnxt *bp,
 
 	/* Get the actual allocated values now */
 	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
-	req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
 	if (rc) {
@@ -1710,8 +1812,8 @@ static int update_pf_resource_max(struct bnxt *bp)
 	HWRM_CHECK_RESULT;
 
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
-	bp->pf.active_vfs = rte_le_to_cpu_16(resp->alloc_vfs);
 	/* TODO: Only TX ring value reflects actual allocation */
+	//bp->pf.active_vfs = rte_le_to_cpu_16(resp->alloc_vfs);
 	//bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
 	//bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
 	//bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
@@ -1735,7 +1837,9 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings, false);
+	bp->pf.func_cfg_flags &=
+			~HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
 	return rc;
 }
 
@@ -1755,7 +1859,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	if (rc)
 		return rc;
 
-	bp->pf.active_vfs = 0;
+	bp->pf.active_vfs = num_vfs;
 
 	/*
 	 * First, configure the PF to only use one TX ring.  This ensures that
@@ -1767,17 +1871,40 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	 *
 	 * This has been fixed with firmware versions above 20.6.54
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, 1, true);
+	bp->pf.func_cfg_flags |=
+			HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
 	if (rc)
 		return rc;
 
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
+	 */
+	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd",
+				       num_vfs * HWRM_MAX_REQ_LEN,
+				       page_roundup(num_vfs *
+						    HWRM_MAX_REQ_LEN));
+	if (bp->pf.vf_req_buf == NULL) {
+		rc = -ENOMEM;
+		goto error_free;
+	}
+	for (i = 0; i < num_vfs; i++)
+		bp->pf.vf_info[i].req_buf =
+			((char *)bp->pf.vf_req_buf) + (i * HWRM_MAX_REQ_LEN);
+
+	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	if (rc)
+		goto error_free;
+
 	populate_vf_func_cfg_req(bp, &req, num_vfs);
 
+	bp->pf.active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
 		add_random_mac_if_needed(bp, &req, i);
 
 		HWRM_PREP(req, FUNC_CFG, -1, resp);
-		req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + i);
+		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
+		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
 		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
 		/* Clear enable flag for next pass */
@@ -1793,6 +1920,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 		}
 
 		reserve_resources_from_vf(bp, &req, i);
+		bp->pf.active_vfs++;
 	}
 
 	/*
@@ -1801,11 +1929,17 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	 * rings.  This will allow QoS to function properly.  Not setting this
 	 * will cause PF rings to break bandwidth settings.
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings, true);
+	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
 	if (rc)
-		return rc;
+		goto error_free;
 
 	rc = update_pf_resource_max(bp);
+	if (rc)
+		goto error_free;
+
+	return rc;
 
+error_free:
+	bnxt_hwrm_func_buf_unrgtr(bp);
 	return rc;
 }
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 1639e84..6ae2380 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -52,10 +52,14 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 			 struct bnxt_vnic_info *vnic,
 			 struct bnxt_filter_info *filter);
 
-int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
+			    void *encaped, size_t ec_size);
+int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
+			      void *encaped, size_t ec_size);
 
-int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
-				   uint32_t *vf_req_fwd);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
+int bnxt_hwrm_func_driver_register(struct bnxt *bp);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
 int bnxt_hwrm_func_reset(struct bnxt *bp);
 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index e93585a..2842ba8 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -67,11 +67,12 @@ static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
 			/* Handle any async event */
 			bnxt_handle_async_event(bp, cmp);
 			break;
-		case CMPL_BASE_TYPE_HWRM_FWD_RESP:
-			/* Handle HWRM forwarded responses */
+		case CMPL_BASE_TYPE_HWRM_FWD_REQ:
 			bnxt_handle_fwd_req(bp, cmp);
 			break;
 		default:
+			RTE_LOG(INFO, PMD,
+				"Ignoring %02x completion\n", CMP_TYPE(cmp));
 			/* Ignore any other events */
 			break;
 		}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 0fafa13..389bef2 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -213,21 +213,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 	unsigned int i;
 	int rc = 0;
 
-	/* Default completion ring */
-	{
-		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-
-		rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
-					  HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-					  0, HWRM_NA_SIGNATURE);
-		if (rc)
-			goto err_out;
-		cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
-		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
-		bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
-	}
-
 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-- 
2.10.1 (Apple Git-78)



More information about the dev mailing list