[dpdk-dev] [PATCH 08/11] net/qede/base: fix coverity issues

Rasesh Mody rasesh.mody at cavium.com
Tue Apr 25 09:28:43 CEST 2017


Remove unused code to address Coverity issues, and
fix a code flow issue in the slowpath start error path.

Coverity issue: 1379468
Coverity issue: 1379521
Coverity issue: 1379522
Coverity issue: 1379523
Coverity issue: 1423918
Fixes: 86a2265e59d7 ("qede: add SRIOV support")
Fixes: ec94dbc57362 ("qede: add base driver")
Fixes: 2ea6f76aff40 ("qede: add core driver")
Fixes: 29540be7efce ("net/qede: support LRO/TSO offloads")
Cc: stable at dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>
---
 drivers/net/qede/base/ecore_cxt.c     |   90 ---------------------------------
 drivers/net/qede/base/ecore_cxt.h     |    4 --
 drivers/net/qede/base/ecore_cxt_api.h |   11 ----
 drivers/net/qede/base/ecore_iov_api.h |   11 ----
 drivers/net/qede/base/ecore_sriov.c   |   24 ---------
 drivers/net/qede/qede_main.c          |   10 +++-
 drivers/net/qede/qede_rxtx.c          |    1 -
 7 files changed, 8 insertions(+), 143 deletions(-)

diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 80ad102..688118b 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -2014,47 +2014,6 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
 	return ECORE_SUCCESS;
 }
 
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
-						struct ecore_tid_mem *p_info)
-{
-	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 proto, seg, total_lines, i, shadow_line;
-	struct ecore_ilt_client_cfg *p_cli;
-	struct ecore_ilt_cli_blk *p_fl_seg;
-	struct ecore_tid_seg *p_seg_info;
-
-	/* Verify the personality */
-	switch (p_hwfn->hw_info.personality) {
-	default:
-		return ECORE_INVAL;
-	}
-
-	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
-	if (!p_cli->active)
-		return ECORE_INVAL;
-
-	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
-	if (!p_seg_info->has_fl_mem)
-		return ECORE_INVAL;
-
-	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
-	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
-				   p_fl_seg->real_size_in_page);
-
-	for (i = 0; i < total_lines; i++) {
-		shadow_line = i + p_fl_seg->start_line -
-		    p_hwfn->p_cxt_mngr->pf_start_line;
-		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
-	}
-	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
-	    p_fl_seg->real_size_in_page;
-	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
-	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
-	    p_info->tid_size;
-
-	return ECORE_SUCCESS;
-}
-
 /* This function is very RoCE oriented, if another protocol in the future
  * will want this feature we'll need to modify the function to be more generic
  */
@@ -2292,52 +2251,3 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
 
 	return rc;
 }
-
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
-					    u32 tid,
-					    u8 ctx_type, void **pp_task_ctx)
-{
-	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	struct ecore_ilt_client_cfg *p_cli;
-	struct ecore_ilt_cli_blk *p_seg;
-	struct ecore_tid_seg *p_seg_info;
-	u32 proto, seg;
-	u32 total_lines;
-	u32 tid_size, ilt_idx;
-	u32 num_tids_per_block;
-
-	/* Verify the personality */
-	switch (p_hwfn->hw_info.personality) {
-	default:
-		return ECORE_INVAL;
-	}
-
-	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
-	if (!p_cli->active)
-		return ECORE_INVAL;
-
-	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
-
-	if (ctx_type == ECORE_CTX_WORKING_MEM) {
-		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
-	} else if (ctx_type == ECORE_CTX_FL_MEM) {
-		if (!p_seg_info->has_fl_mem)
-			return ECORE_INVAL;
-		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
-	} else {
-		return ECORE_INVAL;
-	}
-	total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
-	tid_size = p_mngr->task_type_size[p_seg_info->type];
-	num_tids_per_block = p_seg->real_size_in_page / tid_size;
-
-	if (total_lines < tid / num_tids_per_block)
-		return ECORE_INVAL;
-
-	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
-	    p_mngr->pf_start_line;
-	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
-	    (tid % num_tids_per_block) * tid_size;
-
-	return ECORE_SUCCESS;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index e678118..6ff823a 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -197,9 +197,5 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
 
 #define ECORE_CTX_WORKING_MEM 0
 #define ECORE_CTX_FL_MEM 1
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
-					    u32 tid,
-					    u8 ctx_type,
-					    void **task_ctx);
 
 #endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index f154e0d..6d87620 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -37,15 +37,4 @@ struct ecore_tid_mem {
 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
 					    struct ecore_cxt_info *p_info);
 
-/**
-* @brief ecore_cxt_get_tid_mem_info
-*
-* @param p_hwfn
-* @param p_info
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
-						struct ecore_tid_mem *p_info);
-
 #endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 870c57e..50cb3f2 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -693,17 +693,6 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
  * @return - rate in Mbps
  */
 int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
-
-/**
- * @brief - Configure min rate for VF's vport.
- * @param p_dev
- * @param vfid
- * @param - rate in Mbps
- *
- * @return
- */
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
-						     int vfid, u32 rate);
 #endif
 
 /**
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 39d3e88..db2873e 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -4386,30 +4386,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
 	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
 }
 
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
-						     int vfid, u32 rate)
-{
-	struct ecore_vf_info *vf;
-	u8 vport_id;
-	int i;
-
-	for_each_hwfn(p_dev, i) {
-		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
-		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
-			DP_NOTICE(p_hwfn, true,
-				  "SR-IOV sanity check failed,"
-				  " can't set min rate\n");
-			return ECORE_INVAL;
-		}
-	}
-
-	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
-	vport_id = vf->vport_id;
-
-	return ecore_configure_vport_wfq(p_dev, vport_id, rate);
-}
-
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
 					    struct ecore_ptt *p_ptt,
 					    int vfid,
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index d7847d1..009dbb4 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -265,7 +265,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 		if (rc) {
 			DP_NOTICE(edev, true,
 			"Failed to allocate stream memory\n");
-			goto err2;
+			goto err1;
 		}
 	}
 
@@ -306,7 +306,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 		if (rc) {
 			DP_NOTICE(edev, true,
 				  "Failed sending drv version command\n");
-			return rc;
+			goto err3;
 		}
 	}
 
@@ -314,8 +314,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
 
 	return 0;
 
+err3:
 	ecore_hw_stop(edev);
 err2:
+	qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+	qed_free_stream_mem(edev);
+err1:
+#endif
 	ecore_resc_free(edev);
 err:
 #ifdef CONFIG_ECORE_BINARY_FW
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 191d8af..47ebba7 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1501,7 +1501,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 
 		/* Fill the entry in the SW ring and the BDs in the FW ring */
 		idx = TX_PROD(txq);
-		*tx_pkts++;
 		txq->sw_tx_ring[idx].mbuf = mbuf;
 
 		/* BD1 */
-- 
1.7.10.3



More information about the dev mailing list