[dpdk-dev] [PATCH v3] net/dpaa: add null pointer checks and fix memory leak

Yong Wang wang.yong19 at zte.com.cn
Thu Jan 18 12:48:56 CET 2018


There are several calls to rte_zmalloc() whose return value is not
checked for a null pointer. In addition, on the error paths the memory
already allocated is not freed before returning. Fix this by adding
null pointer checks and freeing the memory with rte_free().

Signed-off-by: Yong Wang <wang.yong19 at zte.com.cn>

---
v3:
* Rebase on master and update accordingly.
v2:
* Fix code style warning.
---
 drivers/net/dpaa/dpaa_ethdev.c | 47 +++++++++++++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 14 deletions(-)
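
For readers less familiar with the DPDK allocation APIs, the change
below applies the usual check-and-goto cleanup idiom around
rte_zmalloc(). A minimal standalone sketch of that idiom follows; the
struct, sizes and function name are hypothetical and only illustrate
the pattern, they are not part of this patch:

	#include <errno.h>
	#include <rte_malloc.h>

	struct example_ctx {
		void *rx_queues;
		void *tx_queues;
	};

	static int
	example_queue_setup(struct example_ctx *ctx, unsigned int nb_rx,
			    unsigned int nb_tx)
	{
		int ret;

		/* rte_zmalloc() returns NULL on failure, so check it. */
		ctx->rx_queues = rte_zmalloc(NULL, 64 * nb_rx, 0);
		if (ctx->rx_queues == NULL)
			return -ENOMEM;	/* nothing allocated yet */

		ctx->tx_queues = rte_zmalloc(NULL, 64 * nb_tx, 0);
		if (ctx->tx_queues == NULL) {
			ret = -ENOMEM;
			goto free_rx;	/* undo the earlier allocation */
		}

		return 0;

	free_rx:
		rte_free(ctx->rx_queues);	/* rte_free(NULL) is a no-op */
		ctx->rx_queues = NULL;
		return ret;
	}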

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 444c122..ef5fc33 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1007,16 +1007,26 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 
 	dpaa_intf->rx_queues = rte_zmalloc(NULL,
 		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+	if (!dpaa_intf->rx_queues) {
+		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+		return -ENOMEM;
+	}
 
 	/* If congestion control is enabled globally*/
 	if (td_threshold) {
 		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
 			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
+		if (!dpaa_intf->cgr_rx) {
+			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
+			ret = -ENOMEM;
+			goto free_rx;
+		}
 
 		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
 		if (ret != num_rx_fqs) {
 			DPAA_PMD_WARN("insufficient CGRIDs available");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_rx;
 		}
 	} else {
 		dpaa_intf->cgr_rx = NULL;
@@ -1033,23 +1043,26 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
 			fqid);
 		if (ret)
-			return ret;
+			goto free_rx;
 		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
 	}
 	dpaa_intf->nb_rx_queues = num_rx_fqs;
 
-	/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
+	/* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
 	num_cores = rte_lcore_count();
 	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
 		num_cores, MAX_CACHELINE);
-	if (!dpaa_intf->tx_queues)
-		return -ENOMEM;
+	if (!dpaa_intf->tx_queues) {
+		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
+		ret = -ENOMEM;
+		goto free_rx;
+	}
 
 	for (loop = 0; loop < num_cores; loop++) {
 		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
 					 fman_intf);
 		if (ret)
-			return ret;
+			goto free_tx;
 		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
 	}
 	dpaa_intf->nb_tx_queues = num_cores;
@@ -1086,14 +1099,8 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
 						"store MAC addresses",
 				ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
-		rte_free(dpaa_intf->cgr_rx);
-		rte_free(dpaa_intf->rx_queues);
-		rte_free(dpaa_intf->tx_queues);
-		dpaa_intf->rx_queues = NULL;
-		dpaa_intf->tx_queues = NULL;
-		dpaa_intf->nb_rx_queues = 0;
-		dpaa_intf->nb_tx_queues = 0;
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_tx;
 	}
 
 	/* copy the primary mac address */
@@ -1119,6 +1126,18 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 	fman_if_stats_reset(fman_intf);
 
 	return 0;
+
+free_tx:
+	rte_free(dpaa_intf->tx_queues);
+	dpaa_intf->tx_queues = NULL;
+	dpaa_intf->nb_tx_queues = 0;
+
+free_rx:
+	rte_free(dpaa_intf->cgr_rx);
+	rte_free(dpaa_intf->rx_queues);
+	dpaa_intf->rx_queues = NULL;
+	dpaa_intf->nb_rx_queues = 0;
+	return ret;
 }
 
 static int
-- 
1.8.3.1


