[dpdk-dev] [PATCH] net/qede: fix memory alloc for multiple port reconfig

Kevin Traynor ktraynor at redhat.com
Fri Jun 8 13:56:12 CEST 2018


On 06/07/2018 05:30 PM, Rasesh Mody wrote:
> Multiple port reconfigurations can lead to memory allocation failures
> due to hitting the RTE memzone limit or having no more room in the
> config while reserving a memzone.
> 
> When freeing memzones, update the memzone mapping and the memzone count.
> Release Rx and Tx queue rings allocated during queue setup.
> 
> Fixes: a39001d90dbd ("net/qede: fix DMA memory leak")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Rasesh Mody <rasesh.mody at cavium.com>

Reviewed-by: Kevin Traynor <ktraynor at redhat.com>

> ---
>  drivers/net/qede/base/bcm_osal.c |    5 +++++
>  drivers/net/qede/qede_rxtx.c     |   21 +++++++++++----------
>  2 files changed, 16 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
> index ca1c2b1..72627df 100644
> --- a/drivers/net/qede/base/bcm_osal.c
> +++ b/drivers/net/qede/base/bcm_osal.c
> @@ -201,6 +201,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
>  			DP_VERBOSE(p_dev, ECORE_MSG_SP,
>  				"Free memzone %s\n", ecore_mz_mapping[j]->name);
>  			rte_memzone_free(ecore_mz_mapping[j]);
> +			while (j < ecore_mz_count - 1) {
> +				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
> +				j++;
> +			}
> +			ecore_mz_count--;
>  			return;
>  		}
>  	}
> diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
> index bdb5d6f..4fa1c61 100644
> --- a/drivers/net/qede/qede_rxtx.c
> +++ b/drivers/net/qede/qede_rxtx.c
> @@ -192,9 +192,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
>  void qede_rx_queue_release(void *rx_queue)
>  {
>  	struct qede_rx_queue *rxq = rx_queue;
> +	struct qede_dev *qdev = rxq->qdev;
> +	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
> +
> +	PMD_INIT_FUNC_TRACE(edev);
>  
>  	if (rxq) {
>  		qede_rx_queue_release_mbufs(rxq);
> +		qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
> +		qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
>  		rte_free(rxq->sw_rx_ring);
>  		rte_free(rxq);
>  	}
> @@ -350,9 +356,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
>  void qede_tx_queue_release(void *tx_queue)
>  {
>  	struct qede_tx_queue *txq = tx_queue;
> +	struct qede_dev *qdev = txq->qdev;
> +	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
> +
> +	PMD_INIT_FUNC_TRACE(edev);
>  
>  	if (txq) {
>  		qede_tx_queue_release_mbufs(txq);
> +		qdev->ops->common->chain_free(edev, &txq->tx_pbl);
>  		rte_free(txq->sw_tx_ring);
>  		rte_free(txq);
>  	}
> @@ -441,8 +452,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
>  	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
>  	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
>  	struct qede_fastpath *fp;
> -	struct qede_rx_queue *rxq;
> -	struct qede_tx_queue *txq;
>  	uint16_t sb_idx;
>  	uint8_t i;
>  
> @@ -467,21 +476,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
>  	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
>  		if (eth_dev->data->rx_queues[i]) {
>  			qede_rx_queue_release(eth_dev->data->rx_queues[i]);
> -			rxq = eth_dev->data->rx_queues[i];
> -			qdev->ops->common->chain_free(edev,
> -						      &rxq->rx_bd_ring);
> -			qdev->ops->common->chain_free(edev,
> -						      &rxq->rx_comp_ring);
>  			eth_dev->data->rx_queues[i] = NULL;
>  		}
>  	}
>  
>  	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
>  		if (eth_dev->data->tx_queues[i]) {
> -			txq = eth_dev->data->tx_queues[i];
>  			qede_tx_queue_release(eth_dev->data->tx_queues[i]);
> -			qdev->ops->common->chain_free(edev,
> -						      &txq->tx_pbl);
>  			eth_dev->data->tx_queues[i] = NULL;
>  		}
>  	}
> 



More information about the dev mailing list