[dpdk-dev] [PATCH 2/2] net/netvsc: free all queues on close
Ferruh Yigit
ferruh.yigit at intel.com
Tue Apr 30 10:11:02 CEST 2019
On 4/29/2019 9:33 PM, Stephen Hemminger wrote:
> When dev_close is called, the netvsc driver will clean up all
> queues including the primary ring buffer.
>
> Signed-off-by: Stephen Hemminger <sthemmin at microsoft.com>
> ---
> drivers/net/netvsc/hn_ethdev.c | 8 +++++--
> drivers/net/netvsc/hn_rxtx.c | 39 ++++++++++++++++++++++++++++------
> drivers/net/netvsc/hn_var.h | 1 +
> 3 files changed, 40 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
> index 407ee484935a..553cb06f6e33 100644
> --- a/drivers/net/netvsc/hn_ethdev.c
> +++ b/drivers/net/netvsc/hn_ethdev.c
> @@ -112,6 +112,9 @@ eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
> eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
> eth_dev->intr_handle = &dev->intr_handle;
>
> + /* allow ethdev to remove on close */
> + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
> +
> return eth_dev;
> }
>
> @@ -632,11 +635,12 @@ hn_dev_stop(struct rte_eth_dev *dev)
> }
>
> static void
> -hn_dev_close(struct rte_eth_dev *dev __rte_unused)
> +hn_dev_close(struct rte_eth_dev *dev)
> {
> - PMD_INIT_LOG(DEBUG, "close");
> + PMD_INIT_FUNC_TRACE();
>
> hn_vf_close(dev);
> + hn_dev_free_queues(dev);
> }
>
> static const struct eth_dev_ops hn_eth_dev_ops = {
> diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
> index 7856f7e6ec48..1bebae7cddd4 100644
> --- a/drivers/net/netvsc/hn_rxtx.c
> +++ b/drivers/net/netvsc/hn_rxtx.c
> @@ -840,12 +840,9 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
> return error;
> }
>
> -void
> -hn_dev_rx_queue_release(void *arg)
> +static void
> +hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
> {
> - struct hn_rx_queue *rxq = arg;
> -
> - PMD_INIT_FUNC_TRACE();
>
> if (!rxq)
> return;
> @@ -857,12 +854,22 @@ hn_dev_rx_queue_release(void *arg)
> hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
>
> /* Keep primary queue to allow for control operations */
> - if (rxq != rxq->hv->primary) {
> + if (keep_primary && rxq != rxq->hv->primary) {
Isn't something wrong in this logic? When 'keep_primary' is 'false', this branch is
never taken, so _no_ 'rxq->event_buf' or 'rxq' is ever freed. There is already a
primary-queue check after the '&&', so this flag acts more like a global
enable/disable for freeing queues than something specific to the primary queue.
And in the '.dev_close()' path [1] this function is called with 'false', so the
queue resources are not cleaned up properly.
[1]
hn_dev_close
hn_dev_free_queues
foreach rx_queue
hn_rx_queue_free(rxq, false);
> rte_free(rxq->event_buf);
> rte_free(rxq);
> }
> }
>
> +void
> +hn_dev_rx_queue_release(void *arg)
> +{
> + struct hn_rx_queue *rxq = arg;
> +
> + PMD_INIT_FUNC_TRACE();
> +
> + hn_rx_queue_free(rxq, true);
> +}
> +
> int
> hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
> {
> @@ -1440,3 +1447,23 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>
> return nb_rcv;
> }
> +
> +void
> +hn_dev_free_queues(struct rte_eth_dev *dev)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + struct hn_rx_queue *rxq = dev->data->rx_queues[i];
> +
> + hn_rx_queue_free(rxq, false);
> + dev->data->rx_queues[i] = NULL;
> + }
> + dev->data->nb_rx_queues = 0;
> +
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + hn_dev_tx_queue_release(dev->data->tx_queues[i]);
> + dev->data->tx_queues[i] = NULL;
> + }
> + dev->data->nb_tx_queues = 0;
> +}
> diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
> index 8383f3246ca4..de885d898e6a 100644
> --- a/drivers/net/netvsc/hn_var.h
> +++ b/drivers/net/netvsc/hn_var.h
> @@ -173,6 +173,7 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
> const struct rte_eth_rxconf *rx_conf,
> struct rte_mempool *mp);
> void hn_dev_rx_queue_release(void *arg);
> +void hn_dev_free_queues(struct rte_eth_dev *dev);
>
> /* Check if VF is attached */
> static inline bool
>
More information about the dev
mailing list