[PATCH 04/10] net/gve: support queue release and stop for DQO
Junfeng Guo
junfeng.guo at intel.com
Thu Apr 13 08:16:44 CEST 2023
Add support for the following queue operations for the DQO format:
- gve_tx_queue_release_dqo
- gve_rx_queue_release_dqo
- gve_stop_tx_queues_dqo
- gve_stop_rx_queues_dqo
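
For reference only (not part of this patch), a minimal sketch of how an
application reaches these callbacks through the ethdev API; the helper
name and port handling below are illustrative:

#include <rte_ethdev.h>

/* rte_eth_dev_stop() ends up in gve_stop_{tx,rx}_queues(), which now
 * dispatch to the _dqo variants on non-GQI devices; rte_eth_dev_close()
 * invokes the rx/tx_queue_release callbacks registered in
 * gve_eth_dev_ops_dqo. */
static int
shutdown_gve_port(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_close(port_id);
}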
Signed-off-by: Junfeng Guo <junfeng.guo at intel.com>
Signed-off-by: Rushil Gupta <rushilg at google.com>
Signed-off-by: Joshua Washington <joshwash at google.com>
Signed-off-by: Jeroen de Borst <jeroendb at google.com>
---
drivers/net/gve/gve_ethdev.c | 18 +++++++++---
drivers/net/gve/gve_ethdev.h | 12 ++++++++
drivers/net/gve/gve_rx.c | 3 ++
drivers/net/gve/gve_rx_dqo.c | 57 ++++++++++++++++++++++++++++++++++++
drivers/net/gve/gve_tx.c | 3 ++
drivers/net/gve/gve_tx_dqo.c | 55 ++++++++++++++++++++++++++++++++++
6 files changed, 144 insertions(+), 4 deletions(-)
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index fc60db63c5..340315a1a3 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -292,11 +292,19 @@ gve_dev_close(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "Failed to stop dev.");
}
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- gve_tx_queue_release(dev, i);
+ if (gve_is_gqi(priv)) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ gve_tx_queue_release(dev, i);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ gve_rx_queue_release(dev, i);
+ } else {
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ gve_tx_queue_release_dqo(dev, i);
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- gve_rx_queue_release(dev, i);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ gve_rx_queue_release_dqo(dev, i);
+ }
gve_free_qpls(priv);
rte_free(priv->adminq);
@@ -578,6 +586,8 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
.dev_infos_get = gve_dev_info_get,
.rx_queue_setup = gve_rx_queue_setup_dqo,
.tx_queue_setup = gve_tx_queue_setup_dqo,
+ .rx_queue_release = gve_rx_queue_release_dqo,
+ .tx_queue_release = gve_tx_queue_release_dqo,
.link_update = gve_link_update,
.stats_get = gve_dev_stats_get,
.stats_reset = gve_dev_stats_reset,
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index cb8cd62886..c8e1dd1435 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -378,4 +378,16 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *conf);
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
+
#endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 8d8f94efff..3dd3f578f9 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -359,6 +359,9 @@ gve_stop_rx_queues(struct rte_eth_dev *dev)
uint16_t i;
int err;
+ if (!gve_is_gqi(hw))
+ return gve_stop_rx_queues_dqo(dev);
+
err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
if (err != 0)
PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index c419c4dd2f..7f58844839 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -7,6 +7,38 @@
#include "gve_ethdev.h"
#include "base/gve_adminq.h"
+static inline void
+gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
+{
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+
+ rxq->nb_avail = rxq->nb_rx_desc;
+}
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct gve_rx_queue *q = dev->data->rx_queues[qid];
+
+ if (q == NULL)
+ return;
+
+ gve_release_rxq_mbufs_dqo(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->compl_ring_mz);
+ rte_memzone_free(q->mz);
+ rte_memzone_free(q->qres_mz);
+ q->qres = NULL;
+ rte_free(q);
+}
+
static void
gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
{
@@ -56,6 +88,12 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
}
nb_desc = hw->rx_desc_cnt;
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_id]) {
+ gve_rx_queue_release_dqo(dev, queue_id);
+ dev->data->rx_queues[queue_id] = NULL;
+ }
+
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("gve rxq",
sizeof(struct gve_rx_queue),
@@ -154,3 +192,22 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
rte_free(rxq);
return err;
}
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
+{
+ struct gve_priv *hw = dev->data->dev_private;
+ struct gve_rx_queue *rxq;
+ uint16_t i;
+ int err;
+
+ err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
+ if (err != 0)
+ PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ gve_release_rxq_mbufs_dqo(rxq);
+ gve_reset_rxq_dqo(rxq);
+ }
+}
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index fee3b939c7..13dc807623 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -672,6 +672,9 @@ gve_stop_tx_queues(struct rte_eth_dev *dev)
uint16_t i;
int err;
+ if (!gve_is_gqi(hw))
+ return gve_stop_tx_queues_dqo(dev);
+
err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
if (err != 0)
PMD_DRV_LOG(WARNING, "failed to destroy txqs");
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 22d20ff16f..ea6d5ff85e 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -6,6 +6,36 @@
#include "gve_ethdev.h"
#include "base/gve_adminq.h"
+static inline void
+gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
+{
+ uint16_t i;
+
+ for (i = 0; i < txq->sw_size; i++) {
+ if (txq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i]);
+ txq->sw_ring[i] = NULL;
+ }
+ }
+}
+
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct gve_tx_queue *q = dev->data->tx_queues[qid];
+
+ if (q == NULL)
+ return;
+
+ gve_release_txq_mbufs_dqo(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_memzone_free(q->compl_ring_mz);
+ rte_memzone_free(q->qres_mz);
+ q->qres = NULL;
+ rte_free(q);
+}
+
static int
check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh,
uint16_t tx_free_thresh)
@@ -91,6 +121,12 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
}
nb_desc = hw->tx_desc_cnt;
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_id]) {
+ gve_tx_queue_release_dqo(dev, queue_id);
+ dev->data->tx_queues[queue_id] = NULL;
+ }
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("gve txq",
sizeof(struct gve_tx_queue),
@@ -183,3 +219,22 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
rte_free(txq);
return err;
}
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
+{
+ struct gve_priv *hw = dev->data->dev_private;
+ struct gve_tx_queue *txq;
+ uint16_t i;
+ int err;
+
+ err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
+ if (err != 0)
+ PMD_DRV_LOG(WARNING, "failed to destroy txqs");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ gve_release_txq_mbufs_dqo(txq);
+ gve_reset_txq_dqo(txq);
+ }
+}
--
2.34.1