[PATCH v6 6/8] net/gve: add support for dev info get and dev configure

Junfeng Guo junfeng.guo at intel.com
Thu Oct 20 12:36:54 CEST 2022


Add the dev_infos_get dev_ops.
Complete dev_configure with Rx offload configuration: receive segment
coalescing (RSC) is enabled when the TCP LRO offload is requested.
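
As a rough application-side illustration (not part of this patch; the
helper name configure_gve_port is hypothetical), the two ops would be
consumed roughly as follows: query the advertised capabilities first,
then request the LRO offload only when the PMD reports it, which is
what drives the enable_rsc path in gve_dev_configure():

    #include <rte_ethdev.h>

    static int
    configure_gve_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_conf conf = {0};
            int ret;

            /* Filled in by the PMD's dev_infos_get (gve_dev_info_get). */
            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret != 0)
                    return ret;

            /* LRO is advertised only for the DQO_RDA queue format. */
            if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
                    conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

            /* Reaches gve_dev_configure() via the configure dev_ops. */
            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }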

Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo at intel.com>
---
 doc/guides/nics/features/gve.ini |  2 ++
 doc/guides/nics/gve.rst          |  1 +
 drivers/net/gve/gve_ethdev.c     | 56 +++++++++++++++++++++++++++++++-
 3 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/gve.ini b/doc/guides/nics/features/gve.ini
index d1703d8dab..986df7f94a 100644
--- a/doc/guides/nics/features/gve.ini
+++ b/doc/guides/nics/features/gve.ini
@@ -4,8 +4,10 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Speed capabilities   = Y
 Link status          = Y
 MTU update           = Y
+RSS hash             = Y
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/doc/guides/nics/gve.rst b/doc/guides/nics/gve.rst
index c42ff23841..8c09a5a7fa 100644
--- a/doc/guides/nics/gve.rst
+++ b/doc/guides/nics/gve.rst
@@ -62,6 +62,7 @@ In this release, the GVE PMD provides the basic functionality of packet
 reception and transmission.
 Supported features of the GVE PMD are:
 
+- Receive Side Scaling (RSS)
 - Link state information
 
 Currently, only the GQI_QPL and GQI_RDA queue formats are supported in the PMD.
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 1968f38eb6..5be8d664f3 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -29,8 +29,13 @@ gve_write_version(uint8_t *driver_version_register)
 }
 
 static int
-gve_dev_configure(__rte_unused struct rte_eth_dev *dev)
+gve_dev_configure(struct rte_eth_dev *dev)
 {
+	struct gve_priv *priv = dev->data->dev_private;
+
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+		priv->enable_rsc = 1;
+
 	return 0;
 }
 
@@ -94,6 +99,54 @@ gve_dev_close(struct rte_eth_dev *dev)
 	return err;
 }
 
+static int
+gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct gve_priv *priv = dev->data->dev_private;
+
+	dev_info->device = dev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = priv->max_nb_rxq;
+	dev_info->max_tx_queues = priv->max_nb_txq;
+	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
+	dev_info->max_mtu = RTE_ETHER_MTU;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+	dev_info->rx_offload_capa = 0;
+	dev_info->tx_offload_capa = 0;
+
+	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = priv->rx_desc_cnt,
+		.nb_min = priv->rx_desc_cnt,
+		.nb_align = 1,
+	};
+
+	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = priv->tx_desc_cnt,
+		.nb_min = priv->tx_desc_cnt,
+		.nb_align = 1,
+	};
+
+	return 0;
+}
+
 static int
 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -125,6 +178,7 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.dev_start            = gve_dev_start,
 	.dev_stop             = gve_dev_stop,
 	.dev_close            = gve_dev_close,
+	.dev_infos_get        = gve_dev_info_get,
 	.link_update          = gve_link_update,
 	.mtu_set              = gve_dev_mtu_set,
 };
-- 
2.34.1