[dpdk-dev] [PATCH] ixgbe: Add runtime tx/rx queue setup for X550

Wu, Jianyue (NSB - CN/Hangzhou) jianyue.wu at nokia-sbell.com
Fri May 21 02:59:19 CEST 2021


X550 NICs support runtime Tx/Rx queue setup, so advertise this
capability in dev_capa and in the Rx/Tx queue offload capabilities.

Signed-off-by: Wu Jianyue <jianyue.wu at nokia-sbell.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 22 ++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 21 +++++++++++++++++++--
 2 files changed, 41 insertions(+), 2 deletions(-)
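
Not part of the patch, just for reference: a minimal sketch of how an
application might consume the capability advertised here, assuming the
port has already been configured and started and that port_id, queue_id
and mb_pool are supplied by the caller (all names illustrative):

	#include <errno.h>
	#include <rte_ethdev.h>

	/* Add and start one extra Rx queue on an already-started port,
	 * but only if the PMD reports runtime Rx queue setup support.
	 */
	static int
	add_rx_queue_at_runtime(uint16_t port_id, uint16_t queue_id,
				struct rte_mempool *mb_pool)
	{
		struct rte_eth_dev_info dev_info;
		int ret;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		/* Deferred (post-start) queue setup is only legal when the
		 * PMD sets this dev_capa flag.
		 */
		if (!(dev_info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
			return -ENOTSUP;

		ret = rte_eth_rx_queue_setup(port_id, queue_id, 256,
					     rte_eth_dev_socket_id(port_id),
					     NULL, mb_pool);
		if (ret != 0)
			return ret;

		return rte_eth_dev_rx_queue_start(port_id, queue_id);
	}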

diff --git drivers/net/ixgbe/ixgbe_ethdev.c drivers/net/ixgbe/ixgbe_ethdev.c
index b5371568b..0839426b4 100644
--- drivers/net/ixgbe/ixgbe_ethdev.c
+++ drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3915,6 +3915,17 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = 256;
 	dev_info->default_txportconf.ring_size = 256;
 
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_a_vf) {
+		dev_info->dev_capa =
+			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	}
+
 	return 0;
 }
 
@@ -4010,6 +4021,17 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_desc_lim = rx_desc_lim;
 	dev_info->tx_desc_lim = tx_desc_lim;
 
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_a_vf) {
+		dev_info->dev_capa =
+			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+	}
+
 	return 0;
 }
 
diff --git drivers/net/ixgbe/ixgbe_rxtx.c drivers/net/ixgbe/ixgbe_rxtx.c
index d69f36e97..ea813aefe 100644
--- drivers/net/ixgbe/ixgbe_rxtx.c
+++ drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2571,9 +2571,18 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 uint64_t
 ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	uint64_t offloads = 0;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	return 0;
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_a_vf)
+		offloads |= RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+	return offloads;
 }
 
 uint64_t
@@ -3008,6 +3017,14 @@ ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	if (hw->mac.type != ixgbe_mac_82598EB)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a ||
+	    hw->mac.type == ixgbe_mac_X550_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_x_vf ||
+	    hw->mac.type == ixgbe_mac_X550EM_a_vf)
+		offloads |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP;
+
 	return offloads;
 }
 
--
2.17.1


