<div dir="ltr"><div dir="ltr"><br></div><br><div class="gmail_quote gmail_quote_container"><div dir="ltr" class="gmail_attr">On Fri, Apr 3, 2026 at 5:19 AM David Marchand <<a href="mailto:david.marchand@redhat.com">david.marchand@redhat.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Let's mark VMDq feature availability as a per device capability.<br>
We can then enforce API calls related to this feature are done on device<br>
with such capability.<br>
<br>
Signed-off-by: David Marchand <<a href="mailto:david.marchand@redhat.com" target="_blank">david.marchand@redhat.com</a>><br>
---<br>
drivers/net/bnxt/bnxt_ethdev.c | 3 ++-<br>
drivers/net/bnxt/bnxt_reps.c | 1 +<br>
drivers/net/intel/e1000/em_ethdev.c | 1 +<br>
drivers/net/intel/e1000/igb_ethdev.c | 1 +<br>
drivers/net/intel/fm10k/fm10k_ethdev.c | 1 +<br>
drivers/net/intel/i40e/i40e_ethdev.c | 3 ++-<br>
drivers/net/intel/i40e/i40e_vf_representor.c | 1 +<br>
drivers/net/intel/ipn3ke/ipn3ke_representor.c | 3 ++-<br>
drivers/net/intel/ixgbe/ixgbe_ethdev.c | 2 ++<br>
drivers/net/txgbe/txgbe_ethdev.c | 1 +<br>
drivers/net/txgbe/txgbe_ethdev_vf.c | 1 +<br>
lib/ethdev/rte_ethdev.c | 17 +++++++++++++++++<br>
lib/ethdev/rte_ethdev.h | 2 ++<br>
13 files changed, 34 insertions(+), 3 deletions(-)<br>
<br>
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c<br>
index b677f9491d..0f783b9e98 100644<br>
--- a/drivers/net/bnxt/bnxt_ethdev.c<br>
+++ b/drivers/net/bnxt/bnxt_ethdev.c<br>
@@ -1214,7 +1214,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,<br>
<br>
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);<br>
dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |<br>
- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;<br>
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |<br>
+ RTE_ETH_DEV_CAPA_VMDQ;<br></blockquote><div>We have not been testing the VMDq feature for some time, and we are planning to deprecate it. Please remove this change.</div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
dev_info->default_rxconf = (struct rte_eth_rxconf) {<br>
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c<br>
index e26a086f41..5e834830e2 100644<br>
--- a/drivers/net/bnxt/bnxt_reps.c<br>
+++ b/drivers/net/bnxt/bnxt_reps.c<br>
@@ -649,6 +649,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,<br>
dev_info->max_tx_queues = max_rx_rings;<br>
dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);<br>
dev_info->hash_key_size = 40;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br></blockquote><div>We have not been testing the VMDq feature for sometime, planning to deprecate this feature. Please remove this change. </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">
/* MTU specifics */<br>
diff --git a/drivers/net/intel/e1000/em_ethdev.c b/drivers/net/intel/e1000/em_ethdev.c<br>
index 9e15e882b9..389744ad5e 100644<br>
--- a/drivers/net/intel/e1000/em_ethdev.c<br>
+++ b/drivers/net/intel/e1000/em_ethdev.c<br>
@@ -1175,6 +1175,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)<br>
RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |<br>
RTE_ETH_LINK_SPEED_1G;<br>
<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
/* Preferred queue parameters */<br>
diff --git a/drivers/net/intel/e1000/igb_ethdev.c b/drivers/net/intel/e1000/igb_ethdev.c<br>
index ef1599ac38..fe68c18417 100644<br>
--- a/drivers/net/intel/e1000/igb_ethdev.c<br>
+++ b/drivers/net/intel/e1000/igb_ethdev.c<br>
@@ -2324,6 +2324,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)<br>
dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);<br>
dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |<br>
dev_info->tx_queue_offload_capa;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
switch (hw->mac.type) {<br>
diff --git a/drivers/net/intel/fm10k/fm10k_ethdev.c b/drivers/net/intel/fm10k/fm10k_ethdev.c<br>
index 97f61afec2..037d2206fd 100644<br>
--- a/drivers/net/intel/fm10k/fm10k_ethdev.c<br>
+++ b/drivers/net/intel/fm10k/fm10k_ethdev.c<br>
@@ -1444,6 +1444,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,<br>
dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G |<br>
RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |<br>
RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
<br>
return 0;<br>
}<br>
diff --git a/drivers/net/intel/i40e/i40e_ethdev.c b/drivers/net/intel/i40e/i40e_ethdev.c<br>
index 100a751225..64c29c6e85 100644<br>
--- a/drivers/net/intel/i40e/i40e_ethdev.c<br>
+++ b/drivers/net/intel/i40e/i40e_ethdev.c<br>
@@ -3878,7 +3878,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)<br>
<br>
dev_info->dev_capa =<br>
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |<br>
- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;<br>
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |<br>
+ RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *<br>
diff --git a/drivers/net/intel/i40e/i40e_vf_representor.c b/drivers/net/intel/i40e/i40e_vf_representor.c<br>
index e8f0bb62a0..d31148acb5 100644<br>
--- a/drivers/net/intel/i40e/i40e_vf_representor.c<br>
+++ b/drivers/net/intel/i40e/i40e_vf_representor.c<br>
@@ -33,6 +33,7 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,<br>
/* get dev info for the vdev */<br>
dev_info->device = ethdev->device;<br>
<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
dev_info->max_rx_queues = ethdev->data->nb_rx_queues;<br>
diff --git a/drivers/net/intel/ipn3ke/ipn3ke_representor.c b/drivers/net/intel/ipn3ke/ipn3ke_representor.c<br>
index cd34d08055..d581ee3c37 100644<br>
--- a/drivers/net/intel/ipn3ke/ipn3ke_representor.c<br>
+++ b/drivers/net/intel/ipn3ke/ipn3ke_representor.c<br>
@@ -95,7 +95,8 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,<br>
<br>
dev_info->dev_capa =<br>
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |<br>
- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;<br>
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |<br>
+ RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
<br>
dev_info-><a href="http://switch_info.name" rel="noreferrer" target="_blank">switch_info.name</a> = ethdev->device->name;<br>
diff --git a/drivers/net/intel/ixgbe/ixgbe_ethdev.c b/drivers/net/intel/ixgbe/ixgbe_ethdev.c<br>
index 57d929cf2c..5d886b3e28 100644<br>
--- a/drivers/net/intel/ixgbe/ixgbe_ethdev.c<br>
+++ b/drivers/net/intel/ixgbe/ixgbe_ethdev.c<br>
@@ -3997,6 +3997,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)<br>
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;<br>
dev_info->min_mtu = RTE_ETHER_MIN_MTU;<br>
dev_info->vmdq_queue_num = dev_info->max_rx_queues;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);<br>
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |<br>
dev_info->rx_queue_offload_capa);<br>
@@ -4115,6 +4116,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,<br>
dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;<br>
else<br>
dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);<br>
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |<br>
dev_info->rx_queue_offload_capa);<br>
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c<br>
index 5d360f8305..bd818e8269 100644<br>
--- a/drivers/net/txgbe/txgbe_ethdev.c<br>
+++ b/drivers/net/txgbe/txgbe_ethdev.c<br>
@@ -2836,6 +2836,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)<br>
dev_info->max_vfs = pci_dev->max_vfs;<br>
dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;<br>
dev_info->vmdq_queue_num = dev_info->max_rx_queues;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);<br>
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |<br>
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c<br>
index 39a5fff65c..934763574c 100644<br>
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c<br>
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c<br>
@@ -572,6 +572,7 @@ txgbevf_dev_info_get(struct rte_eth_dev *dev,<br>
dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;<br>
dev_info->max_vfs = pci_dev->max_vfs;<br>
dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;<br>
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_VMDQ;<br>
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;<br>
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);<br>
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |<br>
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c<br>
index 9577b7d848..7ba539e796 100644<br>
--- a/lib/ethdev/rte_ethdev.c<br>
+++ b/lib/ethdev/rte_ethdev.c<br>
@@ -158,6 +158,7 @@ static const struct {<br>
{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},<br>
{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},<br>
{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},<br>
+ {RTE_ETH_DEV_CAPA_VMDQ, "VMDQ"},<br>
};<br>
<br>
enum {<br>
@@ -1581,6 +1582,22 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,<br>
goto rollback;<br>
}<br>
<br>
+ if (!(dev_info.dev_capa & RTE_ETH_DEV_CAPA_VMDQ)) {<br>
+ if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) != 0) {<br>
+ RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support VMDq rx mode",<br>
+ port_id);<br>
+ ret = -EINVAL;<br>
+ goto rollback;<br>
+ }<br>
+ if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB ||<br>
+ dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY) {<br>
+ RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support VMDq tx mode",<br>
+ port_id);<br>
+ ret = -EINVAL;<br>
+ goto rollback;<br>
+ }<br>
+ }<br>
+<br>
/*<br>
* Setup new number of Rx/Tx queues and reconfigure device.<br>
*/<br>
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h<br>
index 0d8e2d0236..62c72de0e5 100644<br>
--- a/lib/ethdev/rte_ethdev.h<br>
+++ b/lib/ethdev/rte_ethdev.h<br>
@@ -1696,6 +1696,8 @@ struct rte_eth_conf {<br>
#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)<br>
/** Device supports keeping shared flow objects across restart. */<br>
#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)<br>
+/** Device supports VMDq. */<br>
+#define RTE_ETH_DEV_CAPA_VMDQ RTE_BIT64(5)<br>
/**@}*/<br>
<br>
/*<br>
-- <br>
2.53.0<br>
<br>
</blockquote></div></div>