[dpdk-dev] [PATCH v2 41/56] net/txgbe: add VMDq configure

Jiawen Wu jiawenwu at trustnetic.com
Mon Oct 5 14:08:55 CEST 2020


From: Jiawen Wu <jiawenwu at trustnetic.com>

Add multiple queue configuration with VMDq.

Signed-off-by: Jiawen Wu <jiawenwu at trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/txgbe_ethdev.c   |  35 ++++
 drivers/net/txgbe/txgbe_ethdev.h   |   2 +
 drivers/net/txgbe/txgbe_rxtx.c     | 262 +++++++++++++++++++++++++++++
 4 files changed, 300 insertions(+)
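
For reference, a minimal sketch (not part of this patch) of how an
application might request the VMDq-only mode handled here, using the
current rte_ethdev API; the port id, VLAN id and pool index below are
illustrative only:

  #include <rte_ethdev.h>

  /* Configure a port for VMDq-only Rx/Tx with 64 pools and map
   * VLAN 100 to pool 5 (illustrative values).
   */
  static int
  configure_vmdq_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
  {
      struct rte_eth_conf conf = {
          .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY },
          .txmode = { .mq_mode = ETH_MQ_TX_VMDQ_ONLY },
      };
      struct rte_eth_vmdq_rx_conf *vmdq = &conf.rx_adv_conf.vmdq_rx_conf;

      vmdq->nb_queue_pools = ETH_64_POOLS;
      vmdq->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
      vmdq->nb_pool_maps = 1;
      vmdq->pool_map[0].vlan_id = 100;
      vmdq->pool_map[0].pools = 1ULL << 5;

      return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
  }

The txgbe_dev_mq_rx_configure()/txgbe_dev_mq_tx_configure() helpers added
below pick this configuration up from dev->data->dev_conf when the port
is initialized.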

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 022e56d45..578ec05b0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -15,6 +15,7 @@ LRO                  = Y
 TSO                  = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
+VMDq                 = Y
 SR-IOV               = Y
 VLAN filter          = Y
 Rate limitation      = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index ab25086b3..7f3df1513 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -946,6 +946,17 @@ txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	return 0;
 }
 
+static void
+txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	/* VLANCTL: enable vlan filtering and allow all vlan tags through */
+	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
+
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+}
+
 static int
 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
@@ -1331,6 +1342,11 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable vlan filtering for VMDq */
+		txgbe_vmdq_vlan_hw_filter_enable(dev);
+	}
+
 	/* Restore vf rate limit */
 	if (vfinfo != NULL) {
 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -2764,6 +2780,25 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 	return 0;
 }
 
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+	uint32_t new_val = orig_val;
+
+	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+		new_val |= TXGBE_POOLETHCTL_UTA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+		new_val |= TXGBE_POOLETHCTL_MCHA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+		new_val |= TXGBE_POOLETHCTL_UCHA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+		new_val |= TXGBE_POOLETHCTL_BCA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+		new_val |= TXGBE_POOLETHCTL_MCP;
+
+	return new_val;
+}
+
 static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index eea8191b9..4bab7f358 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -255,6 +255,8 @@ void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 			    uint16_t tx_rate, uint64_t q_msk);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 24677651a..3ce4035c6 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2566,6 +2566,146 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
 	dev->data->nb_tx_queues = 0;
 }
 
+static void
+txgbe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw;
+
+	hw = TXGBE_DEV_HW(dev);
+
+	wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
+}
+
+#define NUM_VFTA_REGISTERS 128
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct txgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, vlanctrl;
+	uint32_t vmolr = 0;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	num_pools = cfg->nb_queue_pools;
+
+	txgbe_rss_disable(dev);
+
+	/* enable vmdq */
+	mrqc = TXGBE_PORTCTL_NUMVT_64;
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+	/* turn on virtualisation and set the default pool */
+	vt_ctl = TXGBE_POOLCTL_RPLEN;
+	if (cfg->enable_default_pool)
+		vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+	else
+		vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+	wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+	for (i = 0; i < (int)num_pools; i++) {
+		vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+		wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+	}
+
+	/* enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+	/* enable receive for pools 0-31, and 32-63 when using 64 pools */
+	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+	if (num_pools == ETH_64_POOLS)
+		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
+
+	/*
+	 * Allow pools to read specific MAC addresses.
+	 * In this case, all pools should be able to read from MAC address 0.
+	 */
+	wr32(hw, TXGBE_ETHADDRIDX, 0);
+	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+	wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+	/* set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in PSRVLAN register and set the valid bit */
+		wr32(hw, TXGBE_PSRVLANIDX, i);
+		wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+				TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+		/*
+		 * Put the allowed pools in the PSRVLANPLM registers. As we
+		 * only have 16 or 64 pools, we only need to use the first
+		 * half of the register, i.e. bits 0-31.
+		 */
+		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+			wr32(hw, TXGBE_PSRVLANPLM(0),
+				(cfg->pool_map[i].pools & UINT32_MAX));
+		else
+			wr32(hw, TXGBE_PSRVLANPLM(1),
+				((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+
+	}
+
+	/* Tx general switch control: enable VMDq loopback */
+	if (cfg->enable_loop_back) {
+		wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+		for (i = 0; i < 64; i++)
+			wr32m(hw, TXGBE_POOLETHCTL(i),
+				TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+	}
+
+	txgbe_flush(hw);
+}
+
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+	uint32_t reg;
+	uint32_t q;
+
+	PMD_INIT_FUNC_TRACE();
+	/* PF VF Transmit Enable */
+	wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+	wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+	/* Disable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+		TXGBE_PORTCTL_NUMVT_64);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < 128; q++) {
+		uint32_t val = 1U << (q % 32);
+		wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+	}
+
+	/* Enable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	txgbe_flush(hw);
+}
+
 static int __rte_cold
 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 {
@@ -2597,6 +2737,120 @@ txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 
 	return 0;
 }
+static int
+txgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mrqc;
+
+	mrqc = rd32(hw, TXGBE_PORTCTL);
+	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case ETH_64_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_64;
+		break;
+
+	case ETH_32_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_32;
+		break;
+
+	case ETH_16_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_16;
+		break;
+	default:
+		PMD_INIT_LOG(ERR,
+			"invalid pool number in IOV mode");
+		return 0;
+	}
+
+	wr32(hw, TXGBE_PORTCTL, mrqc);
+
+	return 0;
+}
+
+static int
+txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * VMDq multi-queue setting
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_VMDQ_ONLY:
+			txgbe_vmdq_rx_hw_configure(dev);
+			break;
+
+		case ETH_MQ_RX_NONE:
+		default:
+			/* if mq_mode is none, disable RSS mode. */
+			txgbe_rss_disable(dev);
+			break;
+		}
+	} else {
+		/* SRIOV active scheme
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		default:
+			txgbe_config_vf_default(dev);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mtqc;
+	uint32_t rttdcs;
+
+	/* disable arbiter */
+	rttdcs = rd32(hw, TXGBE_ARBTXCTL);
+	rttdcs |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB or VMDq multi-queue setting
+		 */
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+			txgbe_vmdq_tx_hw_configure(hw);
+		else {
+			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
+		}
+	} else {
+		switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+		/*
+		 * SRIOV active scheme
+		 * FIXME: add support for DCB together with VMDq and SR-IOV
+		 */
+		case ETH_64_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_64;
+			break;
+		case ETH_32_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_32;
+			break;
+		case ETH_16_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_16;
+			break;
+		default:
+			mtqc = 0;
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+		}
+		wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
+	}
+
+	/* re-enable arbiter */
+	rttdcs &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	return 0;
+}
 
 /**
  * txgbe_get_rscctl_maxdesc
@@ -2930,6 +3184,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
+	/*
+	 * Device configured with multiple RX queues.
+	 */
+	txgbe_dev_mq_rx_configure(dev);
+
 	/*
 	 * Setup the Checksum Register.
 	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
@@ -2991,6 +3250,9 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
 		wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
 	}
+
+	/* Device configured with multiple TX queues. */
+	txgbe_dev_mq_tx_configure(dev);
 }
 
 /*
-- 
2.18.4