[dpdk-dev] [PATCH v3 25/34] net/ice: support Rx queue interruption

Wenzhuo Lu wenzhuo.lu at intel.com
Wed Dec 12 07:59:55 CET 2018


Add the following ops:
rx_queue_intr_enable
rx_queue_intr_disable

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
Signed-off-by: Qiming Yang <qiming.yang at intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
Signed-off-by: Jingjing Wu <jingjing.wu at intel.com>
---
 drivers/net/ice/ice_ethdev.c | 230 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 230 insertions(+)
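
Not part of the patch itself: a minimal application-side sketch of how
the new ops are exercised through the generic ethdev API, assuming the
port was configured with dev_conf.intr_conf.rxq = 1. The port/queue
ids, burst size and sleep policy below are illustrative assumptions,
not taken from this patch:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    /* bind Rx queue 0 of port 0 to this thread's epoll instance */
    rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
                              RTE_INTR_EVENT_ADD, NULL);

    for (;;) {
            struct rte_mbuf *pkts[32];
            uint16_t nb = rte_eth_rx_burst(0, 0, pkts, 32);

            if (nb == 0) {
                    struct rte_epoll_event ev;

                    /* arm the queue interrupt, then sleep until traffic
                     * arrives; these two calls reach the
                     * rx_queue_intr_enable/disable ops added below
                     */
                    rte_eth_dev_rx_intr_enable(0, 0);
                    rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
                    rte_eth_dev_rx_intr_disable(0, 0);
                    continue;
            }
            /* ... process and free the nb received mbufs ... */
    }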

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d82ce23..d78169a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -48,6 +48,10 @@ static int ice_macaddr_add(struct rte_eth_dev *dev,
 			   __rte_unused uint32_t index,
 			   uint32_t pool);
 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
+static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
+				    uint16_t queue_id);
+static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
+				     uint16_t queue_id);
 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
 			     uint16_t pvid, int on);
 
@@ -86,6 +90,8 @@ static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
 	.reta_query                   = ice_rss_reta_query,
 	.rss_hash_update              = ice_rss_hash_update,
 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
+	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
+	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
 	.vlan_pvid_set                = ice_vlan_pvid_set,
 };
 
@@ -1397,6 +1403,186 @@ static int ice_init_rss(struct ice_pf *pf)
 	return 0;
 }
 
+static void
+__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
+		       int base_queue, int nb_queue)
+{
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint32_t val, val_tx;
+	int i;
+
+	for (i = 0; i < nb_queue; i++) {
+		/* do actual bind */
+		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
+		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
+		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
+			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
+
+		PMD_DRV_LOG(INFO, "queue %d is bound to vector %d",
+			    base_queue + i, msix_vect);
+		/* set ITR0 value */
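+		/* (0x10 is the interval, in the device's ITR granularity) */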
+		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
+		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
+		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
+	}
+}
+
+static void
+ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
+{
+	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint16_t msix_vect = vsi->msix_intr;
+	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
+	uint16_t queue_idx = 0;
+	int record = 0;
+	int i;
+
+	/* clear Rx/Tx queue interrupt */
+	for (i = 0; i < vsi->nb_used_qps; i++) {
+		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+	}
+
+	/* PF bind interrupt */
+	if (rte_intr_dp_is_en(intr_handle)) {
+		queue_idx = 0;
+		record = 1;
+	}
+
+	for (i = 0; i < vsi->nb_used_qps; i++) {
+		if (nb_msix <= 1) {
+			if (!rte_intr_allow_others(intr_handle))
+				msix_vect = ICE_MISC_VEC_ID;
+
+			/* uio: map all queues to one msix_vect */
+			__vsi_queues_bind_intr(vsi, msix_vect,
+					       vsi->base_queue + i,
+					       vsi->nb_used_qps - i);
+
+			for (; !!record && i < vsi->nb_used_qps; i++)
+				intr_handle->intr_vec[queue_idx + i] =
+					msix_vect;
+			break;
+		}
+
+		/* vfio 1:1 queue/msix_vect mapping */
+		__vsi_queues_bind_intr(vsi, msix_vect,
+				       vsi->base_queue + i, 1);
+
+		if (!!record)
+			intr_handle->intr_vec[queue_idx + i] = msix_vect;
+
+		msix_vect++;
+		nb_msix--;
+	}
+}
+
+static void
+ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
+{
+	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint16_t msix_intr, i;
+
+	if (rte_intr_allow_others(intr_handle))
+		for (i = 0; i < vsi->nb_used_qps; i++) {
+			msix_intr = vsi->msix_intr + i;
+			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+				      GLINT_DYN_CTL_INTENA_M |
+				      GLINT_DYN_CTL_CLEARPBA_M |
+				      GLINT_DYN_CTL_ITR_INDX_M |
+				      GLINT_DYN_CTL_WB_ON_ITR_M);
+		}
+	else
+		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
+			      GLINT_DYN_CTL_INTENA_M |
+			      GLINT_DYN_CTL_CLEARPBA_M |
+			      GLINT_DYN_CTL_ITR_INDX_M |
+			      GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static void
+ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
+{
+	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
+	uint16_t msix_intr, i;
+
+	/* disable interrupts and also clear all the existing config */
+	for (i = 0; i < vsi->nb_qps; i++) {
+		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
+		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
+		rte_wmb();
+	}
+
+	if (rte_intr_allow_others(intr_handle))
+		/* vfio-pci */
+		for (i = 0; i < vsi->nb_msix; i++) {
+			msix_intr = vsi->msix_intr + i;
+			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
+				      GLINT_DYN_CTL_WB_ON_ITR_M);
+		}
+	else
+		/* igb_uio */
+		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
+}
+
+static int
+ice_rxq_intr_setup(struct rte_eth_dev *dev)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_vsi *vsi = pf->main_vsi;
+	uint32_t intr_vector = 0;
+
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if ((rte_intr_cap_multiple(intr_handle) ||
+	     !RTE_ETH_DEV_SRIOV(dev).active) &&
+	    dev->data->dev_conf.intr_conf.rxq != 0) {
+		intr_vector = dev->data->nb_rx_queues;
+		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
+			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+				    ICE_MAX_INTR_QUEUE_NUM);
+			return -ENOTSUP;
+		}
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -1;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
+			    0);
+		if (!intr_handle->intr_vec) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to allocate %d rx_queues intr_vec",
+				    dev->data->nb_rx_queues);
+			return -ENOMEM;
+		}
+	}
+
+	/* Map queues with MSIX interrupt */
+	vsi->nb_used_qps = dev->data->nb_rx_queues;
+	ice_vsi_queues_bind_intr(vsi);
+
+	/* Enable interrupts for all the queues */
+	ice_vsi_enable_queues_intr(vsi);
+
+	rte_intr_enable(intr_handle);
+
+	return 0;
+}
+
 static int
 ice_dev_start(struct rte_eth_dev *dev)
 {
@@ -1431,6 +1617,10 @@ static int ice_init_rss(struct ice_pf *pf)
 		goto rx_err;
 	}
 
+	/* enable Rx interrupts and map Rx queues to interrupt vectors */
+	if (ice_rxq_intr_setup(dev))
+		return -EIO;
+
 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
@@ -1465,6 +1655,7 @@ static int ice_init_rss(struct ice_pf *pf)
 {
 	struct rte_eth_dev_data *data = dev->data;
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct ice_vsi *main_vsi = pf->main_vsi;
 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t i;
@@ -1481,6 +1672,9 @@ static int ice_init_rss(struct ice_pf *pf)
 	for (i = 0; i < data->nb_tx_queues; i++)
 		ice_tx_queue_stop(dev, i);
 
+	/* disable all queue interrupts */
+	ice_vsi_disable_queues_intr(main_vsi);
+
 	/* Clear all queues and release mbufs */
 	ice_clear_queues(dev);
 
@@ -2307,6 +2501,42 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
+				    uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t val;
+	uint16_t msix_intr;
+
+	msix_intr = intr_handle->intr_vec[queue_id];
+
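+	/* INTENA enables the vector, CLEARPBA clears its pending-bit-array
+	 * entry, and setting ITR_INDX to all ones selects the no-ITR index
+	 * so the interrupt is not rate limited
+	 */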
+	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+	      GLINT_DYN_CTL_ITR_INDX_M;
+	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
+
+	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
+	rte_intr_enable(&pci_dev->intr_handle);
+
+	return 0;
+}
+
+static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
+				     uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint16_t msix_intr;
+
+	msix_intr = intr_handle->intr_vec[queue_id];
+
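+	/* clear INTENA but keep WB_ON_ITR set, so descriptor write-back
+	 * still happens while the queue interrupt stays masked
+	 */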
+	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
+
+	return 0;
+}
+
 static int
 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
 {
-- 
1.9.3


