[dpdk-dev] [PATCH v2 2/2] i40e: Enlarge the number of supported queues

Helin Zhang helin.zhang at intel.com
Thu Oct 22 09:28:07 CEST 2015


It enlarges the number of supported queues to the hardware-allowed
maximum. Previously the driver imposed an artificial software limit of
64 queues per physical port, which is not reasonable.

Signed-off-by: Helin Zhang <helin.zhang at intel.com>
---
 config/common_bsdapp           |   3 +-
 config/common_linuxapp         |   3 +-
 drivers/net/i40e/i40e_ethdev.c | 138 +++++++++++++++++------------------------
 drivers/net/i40e/i40e_ethdev.h |   8 +++
 4 files changed, 69 insertions(+), 83 deletions(-)

v2 changes:
Fixed an issue where a wrongly configured number of VF queues was used

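Note (illustration only, not part of the patch): the new *_qp_offset fields
partition the PF queue space back to back -- FDIR queues first, then LAN,
then VF, then VMDq, each range starting where the previous one ends. The
field names below mirror struct i40e_pf in i40e_ethdev.h, but the trimmed
stand-in struct and helper are a hypothetical sketch of that layout:

#include <stdint.h>

/* Trimmed stand-in for the relevant struct i40e_pf fields. */
struct qp_layout {
	uint16_t fdir_nb_qps, fdir_qp_offset;
	uint16_t lan_nb_qps,  lan_qp_offset;
	uint16_t vf_nb_qps,   vf_qp_offset,   vf_num;
	uint16_t vmdq_nb_qps, vmdq_qp_offset, max_nb_vmdq_vsi;
};

/* Lay the queue ranges out consecutively and return the total queue
 * pair count, which i40e_pf_parameter_init() then checks against the
 * hardware capability (func_caps.num_tx_qp). */
static uint16_t
layout_queues(struct qp_layout *p)
{
	p->fdir_qp_offset = 0;                                  /* FDIR first */
	p->lan_qp_offset  = p->fdir_qp_offset + p->fdir_nb_qps; /* then LAN  */
	p->vf_qp_offset   = p->lan_qp_offset + p->lan_nb_qps;   /* then VFs  */
	p->vmdq_qp_offset = p->vf_qp_offset +
			    p->vf_nb_qps * p->vf_num;           /* then VMDq */
	return p->vmdq_qp_offset + p->vmdq_nb_qps * p->max_nb_vmdq_vsi;
}
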
diff --git a/config/common_bsdapp b/config/common_bsdapp
index b37dcf4..dac6dad 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -141,7 +141,7 @@ CONFIG_RTE_LIBRTE_KVARGS=y
 CONFIG_RTE_LIBRTE_ETHER=y
 CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
 CONFIG_RTE_MAX_ETHPORTS=32
-CONFIG_RTE_MAX_QUEUES_PER_PORT=256
+CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
 CONFIG_RTE_LIBRTE_IEEE1588=n
 CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
@@ -187,6 +187,7 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
+CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
 # interval up to 8160 us, aligned to 2 (or default value)
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0de43d5..2ce8d66 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -139,7 +139,7 @@ CONFIG_RTE_LIBRTE_KVARGS=y
 CONFIG_RTE_LIBRTE_ETHER=y
 CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
 CONFIG_RTE_MAX_ETHPORTS=32
-CONFIG_RTE_MAX_QUEUES_PER_PORT=256
+CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
 CONFIG_RTE_LIBRTE_IEEE1588=n
 CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
@@ -185,6 +185,7 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
+CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
 # interval up to 8160 us, aligned to 2 (or default value)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4b70588..8928b0a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2240,113 +2240,88 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-	uint16_t sum_queues = 0, sum_vsis, left_queues;
+	uint16_t qp_count = 0, vsi_count = 0;
 
-	/* First check if FW support SRIOV */
 	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
 		return -EINVAL;
 	}
 
 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
-	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
-	PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
-	/* Allocate queues for pf */
-	if (hw->func_caps.rss) {
-		pf->flags |= I40E_FLAG_RSS;
-		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
-			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
-		pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
-	} else
+	pf->max_num_vsi = hw->func_caps.num_vsis;
+	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+	/* FDir queue/VSI allocation */
+	pf->fdir_qp_offset = 0;
+	if (hw->func_caps.fd) {
+		pf->flags |= I40E_FLAG_FDIR;
+		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+	} else {
+		pf->fdir_nb_qps = 0;
+	}
+	qp_count += pf->fdir_nb_qps;
+	vsi_count += 1;
+
+	/* LAN queue/VSI allocation */
+	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+	if (!hw->func_caps.rss) {
 		pf->lan_nb_qps = 1;
-	sum_queues = pf->lan_nb_qps;
-	/* Default VSI is not counted in */
-	sum_vsis = 0;
-	PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
+	} else {
+		pf->flags |= I40E_FLAG_RSS;
+		pf->lan_nb_qps = pf->lan_nb_qp_max;
+	}
+	qp_count += pf->lan_nb_qps;
+	vsi_count += 1;
 
+	/* VF queue/VSI allocation */
+	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
 	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
 		pf->flags |= I40E_FLAG_SRIOV;
 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
-		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
-			PMD_INIT_LOG(ERR, "Config VF number %u, "
-				     "max supported %u.",
-				     dev->pci_dev->max_vfs,
-				     hw->func_caps.num_vfs);
-			return -EINVAL;
-		}
-		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
-			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-				     "max support %u queues.",
-				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
-			return -EINVAL;
-		}
 		pf->vf_num = dev->pci_dev->max_vfs;
-		sum_queues += pf->vf_nb_qps * pf->vf_num;
-		sum_vsis   += pf->vf_num;
-		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
-			     pf->vf_num, pf->vf_nb_qps);
-	} else
+		PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
+			    "in total %u queues", pf->vf_num, pf->vf_nb_qps,
+			    pf->vf_nb_qps * pf->vf_num);
+	} else {
+		pf->vf_nb_qps = 0;
 		pf->vf_num = 0;
+	}
+	qp_count += pf->vf_nb_qps * pf->vf_num;
+	vsi_count += pf->vf_num;
 
+	/* VMDq queue/VSI allocation */
+	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
 	if (hw->func_caps.vmdq) {
 		pf->flags |= I40E_FLAG_VMDQ;
-		pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+		pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
 		pf->max_nb_vmdq_vsi = 1;
-		/*
-		 * If VMDQ available, assume a single VSI can be created.  Will adjust
-		 * later.
-		 */
-		sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
-		sum_vsis += pf->max_nb_vmdq_vsi;
+		PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
+			    "in total %u queues", pf->max_nb_vmdq_vsi,
+			    pf->vmdq_nb_qps,
+			    pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
 	} else {
 		pf->vmdq_nb_qps = 0;
 		pf->max_nb_vmdq_vsi = 0;
 	}
-	pf->nb_cfg_vmdq_vsi = 0;
-
-	if (hw->func_caps.fd) {
-		pf->flags |= I40E_FLAG_FDIR;
-		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
-		/**
-		 * Each flow director consumes one VSI and one queue,
-		 * but can't calculate out predictably here.
-		 */
-	}
+	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+	vsi_count += pf->max_nb_vmdq_vsi;
 
-	if (sum_vsis > pf->max_num_vsi ||
-		sum_queues > hw->func_caps.num_rx_qp) {
-		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
-		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
-			     pf->max_num_vsi, sum_vsis);
-		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
-			     hw->func_caps.num_rx_qp, sum_queues);
+	if (qp_count > hw->func_caps.num_tx_qp) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
+			    "the hardware maximum %u", qp_count,
+			    hw->func_caps.num_tx_qp);
 		return -EINVAL;
 	}
-
-	/* Adjust VMDQ setting to support as many VMs as possible */
-	if (pf->flags & I40E_FLAG_VMDQ) {
-		left_queues = hw->func_caps.num_rx_qp - sum_queues;
-
-		pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
-					pf->max_num_vsi - sum_vsis);
-
-		/* Limit the max VMDQ number that rte_ether that can support  */
-		pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-					ETH_64_POOLS - 1);
-
-		PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
-				pf->max_nb_vmdq_vsi);
-		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
-	}
-
-	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
-	 * cause */
-	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
-			     sum_vsis, hw->func_caps.num_msix_vectors);
+	if (vsi_count > hw->func_caps.num_vsis) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
+			    "the hardware maximum %u", vsi_count,
+			    hw->func_caps.num_vsis);
 		return -EINVAL;
 	}
-	return I40E_SUCCESS;
+
+	return 0;
 }
 
 static int
@@ -2736,7 +2711,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
 	bsf = rte_bsf32(qpnum_per_tc);
 
 	/* Adjust the queue number to actual queues that can be applied */
-	vsi->nb_qps = qpnum_per_tc * total_tc;
+	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+		vsi->nb_qps = qpnum_per_tc * total_tc;
 
 	/**
 	 * Configure TC and queue mapping parameters, for enabled TC,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..7656b20 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -370,10 +370,18 @@ struct i40e_pf {
 	uint16_t vf_num;
 	/* Each of below queue pairs should be power of 2 since it's the
 	   precondition after TC configuration applied */
+	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t lan_qp_offset;
+	uint16_t vmdq_nb_qp_max;
 	uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+	uint16_t vmdq_qp_offset;
+	uint16_t vf_nb_qp_max;
 	uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+	uint16_t vf_qp_offset;
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+	uint16_t fdir_qp_offset;
+
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	/* store VXLAN UDP ports */
 	uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
-- 
1.9.3


