[dpdk-dev] [PATCH v7 1/2] net/i40e: queue region set and flush

Wei Zhao wei.zhao1 at intel.com
Fri Sep 29 11:16:23 CEST 2017


This feature enables queue region configuration for RSS in the PF,
so that different traffic classes or different packet
classification types can be separated into different queues in
different queue regions. This patch can set the queue region range,
including the number of queues in a region and the index of the
first queue. It enables mapping between different priorities (UP)
and different traffic classes, and between a region index and a
specific flowtype (PCTYPE). It also provides a way to flush all
the queue region configuration described above.

Signed-off-by: Wei Zhao <wei.zhao1 at intel.com>
---
 drivers/net/i40e/i40e_ethdev.c            |  27 +-
 drivers/net/i40e/i40e_ethdev.h            |  39 +++
 drivers/net/i40e/rte_pmd_i40e.c           | 520 ++++++++++++++++++++++++++++++
 drivers/net/i40e/rte_pmd_i40e.h           |  60 ++++
 drivers/net/i40e/rte_pmd_i40e_version.map |   1 +
 5 files changed, 641 insertions(+), 6 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 720f067..03511f1 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -137,10 +137,6 @@
 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
 
-#define I40E_MAX_PERCENT            100
-#define I40E_DEFAULT_DCB_APP_NUM    1
-#define I40E_DEFAULT_DCB_APP_PRIO   3
-
 /**
  * Below are values for writing un-exposed registers suggested
  * by silicon experts
@@ -309,7 +305,6 @@ static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
 static int i40e_pf_setup(struct i40e_pf *pf);
 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
-static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
 static int i40e_dcb_setup(struct rte_eth_dev *dev);
 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
@@ -1036,6 +1031,20 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 	return ret;
 }
 
+void
+i40e_init_queue_region_conf(struct rte_eth_dev *dev)
+{
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	uint16_t i;
+	/* MAX_INDEX is the last valid HREGION register, so clear inclusively */
+	for (i = 0; i <= I40E_PFQF_HREGION_MAX_INDEX; i++)
+		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
+
+	memset(info, 0, sizeof(struct i40e_queue_region_info));
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
@@ -1311,6 +1320,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	if (ret < 0)
 		goto err_init_fdir_filter_list;
 
+	/* initialize queue region configuration */
+	i40e_init_queue_region_conf(dev);
+
 	return 0;
 
 err_init_fdir_filter_list:
@@ -2123,6 +2135,9 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 	/* reset hierarchy commit */
 	pf->tm_conf.committed = false;
 
+	/* Remove all the queue region configuration */
+	i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
 	hw->adapter_stopped = 1;
 }
 
@@ -10419,7 +10434,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
  *
  * Returns 0 on success, negative value on failure
  */
-static int
+int
 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ad80f0f..3d237cd 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -260,6 +260,12 @@ enum i40e_flxpld_layer_idx {
 #define I40E_QOS_BW_WEIGHT_MIN 1
 /* The max bandwidth weight is 127. */
 #define I40E_QOS_BW_WEIGHT_MAX 127
+/* The max queue region index is 7. */
+#define I40E_REGION_MAX_INDEX 7
+
+#define I40E_MAX_PERCENT            100
+#define I40E_DEFAULT_DCB_APP_NUM    1
+#define I40E_DEFAULT_DCB_APP_PRIO   3
 
 /**
  * The overhead from MTU to max frame size.
@@ -541,6 +547,34 @@ struct i40e_ethertype_rule {
 	struct rte_hash *hash_table;
 };
 
+/* queue region info */
+struct i40e_region_info {
+	/* the region id for this configuration */
+	uint8_t region_id;
+	/* the start queue index for this region */
+	uint8_t queue_start_index;
+	/* the total queue number of this queue region */
+	uint8_t queue_num;
+	/* the total number of user priority for this region */
+	uint8_t user_priority_num;
+	/* the packet's user priority for this region */
+	uint8_t user_priority[I40E_MAX_USER_PRIORITY];
+	/* the total number of flowtype for this region */
+	uint8_t flowtype_num;
+	/**
+	 * the pctype or hardware flowtype of packet,
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+	uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX];
+};
+
+struct i40e_queue_region_info {
+	/* the total number of queue region for this port */
+	uint16_t queue_region_number;
+	struct i40e_region_info region[I40E_REGION_MAX_INDEX + 1];
+};
+
 /* Tunnel filter number HW supports */
 #define I40E_MAX_TUNNEL_FILTER_NUM 400
 
@@ -776,6 +810,7 @@ struct i40e_pf {
 	struct i40e_fdir_info fdir; /* flow director info */
 	struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
 	struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+	struct i40e_queue_region_info queue_region; /* queue region info */
 	struct i40e_fc_conf fc_conf; /* Flow control conf */
 	struct i40e_mirror_rule_list mirror_list;
 	uint16_t nb_mirror_rule;   /* The number of mirror rules */
@@ -1003,6 +1038,10 @@ void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
 int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void i40e_tm_conf_init(struct rte_eth_dev *dev);
 void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
+int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
+int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+		struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
+void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
 
 #define I40E_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index c08e07a..a8afb3b 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -35,6 +35,7 @@
 #include <rte_tailq.h>
 
 #include "base/i40e_prototype.h"
+#include "base/i40e_dcb.h"
 #include "i40e_ethdev.h"
 #include "i40e_pf.h"
 #include "i40e_rxtx.h"
@@ -2161,3 +2162,522 @@ rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id,
 
 	return 0;
 }
+
+static int
+i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
+			      struct i40e_pf *pf)
+{
+	uint16_t i;
+	struct i40e_vsi *vsi = pf->main_vsi;
+	uint16_t queue_offset, bsf, tc_index;
+	struct i40e_vsi_context ctxt;
+	struct i40e_aqc_vsi_properties_data *vsi_info;
+	struct i40e_queue_region_info *region_info =
+				&pf->queue_region;
+	int32_t ret = -EINVAL;
+
+	if (!region_info->queue_region_number) {
+		PMD_INIT_LOG(ERR, "no queue region has been set before");
+		return ret;
+	}
+
+	memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
+
+	/* Update Queue Pairs Mapping for currently enabled UPs */
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = hw->pf_id;
+	ctxt.vf_num = 0;
+	ctxt.uplink_seid = vsi->uplink_seid;
+	ctxt.info = vsi->info;
+	vsi_info = &ctxt.info;
+
+	memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
+	memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
+
+	/**
+	 * Configure queue region and queue mapping parameters,
+	 * for enabled queue region, allocate queues to this region.
+	 */
+
+	for (i = 0; i < region_info->queue_region_number; i++) {
+		tc_index = region_info->region[i].region_id;
+		bsf = rte_bsf32(region_info->region[i].queue_num);
+		queue_offset = region_info->region[i].queue_start_index;
+		vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
+			(queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+	}
+
+	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
+	vsi_info->mapping_flags |=
+			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+	vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+	vsi_info->valid_sections |=
+		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+	/* Update the VSI after updating the VSI queue-mapping information */
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
+				hw->aq.asq_last_status);
+		return ret;
+	}
+	/* update the local VSI info with updated queue map */
+	(void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+					sizeof(vsi->info.tc_mapping));
+	(void)rte_memcpy(&vsi->info.queue_mapping,
+			&ctxt.info.queue_mapping,
+			sizeof(vsi->info.queue_mapping));
+	vsi->info.mapping_flags = ctxt.info.mapping_flags;
+	vsi->info.valid_sections = 0;
+
+	return 0;
+}
+
+
+static int
+i40e_queue_region_set_region(struct i40e_pf *pf,
+				struct rte_i40e_rss_region_conf *conf_ptr)
+{
+	uint16_t i;
+	struct i40e_vsi *main_vsi = pf->main_vsi;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	int32_t ret = -EINVAL;
+
+	if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
+				conf_ptr->queue_num <= 64)) {
+		PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+			"total number of queues do not exceed the VSI allocation");
+		return ret;
+	}
+
+	if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
+		PMD_DRV_LOG(ERR, "the queue region max index is 7");
+		return ret;
+	}
+
+	if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
+					> main_vsi->nb_used_qps) {
+		PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
+		return ret;
+	}
+
+	for (i = 0; i < info->queue_region_number; i++)
+		if (conf_ptr->region_id == info->region[i].region_id)
+			break;
+
+	if (i == info->queue_region_number &&
+				i <= I40E_REGION_MAX_INDEX) {
+		info->region[i].region_id = conf_ptr->region_id;
+		info->region[i].queue_num = conf_ptr->queue_num;
+		info->region[i].queue_start_index =
+			conf_ptr->queue_start_index;
+		info->queue_region_number++;
+	} else {
+		PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+i40e_queue_region_set_flowtype(struct i40e_pf *pf,
+			struct rte_i40e_rss_region_conf *rss_region_conf)
+{
+	int32_t ret = -EINVAL;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	uint16_t i, j;
+	uint16_t region_index, flowtype_index;
+
+	/**
+	 * For the pctype or hardware flowtype of packet,
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+
+	if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
+		PMD_DRV_LOG(ERR, "the queue region max index is 7");
+		return ret;
+	}
+
+	if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
+		PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+		return ret;
+	}
+
+
+	for (i = 0; i < info->queue_region_number; i++)
+		if (rss_region_conf->region_id == info->region[i].region_id)
+			break;
+
+	if (i == info->queue_region_number) {
+		PMD_DRV_LOG(ERR, "that region id has not been set before");
+		ret = -ENODATA;
+		return ret;
+	}
+	region_index = i;
+
+	for (i = 0; i < info->queue_region_number; i++) {
+		for (j = 0; j < info->region[i].flowtype_num; j++) {
+			if (rss_region_conf->hw_flowtype ==
+				info->region[i].hw_flowtype[j]) {
+				PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
+				return 0;
+
+			}
+		}
+	}
+
+	flowtype_index = info->region[region_index].flowtype_num;
+	info->region[region_index].hw_flowtype[flowtype_index] =
+					rss_region_conf->hw_flowtype;
+	info->region[region_index].flowtype_num++;
+
+	return 0;
+}
+
+static void
+i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
+				struct i40e_pf *pf)
+{
+	uint8_t hw_flowtype;
+	uint32_t pfqf_hregion;
+	uint16_t i, j, index;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+
+	/**
+	 * For the pctype or hardware flowtype of packet,
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+
+	for (i = 0; i < info->queue_region_number; i++) {
+		for (j = 0; j < info->region[i].flowtype_num; j++) {
+			hw_flowtype = info->region[i].hw_flowtype[j];
+			index = hw_flowtype >> 3;
+			pfqf_hregion =
+				i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
+
+			if ((hw_flowtype & 0x7) == 0) {
+				pfqf_hregion |= info->region[i].region_id <<
+					I40E_PFQF_HREGION_REGION_0_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 1) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_1_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 2) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_2_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 3) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_3_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 4) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_4_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 5) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_5_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
+			} else if ((hw_flowtype & 0x7) == 6) {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_6_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
+			} else {
+				pfqf_hregion |= info->region[i].region_id  <<
+					I40E_PFQF_HREGION_REGION_7_SHIFT;
+				pfqf_hregion |= 1 <<
+					I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
+			}
+
+			i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
+						pfqf_hregion);
+		}
+	}
+}
+static int
+i40e_queue_region_set_user_priority(struct i40e_pf *pf,
+		struct rte_i40e_rss_region_conf *rss_region_conf)
+{
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	int32_t ret = -EINVAL;
+	uint16_t i, j, region_index;
+
+	if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
+		PMD_DRV_LOG(ERR, "the user priority max index is 7");
+		return ret;
+	}
+
+	if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
+		PMD_DRV_LOG(ERR, "the region_id max index is 7");
+		return ret;
+	}
+
+	for (i = 0; i < info->queue_region_number; i++)
+		if (rss_region_conf->region_id == info->region[i].region_id)
+			break;
+
+	if (i == info->queue_region_number) {
+		PMD_DRV_LOG(ERR, "that region id has not been set before");
+		ret = -ENODATA;
+		return ret;
+	}
+
+	region_index = i;
+
+	for (i = 0; i < info->queue_region_number; i++) {
+		for (j = 0; j < info->region[i].user_priority_num; j++) {
+			if (info->region[i].user_priority[j] ==
+				rss_region_conf->user_priority) {
+				PMD_DRV_LOG(ERR, "that user priority has been set before");
+				return 0;
+			}
+		}
+	}
+
+	j = info->region[region_index].user_priority_num;
+	info->region[region_index].user_priority[j] =
+					rss_region_conf->user_priority;
+	info->region[region_index].user_priority_num++;
+
+	return 0;
+}
+
+static int
+i40e_queue_region_dcb_configure(struct i40e_hw *hw,
+				struct i40e_pf *pf)
+{
+	struct i40e_dcbx_config dcb_cfg_local;
+	struct i40e_dcbx_config *dcb_cfg;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+	int32_t ret = -EINVAL;
+	uint16_t i, j, prio_index, region_index;
+	uint8_t tc_map, tc_bw, bw_lf;
+
+	if (!info->queue_region_number) {
+		PMD_DRV_LOG(ERR, "No queue region been set before");
+		return ret;
+	}
+
+	dcb_cfg = &dcb_cfg_local;
+	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+	/* assume each tc has the same bw */
+	tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
+	for (i = 0; i < info->queue_region_number; i++)
+		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+	/* to ensure the sum of tcbw is equal to 100 */
+	bw_lf = I40E_MAX_PERCENT %  info->queue_region_number;
+	for (i = 0; i < bw_lf; i++)
+		dcb_cfg->etscfg.tcbwtable[i]++;
+
+	/* assume each tc has the same Transmission Selection Algorithm */
+	for (i = 0; i < info->queue_region_number; i++)
+		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+	for (i = 0; i < info->queue_region_number; i++) {
+		for (j = 0; j < info->region[i].user_priority_num; j++) {
+			prio_index = info->region[i].user_priority[j];
+			region_index = info->region[i].region_id;
+			dcb_cfg->etscfg.prioritytable[prio_index] =
+						region_index;
+		}
+	}
+
+	/* FW needs one App to configure HW */
+	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+	tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
+
+	dcb_cfg->pfc.willing = 0;
+	dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+	dcb_cfg->pfc.pfcenable = tc_map;
+
+	/* Copy the new config to the current config */
+	*old_cfg = *dcb_cfg;
+	old_cfg->etsrec = old_cfg->etscfg;
+	ret = i40e_set_dcb_config(hw);
+
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+	struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
+{
+	int32_t ret = -EINVAL;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+
+	if (on) {
+		i40e_queue_region_pf_flowtype_conf(hw, pf);
+
+		ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+		if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+			return ret;
+		}
+
+		ret = i40e_queue_region_dcb_configure(hw, pf);
+		if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+			return ret;
+		}
+
+		return 0;
+	}
+
+	info->queue_region_number = 1;
+	info->region[0].queue_num = 64;
+	info->region[0].queue_start_index = 0;
+
+	ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+	if (ret != I40E_SUCCESS)
+		PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+
+	ret = i40e_dcb_init_configure(dev, TRUE);
+	if (ret != I40E_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+		pf->flags &= ~I40E_FLAG_DCB;
+	}
+
+	i40e_init_queue_region_conf(dev);
+
+	return 0;
+}
+
+static int
+i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint64_t hena;
+
+	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
+	if (!hena)
+		return -ENOTSUP;
+
+	return 0;
+}
+
+static int
+i40e_queue_region_display_all_info(struct i40e_pf *pf, uint16_t port_id)
+{
+	uint16_t i, j;
+	struct i40e_queue_region_info *info = &pf->queue_region;
+	static const char *queue_region_info_stats_border = "-------";
+
+	if (!info->queue_region_number)
+		PMD_DRV_LOG(ERR, "no queue region has been set before");
+
+	printf("\n  %s All queue region info for port=%2d %s",
+			queue_region_info_stats_border, port_id,
+			queue_region_info_stats_border);
+	printf("\n  queue_region_number: %-14u \n", info->queue_region_number);
+
+	for (i = 0; i < info->queue_region_number; i++) {
+		printf("\n  region_id: %-14u queue_number: %-14u "
+			"queue_start_index: %-14u \n",
+			info->region[i].region_id,
+			info->region[i].queue_num,
+			info->region[i].queue_start_index);
+
+		printf("  user_priority_num is  %-14u :",
+				info->region[i].user_priority_num);
+		for (j = 0; j < info->region[i].user_priority_num; j++)
+			printf(" %-14u ", info->region[i].user_priority[j]);
+
+		printf("\n  flowtype_num is  %-14u :",
+				info->region[i].flowtype_num);
+		for (j = 0; j < info->region[i].flowtype_num; j++)
+			printf(" %-14u ", info->region[i].hw_flowtype[j]);
+	}
+
+	printf("\n\n");
+	return 0;
+}
+
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+			struct rte_i40e_rss_region_conf *rss_region_conf)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	enum rte_pmd_i40e_queue_region_op op_type = rss_region_conf->op;
+	int32_t ret;
+
+	/* validate the port id before touching rte_eth_devices[] */
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!is_i40e_supported(dev))
+		return -ENOTSUP;
+
+	if (i40e_queue_region_pf_check_rss(pf))
+		return -ENOTSUP;
+
+	/**
+	 * This queue region feature only supports PF by now. It should
+	 * be called after dev_start, and will be cleared after dev_stop.
+	 * All queue region configuration from the upper layer is at
+	 * first only stored in the driver software; it is committed to
+	 * hardware only on "FLUSH_ON", because the PMD must program the
+	 * hardware in one shot and therefore records all the upper
+	 * layer commands first.
+	 * "FLUSH_OFF" cleans all the queue region configuration made so
+	 * far and restores the DPDK i40e driver start-up defaults.
+	 */
+
+	switch (op_type) {
+	case RTE_PMD_I40E_QUEUE_REGION_SET:
+		ret = i40e_queue_region_set_region(pf, rss_region_conf);
+		break;
+	case RTE_PMD_I40E_REGION_FLOWTYPE_SET:
+		ret = i40e_queue_region_set_flowtype(pf, rss_region_conf);
+		break;
+	case RTE_PMD_I40E_USER_PRIORITY_REGION_SET:
+		ret = i40e_queue_region_set_user_priority(pf, rss_region_conf);
+		break;
+	case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
+		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+		break;
+	case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
+		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+		break;
+	case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
+		ret = i40e_queue_region_display_all_info(pf, port_id);
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "op type (%d) not supported",
+			    op_type);
+		ret = -EINVAL;
+	}
+
+	I40E_WRITE_FLUSH(hw);
+
+	return ret;
+}
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 155b7e8..2219318 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -91,6 +91,33 @@ enum rte_pmd_i40e_package_info {
 	RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
 };
 
+/**
+ *  Option types of queue region.
+ */
+enum rte_pmd_i40e_queue_region_op {
+	RTE_PMD_I40E_REGION_UNDEFINED,
+	RTE_PMD_I40E_QUEUE_REGION_SET,      /**< add a queue region set */
+	RTE_PMD_I40E_REGION_FLOWTYPE_SET,   /**< add a PF region pctype set */
+	/** add a queue region user priority set */
+	RTE_PMD_I40E_USER_PRIORITY_REGION_SET,
+	/**
+	 * All queue region configuration from the upper layer is at
+	 * first only stored in the driver software; only after
+	 * "FLUSH_ON" is it all committed to the hardware, because the
+	 * PMD has to program the hardware in one shot and therefore
+	 * records all the upper layer commands first.
+	 */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON,
+	/**
+	 * "FLUSH_OFF" cleans all the queue region configuration made
+	 * so far and restores the DPDK i40e driver start-up default
+	 * configuration.
+	 */
+	RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF,
+	RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET,
+	RTE_PMD_I40E_QUEUE_REGION_OP_MAX
+};
+
 #define RTE_PMD_I40E_DDP_NAME_SIZE 32
 
 /**
@@ -146,6 +173,27 @@ struct rte_pmd_i40e_ptype_mapping {
 };
 
 /**
+ * Queue region related information.
+ */
+struct rte_i40e_rss_region_conf {
+	/** the region id for this configuration */
+	uint8_t region_id;
+	/** the pctype or hardware flowtype of the packet;
+	 * the specific index for each type has been defined
+	 * in file i40e_type.h as enum i40e_filter_pctype.
+	 */
+	uint8_t hw_flowtype;
+	/** the start queue index for this region */
+	uint8_t queue_start_index;
+	/** the total queue number of this queue region */
+	uint8_t queue_num;
+	/** the packet's user priority for this region */
+	uint8_t user_priority;
+	/** option type of the queue region operation */
+	enum rte_pmd_i40e_queue_region_op op;
+};
+
+/**
  * Notify VF when PF link status changes.
  *
  * @param port
@@ -657,4 +705,16 @@ int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
 int rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id,
 				 struct ether_addr *mac_addr);
 
+/**
+ * Do RSS queue region configuration on the given port, as selected
+ * by the command option type in the configuration struct.
+ *
+ * @param port_id
+ *    The port identifier of the Ethernet device.
+ * @param rss_region_conf
+ *    Pointer to the struct that contains all the
+ *    region configuration parameters.
+ */
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+		struct rte_i40e_rss_region_conf *rss_region_conf);
 #endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index ef8882b..29d6b74 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -50,5 +50,6 @@ DPDK_17.11 {
 	global:
 
 	rte_pmd_i40e_add_vf_mac_addr;
+	rte_pmd_i40e_rss_queue_region_conf;
 
 } DPDK_17.08;
-- 
2.7.4



More information about the dev mailing list