[dpdk-dev] [PATCH v3] net/i40e: implement hash function in rte flow API

Chenxu Di chenxux.di at intel.com
Fri Mar 20 02:24:12 CET 2020


Implement set hash global configuration, set symmetric hash enable
and set hash input set in the rte flow API.
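
A brief usage sketch in testpmd flow syntax (the exact tokens, such as
"func symmetric_toeplitz", depend on the testpmd build and are shown
only as an illustration):

   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end \
		actions rss types ipv4-udp l3-src-only end queues end / end
   testpmd> flow create 0 ingress pattern end actions rss \
		func symmetric_toeplitz types end queues end / end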

Signed-off-by: Chenxu Di <chenxux.di at intel.com>
---
v3:
-modified the doc i40e.rst
v2:
-dropped the removal of the legacy filter functions.
---
 doc/guides/nics/i40e.rst               |  14 +
 doc/guides/rel_notes/release_20_05.rst |   6 +
 drivers/net/i40e/i40e_ethdev.c         | 451 ++++++++++++++++++++++---
 drivers/net/i40e/i40e_ethdev.h         |  18 +
 drivers/net/i40e/i40e_flow.c           | 186 ++++++++--
 5 files changed, 603 insertions(+), 72 deletions(-)

diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index d6e578eda..03b117a99 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -569,6 +569,20 @@ details please refer to :doc:`../testpmd_app_ug/index`.
    testpmd> set port (port_id) queue-region flush (on|off)
    testpmd> show port (port_id) queue-region
 
+Generic flow API
+~~~~~~~~~~~~~~~~
+Enable setting hash input set and hash enable in the generic flow API.
+Because the queue region configuration in i40e applies to all PCTYPEs,
+the pctype must be empty when configuring a queue region.
+The pctype in the pattern and in the actions must match.
+For example, to configure a queue region with queues 0, 1, 2, 3,
+then enable hash for PCTYPE ipv4-tcp with the input set l3-src-only:
+
+   testpmd> flow create 0 ingress pattern end actions rss types end \
+		queues 0 1 2 3 end / end
+   testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \
+		actions rss types ipv4-tcp l3-src-only end queues end / end
+
 Limitations or Known issues
 ---------------------------
 
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 000bbf501..89ce8de6c 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -62,6 +62,12 @@ New Features
 
   * Added support for matching on IPv4 Time To Live and IPv6 Hop Limit.
 
+* **Updated Intel i40e driver.**
+
+  Updated i40e PMD with new features and improvements, including:
+
+  * Added support for setting hash function and hash input set in rte_flow API.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 9539b0470..62e4ef7a7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1656,6 +1656,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	/* initialize mirror rule list */
 	TAILQ_INIT(&pf->mirror_list);
 
+	/* initialize rss rule list */
+	TAILQ_INIT(&pf->rss_info_list);
+
 	/* initialize Traffic Manager configuration */
 	i40e_tm_conf_init(dev);
 
@@ -12329,10 +12332,12 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
 static inline void
 i40e_rss_filter_restore(struct i40e_pf *pf)
 {
-	struct i40e_rte_flow_rss_conf *conf =
-					&pf->rss_info;
-	if (conf->conf.queue_num)
-		i40e_config_rss_filter(pf, conf, TRUE);
+	struct i40e_rss_conf_list *rss_list = &pf->rss_info_list;
+	struct i40e_rte_flow_rss_filter *rss_item;
+
+	TAILQ_FOREACH(rss_item, rss_list, next) {
+		i40e_config_rss_filter(pf, &rss_item->rss_filter_info, TRUE);
+	}
 }
 
 static void
@@ -12956,31 +12961,214 @@ i40e_action_rss_same(const struct rte_flow_action_rss *comp,
 			sizeof(*with->queue) * with->queue_num));
 }
 
-int
-i40e_config_rss_filter(struct i40e_pf *pf,
-		struct i40e_rte_flow_rss_conf *conf, bool add)
+/* config rss hash input set */
+static int
+i40e_config_rss_inputset(struct i40e_pf *pf, uint64_t types)
 {
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-	uint32_t i, lut = 0;
-	uint16_t j, num;
-	struct rte_eth_rss_conf rss_conf = {
-		.rss_key = conf->conf.key_len ?
-			(void *)(uintptr_t)conf->conf.key : NULL,
-		.rss_key_len = conf->conf.key_len,
-		.rss_hf = conf->conf.types,
+	struct rte_eth_input_set_conf conf;
+	int i, ret;
+	uint32_t j;
+	static const struct {
+		uint64_t type;
+		enum rte_eth_input_set_field field;
+	} inset_type_table[] = {
+		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP4},
+		{ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP4},
+
+		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP4},
+		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP4},
+		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP4},
+		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP4},
+		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP4},
+		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP4},
+		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP4},
+		{ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP4},
+
+		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP6},
+		{ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP6},
+
+		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP6},
+		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP6},
+		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP6},
+		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP6},
+		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP6},
+		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP6},
+		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+		{ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY,
+			RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+
+		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY,
+			RTE_ETH_INPUT_SET_L3_SRC_IP6},
+		{ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY,
+			RTE_ETH_INPUT_SET_L3_DST_IP6},
 	};
-	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
 
-	if (!add) {
-		if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
-			i40e_pf_disable_rss(pf);
-			memset(rss_info, 0,
-				sizeof(struct i40e_rte_flow_rss_conf));
-			return 0;
+	ret = 0;
+
+	for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
+		if (!(pf->adapter->flow_types_mask & (1ull << i)) ||
+		    !(types & (1ull << i)))
+			continue;
+
+		conf.op = RTE_ETH_INPUT_SET_SELECT;
+		conf.flow_type = i;
+		conf.inset_size = 0;
+		for (j = 0; j < RTE_DIM(inset_type_table); j++) {
+			if ((types & inset_type_table[j].type) ==
+			    inset_type_table[j].type) {
+				conf.field[conf.inset_size] =
+					inset_type_table[j].field;
+				conf.inset_size++;
+			}
+		}
+
+		if (conf.inset_size) {
+			ret = i40e_hash_filter_inset_select(hw, &conf);
+			if (ret)
+				return ret;
 		}
+	}
+
+	return ret;
+}
+
+/* mark previous rules invalid if they are covered by the new rule */
+static void
+i40e_config_rss_invalidate_previous_rule(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_rte_flow_rss_filter *rss_item;
+	uint64_t input_bits;
+
+	/* compare pctypes with the SRC/DST-only input set bits masked out */
+	input_bits = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
+		ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+
+	TAILQ_FOREACH(rss_item, &pf->rss_info_list, next) {
+		if (!rss_item->rss_filter_info.valid)
+			continue;
+
+		/* config rss queue rule */
+		if (conf->conf.queue_num &&
+		    rss_item->rss_filter_info.conf.queue_num)
+			rss_item->rss_filter_info.valid = false;
+
+		/* config rss input set rule */
+		if (conf->conf.types &&
+		    (rss_item->rss_filter_info.conf.types &
+		    input_bits) ==
+		    (conf->conf.types & input_bits))
+			rss_item->rss_filter_info.valid = false;
+
+		/* config rss function symmetric rule */
+		if (conf->conf.func ==
+		    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
+		    rss_item->rss_filter_info.conf.func ==
+		    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ)
+			rss_item->rss_filter_info.valid = false;
+
+		/* config rss function xor or toeplitz rule */
+		if (rss_item->rss_filter_info.conf.func !=
+		    RTE_ETH_HASH_FUNCTION_DEFAULT &&
+		    conf->conf.func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+		    (rss_item->rss_filter_info.conf.types & input_bits) ==
+		    (conf->conf.types & input_bits))
+			rss_item->rss_filter_info.valid = false;
+	}
+}
+
+/* config rss hash enable and set hash input set */
+static int
+i40e_config_hash_pctype_add(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf,
+		struct rte_eth_rss_conf *rss_conf)
+{
+	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+	if (!(rss_conf->rss_hf & pf->adapter->flow_types_mask))
+		return -ENOTSUP;
+
+	/* Configure hash input set */
+	if (i40e_config_rss_inputset(pf, rss_conf->rss_hf))
 		return -EINVAL;
+
+	if (rss_conf->rss_key == NULL || rss_conf->rss_key_len <
+	    (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+		/* Random default keys */
+		static uint32_t rss_key_default[] = {0x6b793944,
+			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+		rss_conf->rss_key = (uint8_t *)rss_key_default;
+		rss_conf->rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+				sizeof(uint32_t);
+		PMD_DRV_LOG(INFO,
+			"No valid RSS key config for i40e, using default\n");
 	}
 
+	rss_conf->rss_hf |= rss_info->conf.types;
+	i40e_hw_rss_hash_set(pf, rss_conf);
+
+	i40e_config_rss_invalidate_previous_rule(pf, conf);
+
+	return 0;
+}
+
+/* config rss queue region */
+static int
+i40e_config_hash_queue_add(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t i, lut;
+	uint16_t j, num;
+
 	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
 	 * It's necessary to calculate the actual PF queues that are configured.
 	 */
@@ -13000,6 +13188,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
 		return -ENOTSUP;
 	}
 
+	lut = 0;
 	/* Fill in redirection table */
 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
 		if (j == num)
@@ -13010,29 +13199,215 @@ i40e_config_rss_filter(struct i40e_pf *pf,
 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
 	}
 
-	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
-		i40e_pf_disable_rss(pf);
+	i40e_config_rss_invalidate_previous_rule(pf, conf);
+
+	return 0;
+}
+
+/* config rss hash function */
+static int
+i40e_config_hash_function_add(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct rte_eth_hash_global_conf g_cfg;
+	uint64_t input_bits;
+
+	if (conf->conf.func ==
+			RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+		i40e_set_symmetric_hash_enable_per_port(hw, 1);
+	} else {
+		input_bits = ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY |
+			    ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
+		g_cfg.hash_func = conf->conf.func;
+		g_cfg.sym_hash_enable_mask[0] = conf->conf.types & input_bits;
+		g_cfg.valid_bit_mask[0] = conf->conf.types & input_bits;
+		i40e_set_hash_filter_global_config(hw, &g_cfg);
+	}
+
+	i40e_config_rss_invalidate_previous_rule(pf, conf);
+
+	return 0;
+}
+
+/* disable rss hena bits and reset hash input set to default */
+static int
+i40e_config_hash_pctype_del(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = pf->rss_info.conf.key_len ?
+			(void *)(uintptr_t)conf->conf.key : NULL,
+		.rss_key_len = pf->rss_info.conf.key_len,
+	};
+	uint32_t i;
+
+	/* clear the deleted pctypes from the hash enable register */
+	rss_conf.rss_hf = rss_info->conf.types & ~(conf->conf.types);
+	i40e_hw_rss_hash_set(pf, &rss_conf);
+
+	for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
+		if (!(pf->adapter->flow_types_mask & (1ull << i)) ||
+		    !(conf->conf.types & (1ull << i)))
+			continue;
+
+		/* set hash input set default */
+		struct rte_eth_input_set_conf input_conf = {
+			.op = RTE_ETH_INPUT_SET_SELECT,
+			.flow_type = i,
+			.inset_size = 1,
+		};
+		input_conf.field[0] = RTE_ETH_INPUT_SET_DEFAULT;
+		i40e_hash_filter_inset_select(hw, &input_conf);
+	}
+
+	rss_info->conf.types = rss_conf.rss_hf;
+
+	return 0;
+}
+
+/* config rss queue region to default */
+static int
+i40e_config_hash_queue_del(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+	uint16_t queue[I40E_MAX_Q_PER_TC];
+	uint32_t num_rxq, i, lut;
+	uint16_t j, num;
+
+	num_rxq = RTE_MIN(pf->dev_data->nb_rx_queues, I40E_MAX_Q_PER_TC);
+
+	for (j = 0; j < num_rxq; j++)
+		queue[j] = j;
+
+	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
+	 * It's necessary to calculate the actual PF queues that are configured.
+	 */
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+		num = i40e_pf_calc_configured_queues_num(pf);
+	else
+		num = pf->dev_data->nb_rx_queues;
+
+	num = RTE_MIN(num, num_rxq);
+	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+			num);
+
+	if (num == 0) {
+		PMD_DRV_LOG(ERR,
+			"No PF queues are configured to enable RSS for port %u",
+			pf->dev_data->port_id);
+		return -ENOTSUP;
+	}
+
+	lut = 0;
+	/* Fill in redirection table */
+	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+		if (j == num)
+			j = 0;
+		lut = (lut << 8) | (queue[j] & ((0x1 <<
+			hw->func_caps.rss_table_entry_width) - 1));
+		if ((i & 3) == 3)
+			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+	}
+
+	rss_info->conf.queue_num = 0;
+	memset(&rss_info->conf.queue, 0, sizeof(rss_info->conf.queue));
+
+	return 0;
+}
+
+/* config rss hash function to default */
+static int
+i40e_config_hash_function_del(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t i;
+	uint16_t j;
+
+	/* set symmetric hash to default status */
+	if (conf->conf.func ==
+	    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) {
+		i40e_set_symmetric_hash_enable_per_port(hw, 0);
+
 		return 0;
 	}
-	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
-		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
-		/* Random default keys */
-		static uint32_t rss_key_default[] = {0x6b793944,
-			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
-			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
-			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
 
-		rss_conf.rss_key = (uint8_t *)rss_key_default;
-		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
-							sizeof(uint32_t);
-		PMD_DRV_LOG(INFO,
-			"No valid RSS key config for i40e, using default\n");
+	for (i = RTE_ETH_FLOW_IPV4; i <= RTE_ETH_FLOW_L2_PAYLOAD; i++) {
+		if (!(conf->conf.types & (1ull << i)))
+			continue;
+
+		/* disable global symmetric hash for this pctype */
+		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
+		     j < I40E_FILTER_PCTYPE_MAX; j++) {
+			if (pf->adapter->pctypes_tbl[i] &
+			    (1ULL << j))
+				i40e_write_global_rx_ctl(hw,
+					I40E_GLQF_HSYM(j), 0);
+		}
 	}
 
-	i40e_hw_rss_hash_set(pf, &rss_conf);
+	return 0;
+}
 
-	if (i40e_rss_conf_init(rss_info, &conf->conf))
-		return -EINVAL;
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+		struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+	struct rte_flow_action_rss update_conf = rss_info->conf;
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = conf->conf.key_len ?
+			(void *)(uintptr_t)conf->conf.key : NULL,
+		.rss_key_len = conf->conf.key_len,
+		.rss_hf = conf->conf.types,
+	};
+	int ret = 0;
+
+	if (add) {
+		if (conf->conf.queue_num) {
+			/* config rss queue region */
+			ret = i40e_config_hash_queue_add(pf, conf);
+			if (ret)
+				return ret;
+
+			update_conf.queue_num = conf->conf.queue_num;
+			update_conf.queue = conf->conf.queue;
+		} else if (conf->conf.func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
+			/* config hash function */
+			ret = i40e_config_hash_function_add(pf, conf);
+			if (ret)
+				return ret;
+
+			update_conf.func = conf->conf.func;
+		} else {
+			/* config hash enable and input set for each pctype */
+			ret = i40e_config_hash_pctype_add(pf, conf, &rss_conf);
+			if (ret)
+				return ret;
+
+			update_conf.types = rss_conf.rss_hf;
+			update_conf.key = rss_conf.rss_key;
+			update_conf.key_len = rss_conf.rss_key_len;
+		}
+
+		/* update rss info in pf */
+		if (i40e_rss_conf_init(rss_info, &update_conf))
+			return -EINVAL;
+	} else {
+		if (!conf->valid)
+			return 0;
+
+		if (conf->conf.queue_num)
+			i40e_config_hash_queue_del(pf);
+		else if (conf->conf.func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+			i40e_config_hash_function_del(pf, conf);
+		else
+			i40e_config_hash_pctype_del(pf, conf);
+	}
 
 	return 0;
 }
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index aac89de91..1e4e64ea7 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -194,6 +194,9 @@ enum i40e_flxpld_layer_idx {
 #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK  \
 	I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
 
+#define I40E_RSS_TYPE_NONE           0ULL
+#define I40E_RSS_TYPE_INVALID        1ULL
+
 #define I40E_INSET_NONE            0x00000000000000000ULL
 
 /* bit0 ~ bit 7 */
@@ -749,6 +752,11 @@ struct i40e_queue_regions {
 	struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1];
 };
 
+struct i40e_rss_pattern_info {
+	uint8_t action_flag; /* queue region flowtype is taken from action */
+	uint64_t types; /* RSS types (pctypes) translated from the pattern */
+};
+
 /* Tunnel filter number HW supports */
 #define I40E_MAX_TUNNEL_FILTER_NUM 400
 
@@ -968,6 +976,15 @@ struct i40e_rte_flow_rss_conf {
 		     I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
 		    sizeof(uint32_t)]; /* Hash key. */
 	uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+	bool valid; /* rule is valid (not covered by a newer rule) */
+};
+
+TAILQ_HEAD(i40e_rss_conf_list, i40e_rte_flow_rss_filter);
+
+/* rss filter list structure */
+struct i40e_rte_flow_rss_filter {
+	TAILQ_ENTRY(i40e_rte_flow_rss_filter) next;
+	struct i40e_rte_flow_rss_conf rss_filter_info;
 };
 
 struct i40e_vf_msg_cfg {
@@ -1039,6 +1056,7 @@ struct i40e_pf {
 	struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
 	struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
 	struct i40e_rte_flow_rss_conf rss_info; /* rss info */
+	struct i40e_rss_conf_list rss_info_list; /* rss rule list */
 	struct i40e_queue_regions queue_region; /* queue region info */
 	struct i40e_fc_conf fc_conf; /* Flow control conf */
 	struct i40e_mirror_rule_list mirror_list;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index d877ac250..4774fde6d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -4424,10 +4424,10 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
  * function for RSS, or flowtype for queue region configuration.
  * For example:
  * pattern:
- * Case 1: only ETH, indicate  flowtype for queue region will be parsed.
- * Case 2: only VLAN, indicate user_priority for queue region will be parsed.
- * Case 3: none, indicate RSS related will be parsed in action.
- * Any pattern other the ETH or VLAN will be treated as invalid except END.
+ * Case 1: try to transform the pattern to a pctype; a valid pctype
+ *         will be used when parsing the action.
+ * Case 2: only ETH, indicates flowtype for queue region will be parsed.
+ * Case 3: only VLAN, indicates user_priority for queue region will be parsed.
  * So, pattern choice is depened on the purpose of configuration of
  * that flow.
  * action:
@@ -4438,15 +4438,66 @@ static int
 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 			     const struct rte_flow_item *pattern,
 			     struct rte_flow_error *error,
-			     uint8_t *action_flag,
+			     struct i40e_rss_pattern_info *p_info,
 			     struct i40e_queue_regions *info)
 {
 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
 	const struct rte_flow_item *item = pattern;
 	enum rte_flow_item_type item_type;
-
-	if (item->type == RTE_FLOW_ITEM_TYPE_END)
+	struct rte_flow_item *items;
+	uint32_t item_num = 0; /* number of non-void items in pattern */
+	uint32_t i = 0;
+	static const struct {
+		enum rte_flow_item_type *item_array;
+		uint64_t type;
+	} i40e_rss_pctype_patterns[] = {
+		{ pattern_fdir_ipv4,
+			ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
+		{ pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
+		{ pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
+		{ pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
+		{ pattern_fdir_ipv6,
+			ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
+		{ pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
+		{ pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
+		{ pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
+	};
+
+	p_info->types = I40E_RSS_TYPE_INVALID;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_END) {
+		p_info->types = I40E_RSS_TYPE_NONE;
 		return 0;
+	}
+
+	/* translate the pattern to a pctype */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("i40e_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	i40e_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
+		if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
+					items)) {
+			p_info->types = i40e_rss_pctype_patterns[i].type;
+			rte_free(items);
+			return 0;
+		}
+	}
+
+	rte_free(items);
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
@@ -4459,7 +4510,7 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 		item_type = item->type;
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			*action_flag = 1;
+			p_info->action_flag = 1;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
@@ -4472,7 +4523,7 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 						vlan_spec->tci) >> 13) & 0x7;
 					info->region[0].user_priority_num = 1;
 					info->queue_region_number = 1;
-					*action_flag = 0;
+					p_info->action_flag = 0;
 				}
 			}
 			break;
@@ -4500,12 +4551,14 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
  * max index should be 7, and so on. And also, queue index should be
  * continuous sequence and queue region index should be part of rss
  * queue index for this port.
+ * For hash params, the pctype in the action and the pattern must match.
+ * Setting queue indices or symmetric hash enable requires empty types.
  */
 static int
 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 			    const struct rte_flow_action *actions,
 			    struct rte_flow_error *error,
-			    uint8_t action_flag,
+				struct i40e_rss_pattern_info p_info,
 			    struct i40e_queue_regions *conf_info,
 			    union i40e_filter_t *filter)
 {
@@ -4516,7 +4569,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 	struct i40e_rte_flow_rss_conf *rss_config =
 			&filter->rss_conf;
 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
-	uint16_t i, j, n, tmp;
+	uint16_t i, j, n, tmp, nb_types;
 	uint32_t index = 0;
 	uint64_t hf_bit = 1;
 
@@ -4535,7 +4588,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	if (action_flag) {
+	if (p_info.action_flag) {
 		for (n = 0; n < 64; n++) {
 			if (rss->types & (hf_bit << n)) {
 				conf_info->region[0].hw_flowtype[0] = n;
@@ -4674,11 +4727,11 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 	if (rss_config->queue_region_conf)
 		return 0;
 
-	if (!rss || !rss->queue_num) {
+	if (!rss) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
 				act,
-				"no valid queues");
+				"no valid rules");
 		return -rte_errno;
 	}
 
@@ -4692,19 +4745,40 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 		}
 	}
 
-	if (rss_info->conf.queue_num) {
-		rte_flow_error_set(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ACTION,
-				act,
-				"rss only allow one valid rule");
-		return -rte_errno;
+	if (rss->queue_num && (p_info.types || rss->types))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "pctype must be empty while configuring queue region");
+
+	/* validate pattern and pctype */
+	if (!(rss->types & p_info.types) &&
+	    (rss->types || p_info.types) && !rss->queue_num)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			 act, "invalid pctype");
+
+	nb_types = 0;
+	for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
+		if (rss->types & (hf_bit << n))
+			nb_types++;
+		if (nb_types > 1)
+			return rte_flow_error_set
+				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				 act, "multi pctype is not supported");
 	}
 
+	if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
+	    (p_info.types || rss->types || rss->queue_num))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "pctype and queues must be empty while"
+			 " setting SYMMETRIC hash function");
+
 	/* Parse RSS related parameters from configuration */
-	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
 		return rte_flow_error_set
 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
-			 "non-default RSS hash functions are not supported");
+			 "RSS hash function is not supported");
 	if (rss->level)
 		return rte_flow_error_set
 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
@@ -4748,17 +4822,18 @@ i40e_parse_rss_filter(struct rte_eth_dev *dev,
 {
 	int ret;
 	struct i40e_queue_regions info;
-	uint8_t action_flag = 0;
+	struct i40e_rss_pattern_info p_info;
 
 	memset(&info, 0, sizeof(struct i40e_queue_regions));
+	memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
 
 	ret = i40e_flow_parse_rss_pattern(dev, pattern,
-					error, &action_flag, &info);
+					error, &p_info, &info);
 	if (ret)
 		return ret;
 
 	ret = i40e_flow_parse_rss_action(dev, actions, error,
-					action_flag, &info, filter);
+					p_info, &info, filter);
 	if (ret)
 		return ret;
 
@@ -4777,15 +4852,33 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_rte_flow_rss_filter *rss_filter;
 	int ret;
 
 	if (conf->queue_region_conf) {
 		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
-		conf->queue_region_conf = 0;
 	} else {
 		ret = i40e_config_rss_filter(pf, conf, 1);
 	}
-	return ret;
+
+	if (ret)
+		return ret;
+
+	rss_filter = rte_zmalloc("i40e_rte_flow_rss_filter",
+				sizeof(*rss_filter), 0);
+	if (rss_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	rss_filter->rss_filter_info = *conf;
+	/* The newly created rule is always valid.
+	 * Any existing rule it covers will be set invalid.
+	 */
+	rss_filter->rss_filter_info.valid = true;
+
+	TAILQ_INSERT_TAIL(&pf->rss_info_list, rss_filter, next);
+
+	return 0;
 }
 
 static int
@@ -4794,10 +4887,21 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev,
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_rte_flow_rss_filter *rss_filter;
 
-	i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+	if (conf->queue_region_conf)
+		i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+	else
+		i40e_config_rss_filter(pf, conf, 0);
 
-	i40e_config_rss_filter(pf, conf, 0);
+	TAILQ_FOREACH(rss_filter, &pf->rss_info_list, next) {
+		if (!memcmp(&rss_filter->rss_filter_info, conf,
+			sizeof(struct rte_flow_action_rss))) {
+			TAILQ_REMOVE(&pf->rss_info_list, rss_filter, next);
+			rte_free(rss_filter);
+			break;
+		}
+	}
 	return 0;
 }
 
@@ -4940,7 +5043,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
 			    &cons_filter.rss_conf);
 		if (ret)
 			goto free_flow;
-		flow->rule = &pf->rss_info;
+		flow->rule = TAILQ_LAST(&pf->rss_info_list,
+				i40e_rss_conf_list);
 		break;
 	default:
 		goto free_flow;
@@ -4990,7 +5094,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 		break;
 	case RTE_ETH_FILTER_HASH:
 		ret = i40e_config_rss_filter_del(dev,
-			   (struct i40e_rte_flow_rss_conf *)flow->rule);
+			&((struct i40e_rte_flow_rss_filter *)flow->rule)->rss_filter_info);
 		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
@@ -5248,13 +5352,27 @@ static int
 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+	void *temp;
 	int32_t ret = -EINVAL;
 
 	ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
 
-	if (rss_info->conf.queue_num)
-		ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+	/* Delete rss flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type != RTE_ETH_FILTER_HASH)
+			continue;
+
+		if (flow->rule) {
+			ret = i40e_config_rss_filter_del(dev,
+				&((struct i40e_rte_flow_rss_filter *)flow->rule)->rss_filter_info);
+			if (ret)
+				return ret;
+		}
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	}
+
 	return ret;
 }
-- 
2.17.1


