[dpdk-dev] [PATCH v2] net/i40e: move RSS to flow API

Zhao1, Wei wei.zhao1 at intel.com
Wed Jan 10 02:53:30 CET 2018


A new v3 has been submitted to DPDK.org:
https://dpdk.org/dev/patchwork/patch/33158/
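
For reference, here is a minimal sketch of how an application could install a
plain RSS rule through rte_flow once this patch is applied. It assumes the
rte_flow_action_rss layout in use at the time of this patch (rss_conf/num/queue[]),
which the patch itself relies on; the port id, queue list and helper name are
illustrative only, not part of the patch:

#include <stdlib.h>
#include <rte_flow.h>

/* Illustrative helper: spread traffic over queues 0-3 of the given port. */
static struct rte_flow *
create_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
	uint16_t nb_queues = 4, i;
	/* rte_flow_action_rss ends in a flexible queue[] array, so
	 * allocate room for the queue indices right behind it. */
	struct rte_flow_action_rss *rss =
		calloc(1, sizeof(*rss) + nb_queues * sizeof(uint16_t));
	struct rte_flow_attr attr = { .ingress = 1 };
	/* An empty pattern selects the plain RSS path in the parser. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	if (rss == NULL)
		return NULL;
	rss->rss_conf = NULL;	/* NULL conf: PMD keeps its default key/hash */
	rss->num = nb_queues;
	for (i = 0; i < nb_queues; i++)
		rss->queue[i] = i;

	flow = rte_flow_create(port_id, &attr, pattern, actions, err);
	free(rss);
	return flow;
}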

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Tuesday, January 9, 2018 10:34 AM
> To: Zhao1, Wei <wei.zhao1 at intel.com>; dev at dpdk.org
> Subject: RE: [PATCH v2] net/i40e: move RSS to flow API
> 
> Checked with author offline.
> 
> More comments are required to explain the acceptable patterns for
> i40e_flow_parse_rss_pattern, and the logic also needs to be corrected,
> since the current implementation will accept any combination of ETH and
> VLAN items, which does not make sense.
> 
> Regards
> Qi
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Monday, January 8, 2018 4:36 PM
> > To: dev at dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang at intel.com>; Zhao1, Wei
> > <wei.zhao1 at intel.com>
> > Subject: [PATCH v2] net/i40e: move RSS to flow API
> >
> > rte_flow is defined to cover RSS, but until now RSS has been handled
> > outside of rte_flow. This patch moves the existing i40e RSS configuration
> > to rte_flow, and also enables queue region configuration through the flow
> > API for i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1 at intel.com>
> >
> > ---
> >
> > v2:
> > -change some code style.
> > ---
> >  drivers/net/i40e/i40e_ethdev.c |  91 +++++++++++
> >  drivers/net/i40e/i40e_ethdev.h |  11 ++
> >  drivers/net/i40e/i40e_flow.c   | 340 +++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 442 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> > index 811cc9f..75b3bf3 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> >  	/* initialize queue region configuration */
> >  	i40e_init_queue_region_conf(dev);
> >
> > +	/* initialize rss configuration from rte_flow */
> > +	memset(&pf->rss_info, 0,
> > +		sizeof(struct i40e_rte_flow_rss_conf));
> > +
> >  	return 0;
> >
> >  err_init_fdir_filter_list:
> > @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
> >  	}
> >  }
> >
> > +/* Restore rss filter */
> > +static inline void
> > +i40e_rss_filter_restore(struct i40e_pf *pf)
> > +{
> > +	struct i40e_rte_flow_rss_conf *conf =
> > +					&pf->rss_info;
> > +	if (conf->num)
> > +		i40e_config_rss_filter(pf, conf, TRUE);
> > +}
> > +
> >  static void
> >  i40e_filter_restore(struct i40e_pf *pf)
> >  {
> >  	i40e_ethertype_filter_restore(pf);
> >  	i40e_tunnel_filter_restore(pf);
> >  	i40e_fdir_filter_restore(pf);
> > +	i40e_rss_filter_restore(pf);
> >  }
> >
> >  static bool
> > @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
> >  	return ret;
> >  }
> >
> > +int
> > +i40e_config_rss_filter(struct i40e_pf *pf,
> > +		struct i40e_rte_flow_rss_conf *conf, bool add)
> > +{
> > +	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > +	uint32_t i, lut = 0;
> > +	uint16_t j, num;
> > +	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> > +	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > +
> > +	if (!add) {
> > +		if (memcmp(conf, rss_info,
> > +			sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> > +			i40e_pf_disable_rss(pf);
> > +			memset(rss_info, 0,
> > +				sizeof(struct i40e_rte_flow_rss_conf));
> > +			return 0;
> > +		}
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (rss_info->num)
> > +		return -EINVAL;
> > +
> > +	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
> > +	 * It's necessary to calculate the actual PF queues that are configured.
> > +	 */
> > +	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
> > +		num = i40e_pf_calc_configured_queues_num(pf);
> > +	else
> > +		num = pf->dev_data->nb_rx_queues;
> > +
> > +	num = RTE_MIN(num, conf->num);
> > +	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
> > +			num);
> > +
> > +	if (num == 0) {
> > +		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	/* Fill in redirection table */
> > +	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> > +		if (j == num)
> > +			j = 0;
> > +		lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> > +			hw->func_caps.rss_table_entry_width) - 1));
> > +		if ((i & 3) == 3)
> > +			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> > +	}
> > +
> > +	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> > +		i40e_pf_disable_rss(pf);
> > +		return 0;
> > +	}
> > +	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> > +		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> > +		/* Random default keys */
> > +		static uint32_t rss_key_default[] = {0x6b793944,
> > +			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> > +			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> > +			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> > +
> > +		rss_conf.rss_key = (uint8_t *)rss_key_default;
> > +		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> > +							sizeof(uint32_t);
> > +	}
> > +
> > +	i40e_hw_rss_hash_set(pf, &rss_conf);
> > +
> > +	rte_memcpy(rss_info,
> > +		conf, sizeof(struct i40e_rte_flow_rss_conf));
> > +
> > +	return 0;
> > +}
> > +
> >  RTE_INIT(i40e_init_log);
> >  static void
> >  i40e_init_log(void)
> > diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> > index cd67453..0a59e39 100644
> > --- a/drivers/net/i40e/i40e_ethdev.h
> > +++ b/drivers/net/i40e/i40e_ethdev.h
> > @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> >  	bool valid;   /* Check if it's valid */
> >  };
> >
> > +struct i40e_rte_flow_rss_conf {
> > +	struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> > +	uint16_t queue_region_conf; /**< Queue region config flag */
> > +	uint16_t num; /**< Number of entries in queue[]. */
> > +	uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queue indices to use. */
> > +};
> > +
> >  /*
> >   * Structure to store private data specific for PF instance.
> >   */
> > @@ -945,6 +952,7 @@ struct i40e_pf {
> >  	struct i40e_fdir_info fdir; /* flow director info */
> >  	struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> >  	struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> > +	struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> >  	struct i40e_queue_regions queue_region; /* queue region info */
> >  	struct i40e_fc_conf fc_conf; /* Flow control conf */
> >  	struct i40e_mirror_rule_list mirror_list;
> > @@ -1071,6 +1079,7 @@ union i40e_filter_t {
> >  	struct i40e_fdir_filter_conf fdir_filter;
> >  	struct rte_eth_tunnel_filter_conf tunnel_filter;
> >  	struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> > +	struct i40e_rte_flow_rss_conf rss_conf;
> >  };
> >
> >  typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
> > @@ -1198,6 +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
> >  int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
> >  		struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
> >  void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> > +int i40e_config_rss_filter(struct i40e_pf *pf,
> > +		struct i40e_rte_flow_rss_conf *conf, bool add);
> >
> >  #define I40E_DEV_TO_PCI(eth_dev) \
> >  	RTE_DEV_TO_PCI((eth_dev)->device)
> > diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> > index 7e4936e..4d29818 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
> >  static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
> >  static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
> >  static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
> > +static int
> >  i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> >  			      const struct rte_flow_attr *attr,
> >  			      const struct rte_flow_item pattern[],
> > @@ -4095,6 +4097,301 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> >  }
> >
> >  static int
> > +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			     const struct rte_flow_item *pattern,
> > +			     struct rte_flow_error *error,
> > +			     uint8_t *action_flag,
> > +			     struct i40e_queue_regions *info)
> > +{
> > +	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> > +	const struct rte_flow_item *item = pattern;
> > +	enum rte_flow_item_type item_type;
> > +
> > +	if (item->type == RTE_FLOW_ITEM_TYPE_END)
> > +		return 0;
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   item,
> > +					   "Not support range");
> > +			return -rte_errno;
> > +		}
> > +		item_type = item->type;
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			*action_flag = 1;
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_VLAN:
> > +			vlan_spec =
> > +				(const struct rte_flow_item_vlan *)item->spec;
> > +			vlan_mask =
> > +				(const struct rte_flow_item_vlan *)item->mask;
> > +			if (vlan_spec && vlan_mask) {
> > +			if (vlan_spec && vlan_mask) {
> > +				if (vlan_mask->tci ==
> > +					rte_cpu_to_be_16(I40E_TCI_MASK)) {
> > +					info->region[0].user_priority[0] =
> > +						(vlan_spec->tci >> 13) & 0x7;
> > +					info->region[0].user_priority_num = 1;
> > +					info->queue_region_number = 1;
> > +					*action_flag = 0;
> > +				}
> > +			}
> > +			break;
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ITEM,
> > +					item,
> > +					"Not support range");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> > +			    const struct rte_flow_action *actions,
> > +			    struct rte_flow_error *error,
> > +			    uint8_t *action_flag,
> > +			    struct i40e_queue_regions *conf_info,
> > +			    union i40e_filter_t *filter)
> > +{
> > +	const struct rte_flow_action *act;
> > +	const struct rte_flow_action_rss *rss;
> > +	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > +	struct i40e_queue_regions *info = &pf->queue_region;
> > +	struct i40e_rte_flow_rss_conf *rss_config =
> > +			&filter->rss_conf;
> > +	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > +	uint16_t i, j, n;
> > +	uint32_t index = 0;
> > +
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	rss = (const struct rte_flow_action_rss *)act->conf;
> > +
> > +	/**
> > +	 * rss only supports forwarding,
> > +	 * check if the first not void action is RSS.
> > +	 */
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > +		memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	if (action_flag) {
> > +		for (n = 0; n < 64; n++) {
> > +			if (rss->rss_conf->rss_hf & (1ULL << n)) {
> > +				conf_info->region[0].user_priority[0] = n;
> > +				conf_info->region[0].user_priority_num = 1;
> > +				conf_info->queue_region_number = 1;
> > +				break;
> > +			}
> > +		}
> > +	}
> > +
> > +	for (n = 0; n < conf_info->queue_region_number; n++) {
> > +		if (conf_info->region[n].user_priority_num ||
> > +				conf_info->region[n].flowtype_num) {
> > +			if (!((rte_is_power_of_2(rss->num)) &&
> > +					rss->num <= 64)) {
> > +				PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> > +				"total number of queues do not exceed the VSI allocation");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (conf_info->region[n].user_priority[n] >=
> > +					I40E_MAX_USER_PRIORITY) {
> > +				PMD_DRV_LOG(ERR, "the user priority max index is 7");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (conf_info->region[n].hw_flowtype[n] >=
> > +					I40E_FILTER_PCTYPE_MAX) {
> > +				PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (rss_info->num < rss->num ||
> > +				rss_info->queue[0] < rss->queue[0] ||
> > +				(rss->queue[0] + rss->num >
> > +					rss_info->num + rss_info->queue[0])) {
> > +				rte_flow_error_set(error, EINVAL,
> > +					RTE_FLOW_ERROR_TYPE_ACTION,
> > +					act,
> > +					"no valid queues");
> > +				return -rte_errno;
> > +			}
> > +
> > +			for (i = 0; i < info->queue_region_number; i++) {
> > +				if (info->region[i].queue_num == rss->num &&
> > +					info->region[i].queue_start_index ==
> > +						rss->queue[0])
> > +					break;
> > +			}
> > +
> > +			if (i == info->queue_region_number) {
> > +				if (i > I40E_REGION_MAX_INDEX) {
> > +					PMD_DRV_LOG(ERR, "the queue region max index is 7");
> > +					return -rte_errno;
> > +				}
> > +
> > +				info->region[i].queue_num =
> > +					rss->num;
> > +				info->region[i].queue_start_index =
> > +					rss->queue[0];
> > +				info->region[i].region_id =
> > +					info->queue_region_number;
> > +
> > +				j = info->region[i].user_priority_num;
> > +				if (conf_info->region[n].user_priority_num) {
> > +					info->region[i].user_priority[j] =
> > +						conf_info->
> > +						region[n].user_priority[0];
> > +					info->region[i].user_priority_num++;
> > +				}
> > +
> > +				j = info->region[i].flowtype_num;
> > +				if (conf_info->region[n].flowtype_num) {
> > +					info->region[i].hw_flowtype[j] =
> > +						conf_info->
> > +						region[n].hw_flowtype[0];
> > +					info->region[i].flowtype_num++;
> > +				}
> > +				info->queue_region_number++;
> > +			} else {
> > +				j = info->region[i].user_priority_num;
> > +				if (conf_info->region[n].user_priority_num) {
> > +					info->region[i].user_priority[j] =
> > +						conf_info->
> > +						region[n].user_priority[0];
> > +					info->region[i].user_priority_num++;
> > +				}
> > +
> > +				j = info->region[i].flowtype_num;
> > +				if (conf_info->region[n].flowtype_num) {
> > +					info->region[i].hw_flowtype[j] =
> > +						conf_info->
> > +						region[n].hw_flowtype[0];
> > +					info->region[i].flowtype_num++;
> > +				}
> > +			}
> > +		}
> > +
> > +		rss_config->queue_region_conf = TRUE;
> > +		return 0;
> > +	}
> > +
> > +	if (!rss || !rss->num) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ACTION,
> > +				act,
> > +				"no valid queues");
> > +		return -rte_errno;
> > +	}
> > +
> > +	for (n = 0; n < rss->num; n++) {
> > +		if (rss->queue[n] >= dev->data->nb_rx_queues) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act,
> > +				   "queue id > max number of queues");
> > +			return -rte_errno;
> > +		}
> > +	}
> > +	if (rss->rss_conf)
> > +		rss_config->rss_conf = *rss->rss_conf;
> > +	else
> > +		rss_config->rss_conf.rss_hf =
> > +			pf->adapter->flow_types_mask;
> > +
> > +	for (n = 0; n < rss->num; ++n)
> > +		rss_config->queue[n] = rss->queue[n];
> > +	rss_config->num = rss->num;
> > +	index++;
> > +
> > +	/* check if the next not void action is END */
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +		memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +	rss_config->queue_region_conf = FALSE;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> > +			const struct rte_flow_attr *attr,
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			union i40e_filter_t *filter,
> > +			struct rte_flow_error *error)
> > +{
> > +	int ret;
> > +	struct i40e_queue_regions info;
> > +	uint8_t action_flag = 0;
> > +
> > +	memset(&info, 0, sizeof(struct i40e_queue_regions));
> > +
> > +	ret = i40e_flow_parse_rss_pattern(dev, pattern,
> > +					error, &action_flag, &info);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_rss_action(dev, actions, error,
> > +					&action_flag, &info, filter);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_attr(attr, error);
> > +	if (ret)
> > +		return ret;
> > +
> > +	cons_filter_type = RTE_ETH_FILTER_HASH;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> > +		struct i40e_rte_flow_rss_conf *conf)
> > +{
> > +	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > +	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > +	if (conf->queue_region_conf) {
> > +		i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
> > +		conf->queue_region_conf = 0;
> > +	} else {
> > +		i40e_config_rss_filter(pf, conf, 1);
> > +	}
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> > +		struct i40e_rte_flow_rss_conf *conf)
> > +{
> > +	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > +	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > +	i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > +
> > +	i40e_config_rss_filter(pf, conf, 0);
> > +	return 0;
> > +}
> > +
> > +static int
> >  i40e_flow_validate(struct rte_eth_dev *dev,
> >  		   const struct rte_flow_attr *attr,
> >  		   const struct rte_flow_item pattern[],
> > @@ -4130,6 +4427,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
> >
> >  	memset(&cons_filter, 0, sizeof(cons_filter));
> >
> > +	/* Get the non-void item of action */
> > +	while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> > +		i++;
> > +
> > +	if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> > +		ret = i40e_parse_rss_filter(dev, attr, pattern,
> > +					actions, &cons_filter, error);
> > +		return ret;
> > +	}
> > +
> > +	i = 0;
> >  	/* Get the non-void item number of pattern */
> >  	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> >  		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
> > @@ -4217,6 +4525,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> >  		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> >  					i40e_tunnel_filter_list);
> >  		break;
> > +	case RTE_ETH_FILTER_HASH:
> > +		ret = i40e_config_rss_filter_set(dev,
> > +			    &cons_filter.rss_conf);
> > +		flow->rule = &pf->rss_info;
> > +		break;
> >  	default:
> >  		goto free_flow;
> >  	}
> > @@ -4255,6 +4568,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> >  		ret = i40e_flow_add_del_fdir_filter(dev,
> >  		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> >  		break;
> > +	case RTE_ETH_FILTER_HASH:
> > +		ret = i40e_config_rss_filter_del(dev,
> > +			   (struct i40e_rte_flow_rss_conf *)flow->rule);
> > +		break;
> >  	default:
> >  		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> >  			    filter_type);
> > @@ -4397,6 +4713,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
> >  		return -rte_errno;
> >  	}
> >
> > +	ret = i40e_flow_flush_rss_filter(dev);
> > +	if (ret) {
> > +		rte_flow_error_set(error, -ret,
> > +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > +				   "Failed to flush rss flows.");
> > +		return -rte_errno;
> > +	}
> > +
> >  	return ret;
> >  }
> >
> > @@ -4487,3 +4811,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
> >
> >  	return ret;
> >  }
> > +
> > +/* remove the rss filter */
> > +static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
> > +{
> > +	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> > +	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > +	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +	int32_t ret = -EINVAL;
> > +
> > +	ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > +
> > +	if (rss_info->num)
> > +		ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> > +	return ret;
> > +}
> > --
> > 2.9.3
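
For completeness, a similar hedged sketch of the second mode this patch
enables: mapping a VLAN user priority onto a queue region. It mirrors what
i40e_flow_parse_rss_pattern accepts (an ETH item followed by a VLAN item
whose TCI mask is 0xffff, with the PCP bits of the spec selecting the
priority). The checks in i40e_flow_parse_rss_action suggest the PF's RSS
queues need to be configured first; the priority value, queue numbers and
helper name below are illustrative only:

#include <stdlib.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative helper: steer VLAN priority 3 traffic to a 4-queue region
 * starting at queue 8 on the given port. */
static struct rte_flow *
create_queue_region_flow(uint16_t port_id, struct rte_flow_error *err)
{
	uint16_t nb_queues = 4, i;
	struct rte_flow_action_rss *rss =
		calloc(1, sizeof(*rss) + nb_queues * sizeof(uint16_t));
	/* PCP lives in TCI bits 15:13; a full 0xffff mask makes the parser
	 * treat the rule as a user-priority based queue region. */
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(3 << 13),
	};
	struct rte_flow_item_vlan vlan_mask = {
		.tci = rte_cpu_to_be_16(0xffff),
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	if (rss == NULL)
		return NULL;
	rss->num = nb_queues;		/* region size: power of two, <= 64 */
	for (i = 0; i < nb_queues; i++)
		rss->queue[i] = 8 + i;	/* contiguous queues 8-11 */

	flow = rte_flow_create(port_id, &attr, pattern, actions, err);
	free(rss);
	return flow;
}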


