[dpdk-dev] [PATCH v3 2/2] net/ixgbe: move RSS to flow API

Wei Zhao wei.zhao1 at intel.com
Fri Nov 24 09:05:27 CET 2017


The rte_flow API is defined to include RSS, but until now RSS
configuration has not been exposed through rte_flow in the ixgbe
driver. This patch moves the existing ixgbe RSS support into the
rte_flow framework.

Signed-off-by: Wei Zhao <wei.zhao1 at intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  13 +++
 drivers/net/ixgbe/ixgbe_ethdev.h |  10 +++
 drivers/net/ixgbe/ixgbe_flow.c   | 165 +++++++++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_rxtx.c   |  65 +++++++++++++++
 4 files changed, 253 insertions(+)
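
Note (not part of the patch): a minimal application-side sketch of how such
an RSS rule could be created through rte_flow once this lands. It assumes the
DPDK 17.11-era rte_flow_action_rss layout (rss_conf / num / flexible queue[])
that the parser below consumes; the helper name, the four-queue choice and the
error handling are illustrative only, not code from this series.

#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Sketch: spread ingress traffic of port_id over the first four Rx queues. */
static struct rte_flow *
create_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss *rss;
	struct rte_flow *flow;

	/* queue[] is a flexible array member, so allocate room behind it. */
	rss = calloc(1, sizeof(*rss) + sizeof(queues));
	if (rss == NULL)
		return NULL;
	rss->rss_conf = NULL;	/* NULL: parser falls back to default key/types */
	rss->num = 4;
	memcpy(rss->queue, queues, sizeof(queues));

	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	flow = rte_flow_create(port_id, &attr, pattern, actions, error);
	/* The parser copies the queue list during create, so this is safe. */
	free(rss);
	return flow;
}
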

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ff19a56..4960650 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -8339,6 +8339,18 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
 	}
 }
 
+/* restore rss filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter_info->rss_info.num)
+		ixgbe_config_rss_filter(dev,
+			&filter_info->rss_info, TRUE);
+}
+
 static int
 ixgbe_filter_restore(struct rte_eth_dev *dev)
 {
@@ -8347,6 +8359,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
 	ixgbe_syn_filter_restore(dev);
 	ixgbe_fdir_filter_restore(dev);
 	ixgbe_l2_tn_filter_restore(dev);
+	ixgbe_rss_filter_restore(dev);
 
 	return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 51ddcfd..4af79b4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -224,6 +224,12 @@ struct ixgbe_hw_fdir_info {
 	bool mask_added; /* If already got mask from consistent filter */
 };
 
+struct ixgbe_rte_flow_rss_conf {
+	struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+	uint16_t num; /**< Number of entries in queue[]. */
+	uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
 /* structure for interrupt relative data */
 struct ixgbe_interrupt {
 	uint32_t flags;
@@ -340,6 +346,8 @@ struct ixgbe_filter_info {
 	struct ixgbe_5tuple_filter_list fivetuple_list;
 	/* store the SYN filter info */
 	uint32_t syn_info;
+	/* store the rss filter info */
+	struct ixgbe_rte_flow_rss_conf rss_info;
 };
 
 struct ixgbe_l2_tn_key {
@@ -719,6 +727,8 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
 int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
 			       uint16_t tx_rate);
+int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+		struct ixgbe_rte_flow_rss_conf *conf, bool add);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 19c2d47..8f964cf 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -103,6 +103,11 @@ struct ixgbe_eth_l2_tunnel_conf_ele {
 	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
 	struct rte_eth_l2_tunnel_conf filter_info;
 };
+/* rss filter list structure */
+struct ixgbe_rss_conf_ele {
+	TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
+	struct ixgbe_rte_flow_rss_conf filter_info;
+};
 /* ixgbe_flow memory list structure */
 struct ixgbe_flow_mem {
 	TAILQ_ENTRY(ixgbe_flow_mem) entries;
@@ -114,6 +119,7 @@ TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
 
 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
@@ -121,6 +127,7 @@ static struct ixgbe_ethertype_filter_list filter_ethertype_list;
 static struct ixgbe_syn_filter_list filter_syn_list;
 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_rss_filter_list filter_rss_list;
 static struct ixgbe_flow_mem_list ixgbe_flow_list;
 
 /**
@@ -2700,6 +2707,109 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_action actions[],
+			struct ixgbe_rte_flow_rss_conf *rss_conf,
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_rss *rss;
+	uint16_t n;
+
+	/**
+	 * rss only supports forwarding,
+	 * check if the first not void action is RSS.
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	rss = (const struct rte_flow_action_rss *)act->conf;
+
+	if (!rss || !rss->num) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act,
+			   "no valid queues");
+		return -rte_errno;
+	}
+
+	for (n = 0; n < rss->num; n++) {
+		if (rss->queue[n] >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION,
+				   act,
+				   "queue id > max number of queues");
+			return -rte_errno;
+		}
+	}
+	if (rss->rss_conf)
+		rss_conf->rss_conf = *rss->rss_conf;
+	else
+		rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
+
+	for (n = 0; n < rss->num; ++n)
+		rss_conf->queue[n] = rss->queue[n];
+	rss_conf->num = rss->num;
+
+	/* check if the next not void item is END */
+	act = next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Error priority.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* remove the rss filter */
+static void
+ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter_info->rss_info.num)
+		ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
 void
 ixgbe_filterlist_init(void)
 {
@@ -2708,6 +2818,7 @@ ixgbe_filterlist_init(void)
 	TAILQ_INIT(&filter_syn_list);
 	TAILQ_INIT(&filter_fdir_list);
 	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&filter_rss_list);
 	TAILQ_INIT(&ixgbe_flow_list);
 }
 
@@ -2720,6 +2831,7 @@ ixgbe_filterlist_flush(void)
 	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
 	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
 	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+	struct ixgbe_rss_conf_ele *rss_filter_ptr;
 
 	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
 		TAILQ_REMOVE(&filter_ntuple_list,
@@ -2756,6 +2868,13 @@ ixgbe_filterlist_flush(void)
 		rte_free(fdir_rule_ptr);
 	}
 
+	while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+		TAILQ_REMOVE(&filter_rss_list,
+				 rss_filter_ptr,
+				 entries);
+		rte_free(rss_filter_ptr);
+	}
+
 	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
 		TAILQ_REMOVE(&ixgbe_flow_list,
 				 ixgbe_flow_mem_ptr,
@@ -2786,12 +2905,14 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	struct rte_eth_l2_tunnel_conf l2_tn_filter;
 	struct ixgbe_hw_fdir_info *fdir_info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_rte_flow_rss_conf rss_conf;
 	struct rte_flow *flow = NULL;
 	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
 	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
 	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
 	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
 	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+	struct ixgbe_rss_conf_ele *rss_filter_ptr;
 	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
 	uint8_t first_mask = FALSE;
 
@@ -2992,6 +3113,29 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 		}
 	}
 
+	memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+	ret = ixgbe_parse_rss_filter(dev, attr,
+					actions, &rss_conf, error);
+	if (!ret) {
+		ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
+		if (!ret) {
+			rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
+				sizeof(struct ixgbe_rss_conf_ele), 0);
+			if (!rss_filter_ptr) {
+				PMD_DRV_LOG(ERR, "failed to allocate memory");
+				goto out;
+			}
+			rte_memcpy(&rss_filter_ptr->filter_info,
+				&rss_conf,
+				sizeof(struct ixgbe_rte_flow_rss_conf));
+			TAILQ_INSERT_TAIL(&filter_rss_list,
+				rss_filter_ptr, entries);
+			flow->rule = rss_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_HASH;
+			return flow;
+		}
+	}
+
 out:
 	TAILQ_REMOVE(&ixgbe_flow_list,
 		ixgbe_flow_mem_ptr, entries);
@@ -3020,6 +3164,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
 	struct rte_eth_syn_filter syn_filter;
 	struct rte_eth_l2_tunnel_conf l2_tn_filter;
 	struct ixgbe_fdir_rule fdir_rule;
+	struct ixgbe_rte_flow_rss_conf rss_conf;
 	int ret;
 
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -3049,6 +3194,12 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
 	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
 	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
 				actions, &l2_tn_filter, error);
+	if (!ret)
+		return 0;
+
+	memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+	ret = ixgbe_parse_rss_filter(dev, attr,
+					actions, &rss_conf, error);
 
 	return ret;
 }
@@ -3075,6 +3226,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
 	struct ixgbe_hw_fdir_info *fdir_info =
 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+	struct ixgbe_rss_conf_ele *rss_filter_ptr;
 
 	switch (filter_type) {
 	case RTE_ETH_FILTER_NTUPLE:
@@ -3143,6 +3295,17 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
 			rte_free(l2_tn_filter_ptr);
 		}
 		break;
+	case RTE_ETH_FILTER_HASH:
+		rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
+				pmd_flow->rule;
+		ret = ixgbe_config_rss_filter(dev,
+					&rss_filter_ptr->filter_info, FALSE);
+		if (!ret) {
+			TAILQ_REMOVE(&filter_rss_list,
+				rss_filter_ptr, entries);
+			rte_free(rss_filter_ptr);
+		}
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 			    filter_type);
@@ -3194,6 +3357,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
 		return ret;
 	}
 
+	ixgbe_clear_rss_filter(dev);
+
 	ixgbe_filterlist_flush();
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9bc8462..4b38247 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -5550,6 +5550,71 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 	}
 }
 
+int
+ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+		struct ixgbe_rte_flow_rss_conf *conf, bool add)
+{
+	struct ixgbe_hw *hw;
+	uint32_t reta;
+	uint16_t i;
+	uint16_t j;
+	uint16_t sp_reta_size;
+	uint32_t reta_reg;
+	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+	if (!add) {
+		if (memcmp(conf, &filter_info->rss_info,
+			sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+			ixgbe_rss_disable(dev);
+			memset(&filter_info->rss_info, 0,
+				sizeof(struct ixgbe_rte_flow_rss_conf));
+			return 0;
+		}
+		return -EINVAL;
+	}
+
+	if (filter_info->rss_info.num)
+		return -EINVAL;
+	/* Fill in redirection table
+	 * The byte-swap is needed because NIC registers are in
+	 * little-endian order.
+	 */
+	reta = 0;
+	for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+		if (j == conf->num)
+			j = 0;
+		reta = (reta << 8) | conf->queue[j];
+		if ((i & 3) == 3)
+			IXGBE_WRITE_REG(hw, reta_reg,
+					rte_bswap32(reta));
+	}
+
+	/* Configure the RSS key and the RSS protocols used to compute
+	 * the RSS hash of input packets.
+	 */
+	if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+		ixgbe_rss_disable(dev);
+		return -EINVAL;
+	}
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = rss_intel_key; /* Default hash key */
+	ixgbe_hw_rss_hash_set(hw, &rss_conf);
+
+	rte_memcpy(&filter_info->rss_info,
+		conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+
+	return 0;
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
 int __attribute__((weak))
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
-- 
2.9.3