[dpdk-dev] [PATCH v1 07/12] net/ice: init RSS during DCF start

Ye Xiaolong xiaolong.ye at intel.com
Fri Jun 5 17:26:54 CEST 2020


On 06/05, Ting Xu wrote:
>From: Qi Zhang <qi.z.zhang at intel.com>
>
>Enable RSS initialization during DCF start. Add RSS LUT and
>RSS key configuration functions.
>
>Signed-off-by: Qi Zhang <qi.z.zhang at intel.com>
>---
> drivers/net/ice/ice_dcf.c        | 123 +++++++++++++++++++++++++++++++
> drivers/net/ice/ice_dcf.h        |   1 +
> drivers/net/ice/ice_dcf_ethdev.c |  14 +++-
> 3 files changed, 135 insertions(+), 3 deletions(-)
>
>diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
>index 93fabd5f7..8d078163e 100644
>--- a/drivers/net/ice/ice_dcf.c
>+++ b/drivers/net/ice/ice_dcf.c
>@@ -708,3 +708,126 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
> 	rte_free(hw->rss_lut);
> 	rte_free(hw->rss_key);
> }
>+
>+static int
>+ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
>+{
>+	struct virtchnl_rss_key *rss_key;
>+	struct dcf_virtchnl_cmd args;
>+	int len, err;
>+
>+	len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
>+	rss_key = rte_zmalloc("rss_key", len, 0);
>+	if (!rss_key)
>+		return -ENOMEM;
>+
>+	rss_key->vsi_id = hw->vsi_res->vsi_id;
>+	rss_key->key_len = hw->vf_res->rss_key_size;
>+	rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);
>+
>+	args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
>+	args.req_msglen = len;
>+	args.req_msg = (uint8_t *)rss_key;
>+	args.rsp_msglen = 0;
>+	args.rsp_buflen = 0;
>+	args.rsp_msgbuf = NULL;
>+	args.pending = 0;
>+
>+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+	if (err) {
>+		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

rss_key needs to be freed in the error path as well, otherwise it leaks here.
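
One way (untested sketch) is to free the buffer unconditionally after the
command completes, since err is 0 on success:

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	rte_free(rss_key);
	return err;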

>+		return err;
>+	}
>+
>+	rte_free(rss_key);
>+	return 0;
>+}
>+
>+static int
>+ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
>+{
>+	struct virtchnl_rss_lut *rss_lut;
>+	struct dcf_virtchnl_cmd args;
>+	int len, err;
>+
>+	len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
>+	rss_lut = rte_zmalloc("rss_lut", len, 0);
>+	if (!rss_lut)
>+		return -ENOMEM;
>+
>+	rss_lut->vsi_id = hw->vsi_res->vsi_id;
>+	rss_lut->lut_entries = hw->vf_res->rss_lut_size;
>+	rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);
>+
>+	args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
>+	args.req_msglen = len;
>+	args.req_msg = (uint8_t *)rss_lut;
>+	args.rsp_msglen = 0;
>+	args.rsp_buflen = 0;
>+	args.rsp_msgbuf = NULL;
>+	args.pending = 0;
>+
>+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+	if (err) {
>+		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");

Need to free rss_lut here as well; the same unconditional-free pattern suggested above for rss_key applies.

>+		return err;
>+	}
>+
>+	rte_free(rss_lut);
>+	return 0;
>+}
>+
>+int
>+ice_dcf_init_rss(struct ice_dcf_hw *hw)
>+{
>+	struct rte_eth_dev *dev = hw->eth_dev;
>+	struct rte_eth_rss_conf *rss_conf;
>+	uint8_t i, j, nb_q;
>+	int ret;
>+
>+	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
>+	nb_q = dev->data->nb_rx_queues;
>+
>+	if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
>+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
>+		return -ENOTSUP;
>+	}
>+	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
>+		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
>+		/* set all lut items to default queue */
>+		for (i = 0; i < hw->vf_res->rss_lut_size; i++)
>+			hw->rss_lut[i] = 0;

How about simply:

	memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);

>+		ret = ice_dcf_configure_rss_lut(hw);
>+		return ret;

The temporary can be dropped too:

	return ice_dcf_configure_rss_lut(hw);

>+	}
>+
>+	/* In IAVF, RSS enablement is set by the PF driver. It cannot be
>+	 * configured based on rss_conf->rss_hf.
>+	 */
>+
>+	/* configure RSS key */
>+	if (!rss_conf->rss_key)
>+		/* Calculate the default hash key */
>+		for (i = 0; i <= hw->vf_res->rss_key_size; i++)
>+			hw->rss_key[i] = (uint8_t)rte_rand();

Why use <= here? It writes hw->rss_key[rss_key_size], one byte past the end of the buffer.
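
Presumably this should just be:

	for (i = 0; i < hw->vf_res->rss_key_size; i++)
		hw->rss_key[i] = (uint8_t)rte_rand();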

>+	else
>+		rte_memcpy(hw->rss_key, rss_conf->rss_key,
>+			   RTE_MIN(rss_conf->rss_key_len,
>+				   hw->vf_res->rss_key_size));
>+
>+	/* init RSS LUT table */
>+	for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
>+		if (j >= nb_q)
>+			j = 0;
>+		hw->rss_lut[i] = j;
>+	}
>+	/* send virtchnl ops to configure RSS */
>+	ret = ice_dcf_configure_rss_lut(hw);
>+	if (ret)
>+		return ret;
>+	ret = ice_dcf_configure_rss_key(hw);
>+	if (ret)
>+		return ret;
>+
>+	return 0;
>+}
>diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
>index dcb2a0283..eea4b286b 100644
>--- a/drivers/net/ice/ice_dcf.h
>+++ b/drivers/net/ice/ice_dcf.h
>@@ -63,5 +63,6 @@ int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
> int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
> int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
>+int ice_dcf_init_rss(struct ice_dcf_hw *hw);
> 
> #endif /* _ICE_DCF_H_ */
>diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
>index 1f7474dc3..5fbf70803 100644
>--- a/drivers/net/ice/ice_dcf_ethdev.c
>+++ b/drivers/net/ice/ice_dcf_ethdev.c
>@@ -51,9 +51,9 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
> 	uint16_t buf_size, max_pkt_len, len;
> 
> 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
>-
>-	/* Calculate the maximum packet length allowed */
>-	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
>+	rxq->rx_hdr_len = 0;
>+	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
>+	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;

The above change seems unrelated to this patch; what about squashing it into patch 6?

Thanks,
Xiaolong

> 	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
> 
> 	/* Check if the jumbo frame and maximum packet length are set
>@@ -133,6 +133,14 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
> 		return ret;
> 	}
> 
>+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
>+		ret = ice_dcf_init_rss(hw);
>+		if (ret) {
>+			PMD_DRV_LOG(ERR, "Failed to configure RSS");
>+			return ret;
>+		}
>+	}
>+
> 	dev->data->dev_link.link_status = ETH_LINK_UP;
> 
> 	return 0;
>-- 
>2.17.1
>

