[dpdk-dev] [PATCH v2 7/7] net/ice: get the VF hardware index in DCF

Wang, Haiyue haiyue.wang at intel.com
Mon Mar 16 06:58:29 CET 2020


> -----Original Message-----
> From: Ye, Xiaolong <xiaolong.ye at intel.com>
> Sent: Friday, March 13, 2020 15:01
> To: Wang, Haiyue <haiyue.wang at intel.com>
> Cc: dev at dpdk.org; Zhang, Qi Z <qi.z.zhang at intel.com>; Yang, Qiming <qiming.yang at intel.com>; Xing,
> Beilei <beilei.xing at intel.com>; Zhao1, Wei <wei.zhao1 at intel.com>
> Subject: Re: [PATCH v2 7/7] net/ice: get the VF hardware index in DCF
> 
> On 03/10, Haiyue Wang wrote:
> >The DCF (Device Config Function) needs the hardware index of the VFs
> >to control the flow settings. Also, if a VF resets, its index may
> >change, so this should be handled in the VF reset event.
> >
> >Signed-off-by: Haiyue Wang <haiyue.wang at intel.com>
> >---
> > drivers/net/ice/ice_dcf.c        | 81 +++++++++++++++++++++++++++++++
> > drivers/net/ice/ice_dcf.h        |  4 ++
> > drivers/net/ice/ice_dcf_parent.c | 83 +++++++++++++++++++++++++++++++-
> > 3 files changed, 167 insertions(+), 1 deletion(-)
> >
> >diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
> >index 480c449f5..1d3c8fa95 100644
> >--- a/drivers/net/ice/ice_dcf.c
> >+++ b/drivers/net/ice/ice_dcf.c
> >@@ -269,6 +269,63 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
> > 	return 0;
> > }
> >
> >+static int
> >+ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
> >+{
> >+	struct virtchnl_dcf_vsi_map *vsi_map;
> >+	uint16_t len;
> >+	int err;
> >+
> >+	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
> >+					  NULL, 0);
> >+	if (err) {
> >+		PMD_DRV_LOG(ERR, "Fail to send msg OP_DCF_GET_VSI_MAP");
> 
> Better to make the error log format consistent, either "Failed to xxx"
> or "Fail to xxx"; I prefer "Failed to xxx".
> 

Fixed in v3 with "Failed to xxx".

> 
> >+		return err;
> >+	}
> >+
> >+	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
> >+					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
> >+					  &len);
> >+	if (err) {
> >+		PMD_DRV_LOG(ERR, "Fail to get response of OP_DCF_GET_VSI_MAP");
> >+		return err;
> >+	}
> >+
> >+	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
> >+	if (len < sizeof(*vsi_map) || !vsi_map->num_vfs ||
> >+	    len < sizeof(*vsi_map) +
> >+			(vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0])) {
> 
> Better to use a temporary variable to make the code more readable.
> Also, isn't the first len < sizeof(*vsi_map) check redundant?

Yes, it is redundant. Enhanced this in v3 with a valid_msg_len variable for
the length check.
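A sketch of the v3 check (assuming a local uint16_t valid_msg_len; the
exact v3 code may differ):

    vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
    valid_msg_len = sizeof(*vsi_map) +
            (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]);
    if (!vsi_map->num_vfs || len < valid_msg_len) {
        PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
                    len);
        return -EINVAL;
    }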

> 
> >+		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
> >+			    len);
> >+		return -EINVAL;
> >+	}
> >+
> >+	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
> >+		PMD_DRV_LOG(ERR, "The number VSI map (%u) doesn't match the number of VFs (%u)",
> >+			    vsi_map->num_vfs, hw->num_vfs);
> >+		return -EINVAL;
> >+	}
> >+
> >+	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
> >+	if (!hw->vf_vsi_map) {
> >+		hw->num_vfs = vsi_map->num_vfs;
> >+		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
> >+	}
> >+
> >+	if (!hw->vf_vsi_map) {
> >+		PMD_DRV_LOG(ERR, "Fail to alloc memory for VSI context");
> >+		return -ENOMEM;
> >+	}
> 
> I think above two blocks can be combined with one if (!hw->vf_vsi_map).
> 

Done in v3.
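A sketch of the combined block (the exact v3 code may differ; the log
message uses "Failed to" per the earlier comment):

    len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
    if (!hw->vf_vsi_map) {
        hw->num_vfs = vsi_map->num_vfs;
        hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
        if (!hw->vf_vsi_map) {
            PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
            return -ENOMEM;
        }
    }

The behavior is unchanged: the -ENOMEM path can only be reached when the
map was NULL and the allocation failed.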

> >+
> >+	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
> >+		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
> >+		return 1;
> >+	}
> >+
> >+	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
> >+	return 0;
> >+}
> >+
> > static int
> > ice_dcf_mode_disable(struct ice_dcf_hw *hw)
> > {
> >@@ -467,6 +524,23 @@ ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
> > 	return err;
> > }
> >
> >+int
> >+ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
> >+{
> >+	int err = 0;
> >+
> >+	rte_spinlock_lock(&hw->vc_cmd_send_lock);
> >+	ice_dcf_disable_irq0(hw);
> >+
> >+	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw))
> >+		err = -1;
> >+
> >+	ice_dcf_enable_irq0(hw);
> >+	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
> >+
> >+	return err;
> >+}
> >+
> > int
> > ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
> > {
> >@@ -534,6 +608,12 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
> > 		goto err_alloc;
> > 	}
> >
> >+	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
> >+		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
> >+		ice_dcf_mode_disable(hw);
> >+		goto err_alloc;
> >+	}
> >+
> > 	rte_intr_callback_register(&pci_dev->intr_handle,
> > 				   ice_dcf_dev_interrupt_handler, hw);
> > 	rte_intr_enable(&pci_dev->intr_handle);
> >@@ -566,5 +646,6 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
> > 	iavf_shutdown_adminq(&hw->avf);
> >
> > 	rte_free(hw->arq_buf);
> >+	rte_free(hw->vf_vsi_map);
> > 	rte_free(hw->vf_res);
> > }
> >diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
> >index ecd6303a0..12bef4a2a 100644
> >--- a/drivers/net/ice/ice_dcf.h
> >+++ b/drivers/net/ice/ice_dcf.h
> >@@ -41,6 +41,9 @@ struct ice_dcf_hw {
> >
> > 	uint8_t *arq_buf;
> >
> >+	uint16_t num_vfs;
> >+	uint16_t *vf_vsi_map;
> >+
> > 	struct virtchnl_version_info virtchnl_version;
> > 	struct virtchnl_vf_resource *vf_res; /* VF resource */
> > 	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
> >@@ -51,6 +54,7 @@ int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
> > 				 struct dcf_virtchnl_cmd *cmd);
> > int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
> > 			void *buf, uint16_t buf_size);
> >+int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
> > int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> > void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> >
> >diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
> >index 4c3bb68b1..2735221e9 100644
> >--- a/drivers/net/ice/ice_dcf_parent.c
> >+++ b/drivers/net/ice/ice_dcf_parent.c
> >@@ -5,10 +5,76 @@
> > #include <sys/stat.h>
> > #include <unistd.h>
> >
> >+#include <rte_alarm.h>
> >+
> > #include "ice_dcf_ethdev.h"
> >
> >+#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL	100000 /* us */
> >+
> >+static __rte_always_inline void
> >+ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
> >+		       uint16_t vsi_map)
> >+{
> >+	struct ice_vsi_ctx *vsi_ctx;
> >+
> >+	if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
> >+		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
> >+		return;
> >+	}
> >+
> >+	vsi_ctx = hw->vsi_ctx[vsi_handle];
> >+
> >+	if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
> >+		if (!vsi_ctx)
> >+			vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
> >+
> >+		if (!vsi_ctx) {
> >+			PMD_DRV_LOG(ERR, "No memory for vsi context %u",
> >+				    vsi_handle);
> >+			return;
> >+		}
> 
> Above two blocks can be combined.

Done in v3.
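A sketch of the combined allocation path (the exact v3 code may differ):

    if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
        if (!vsi_ctx) {
            vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
            if (!vsi_ctx) {
                PMD_DRV_LOG(ERR, "No memory for vsi context %u",
                            vsi_handle);
                return;
            }
        }
        /* ... set vsi_num and hw->vsi_ctx[vsi_handle] as before ... */
    }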

> 
> >+
> >+		vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
> >+					      VIRTCHNL_DCF_VF_VSI_ID_S;
> >+		hw->vsi_ctx[vsi_handle] = vsi_ctx;
> >+
> >+		PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
> >+			    vsi_handle, vsi_ctx->vsi_num);
> >+	} else {
> >+		hw->vsi_ctx[vsi_handle] = NULL;
> >+
> >+		ice_free(hw, vsi_ctx);
> >+
> >+		PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
> >+	}
> >+}
> >+
> >+static void
> >+ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
> >+			  uint16_t *vf_vsi_map)
> >+{
> >+	uint16_t vf_id;
> >+
> >+	for (vf_id = 0; vf_id < num_vfs; vf_id++)
> >+		ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
> >+}
> >+
> >+static void
> >+ice_dcf_vsi_update_service_handler(void *param)
> >+{
> >+	struct ice_dcf_hw *hw = param;
> >+
> >+	if (!ice_dcf_handle_vsi_update_event(hw)) {
> >+		struct ice_dcf_adapter *dcf_ad =
> >+			container_of(hw, struct ice_dcf_adapter, real_hw);
> >+
> >+		ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
> >+					  hw->num_vfs, hw->vf_vsi_map);
> >+	}
> >+}
> >+
> > void
> >-ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
> >+ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
> > 			    uint8_t *msg, uint16_t msglen)
> > {
> > 	struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
> >@@ -21,6 +87,8 @@ ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
> > 	switch (pf_msg->event) {
> > 	case VIRTCHNL_EVENT_RESET_IMPENDING:
> > 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
> >+		rte_eal_alarm_set(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL * 2,
> >+				  ice_dcf_vsi_update_service_handler, dcf_hw);
> 
> Why * 2 for the VIRTCHNL_EVENT_RESET_IMPENDING event?
> 

The original thought was that a VF reset itself needs more time to settle
(by reference to the iavf PMD), but after checking, the *2 is not needed.
Fixed in v3.
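So in v3 both events should arm the alarm with the same interval, e.g.:

    case VIRTCHNL_EVENT_RESET_IMPENDING:
        PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
        rte_eal_alarm_set(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL,
                          ice_dcf_vsi_update_service_handler, dcf_hw);
        break;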

> > 		break;
> > 	case VIRTCHNL_EVENT_LINK_CHANGE:
> > 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
> >@@ -28,6 +96,13 @@ ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
> > 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
> > 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
> > 		break;
> >+	case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
> >+		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
> >+			    pf_msg->event_data.vf_vsi_map.vf_id,
> >+			    pf_msg->event_data.vf_vsi_map.vsi_id);
> >+		rte_eal_alarm_set(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL,
> >+				  ice_dcf_vsi_update_service_handler, dcf_hw);
> >+		break;
> > 	default:
> > 		PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
> > 		break;
> >@@ -235,6 +310,9 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
> > 	}
> > 	parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
> >
> >+	ice_dcf_update_vf_vsi_map(parent_hw,
> >+				  hw->num_vfs, hw->vf_vsi_map);
> >+
> 
> No need to split into 2 lines.

Done in v3.
> 
> > 	mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
> > 	if (rte_is_valid_assigned_ether_addr(mac))
> > 		rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
> >@@ -259,5 +337,8 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
> >
> > 	eth_dev->data->mac_addrs = NULL;
> >
> >+	rte_eal_alarm_cancel(ice_dcf_vsi_update_service_handler,
> >+			     &adapter->real_hw);
> >+
> > 	ice_dcf_uninit_parent_hw(parent_hw);
> > }
> >--
> >2.25.1
> >
