[dpdk-dev] [PATCH v1] net/ice: support QoS BW config after VF reset in DCF

Ting Xu ting.xu at intel.com
Fri Jul 2 17:00:27 CEST 2021


When a VF reset happens, the QoS bandwidth configuration of that VF is
lost. If the reset is not caused by a DCB change, DCF is expected to
replay the bandwidth configuration to the VF. With this patch, when a
VSI update event is received from the PF after a VF reset and it is
confirmed that the DCB configuration has not changed, the stored
bandwidth configuration is replayed to the VF.

Signed-off-by: Ting Xu <ting.xu at intel.com>
---
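In short, the replay flow added by this patch amounts to the sketch
below; it only restates the diff that follows. The check lives in
ice_dcf_vsi_update_service_handler() in ice_dcf_parent.c, and
ice_dcf_replay_vf_bw() re-queries the port ETS config before resending
the cached per-VF bandwidth from hw->qos_bw_cfg[].

	/* After the VSI update event for vf_id has been handled, replay
	 * the stored QoS bandwidth only if a TM hierarchy was committed
	 * before the reset; ice_dcf_replay_vf_bw() then skips the replay
	 * when the re-queried port ETS config has changed (i.e. the reset
	 * was caused by a DCB change).
	 */
	if (hw->tm_conf.committed)
		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
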
 drivers/net/ice/ice_dcf.c        | 11 +++++--
 drivers/net/ice/ice_dcf.h        |  2 ++
 drivers/net/ice/ice_dcf_ethdev.c |  1 -
 drivers/net/ice/ice_dcf_parent.c |  3 ++
 drivers/net/ice/ice_dcf_sched.c  | 53 +++++++++++++++++++++++++++++++-
 5 files changed, 66 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 349d23ee4f..045800a2d9 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -577,7 +577,7 @@ int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-	int ret;
+	int ret, size;
 
 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
 	hw->avf.back = hw;
@@ -669,8 +669,15 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		}
 	}
 
-	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS) {
 		ice_dcf_tm_conf_init(eth_dev);
+		size = sizeof(struct virtchnl_dcf_bw_cfg_list *) * hw->num_vfs;
+		hw->qos_bw_cfg = rte_zmalloc("qos_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg) {
+			PMD_INIT_LOG(ERR, "no memory for qos_bw_cfg");
+			goto err_rss;
+		}
+	}
 
 	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 1c7653de3d..711c0cf3ad 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -90,6 +90,7 @@ struct ice_dcf_hw {
 	uint16_t pf_vsi_id;
 
 	struct ice_dcf_tm_conf tm_conf;
+	struct virtchnl_dcf_bw_cfg_list **qos_bw_cfg;
 	struct ice_aqc_port_ets_elem *ets_config;
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
@@ -131,5 +132,6 @@ int ice_dcf_link_update(struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete);
 void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
 void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
+int ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 69fe6e63d1..cab7c4da87 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -622,7 +622,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
-	dcf_ad->real_hw.tm_conf.committed = false;
 
 	return 0;
 }
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index c59cd0bef9..03155c9df0 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -143,6 +143,9 @@ ice_dcf_vsi_update_service_handler(void *param)
 		}
 	}
 
+	if (hw->tm_conf.committed)
+		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
+
 	rte_spinlock_unlock(&vsi_update_lock);
 
 	free(param);
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
index 4371bbc820..c688564ca6 100644
--- a/drivers/net/ice/ice_dcf_sched.c
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -631,6 +631,48 @@ ice_dcf_validate_tc_bw(struct virtchnl_dcf_bw_cfg_list *tc_bw,
 
 	return 0;
 }
+
+int
+ice_dcf_replay_vf_bw(struct ice_dcf_hw *hw, uint16_t vf_id)
+{
+	struct ice_aqc_port_ets_elem old_ets_config;
+	struct ice_dcf_adapter *adapter;
+	struct ice_hw *parent_hw;
+	int ret, size;
+
+	adapter = hw->eth_dev->data->dev_private;
+	parent_hw = &adapter->parent.hw;
+
+	/* store the old ets config */
+	old_ets_config = *hw->ets_config;
+
+	ice_memset(hw->ets_config, 0, sizeof(*hw->ets_config), ICE_NONDMA_MEM);
+	ret = ice_aq_query_port_ets(parent_hw->port_info,
+			hw->ets_config, sizeof(*hw->ets_config),
+			NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+		return ret;
+	}
+
+	if (memcmp(&old_ets_config, hw->ets_config, sizeof(old_ets_config))) {
+		PMD_DRV_LOG(DEBUG, "ETS config changes, do not replay BW");
+		return ICE_SUCCESS;
+	}
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+
+	ret = ice_dcf_set_vf_bw(hw, hw->qos_bw_cfg[vf_id], size);
+	if (ret) {
+		PMD_DRV_LOG(DEBUG, "VF %u BW replay failed", vf_id);
+		return ICE_ERR_CFG;
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 				 int clear_on_fail,
 				 __rte_unused struct rte_tm_error *error)
@@ -734,7 +776,16 @@ static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
 		ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
 		if (ret_val)
 			goto fail_clear;
-		memset(vf_bw, 0, size);
+
+		hw->qos_bw_cfg[vf_id] = rte_zmalloc("vf_bw_cfg", size, 0);
+		if (!hw->qos_bw_cfg[vf_id]) {
+			ret_val = ICE_ERR_NO_MEMORY;
+			goto fail_clear;
+		}
+		/* store the bandwidth information for replay */
+		ice_memcpy(hw->qos_bw_cfg[vf_id], vf_bw, size,
+			   ICE_NONDMA_TO_NONDMA);
+		ice_memset(vf_bw, 0, size, ICE_NONDMA_MEM);
 	}
 
 	/* check if total CIR is larger than port bandwidth */
-- 
2.17.1


