[dpdk-dev] net/ice: CVL multi-process support

Xiao Zhang <xiao.zhang@intel.com>
Fri Jul 19 19:53:52 CEST 2019


Add multi-process support for CVL. Secondary processes share the
memory already set up by the primary process, so no additional
allocation is needed for them.
Restrict configuration ops to the primary process: secondary
processes are not allowed to change the device configuration and
instead rely on the configuration done by the primary process.

Cc: stable@dpdk.org

Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 85 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.c   | 24 +++++++++++++
 2 files changed, 109 insertions(+)
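
For reference, every control-path op touched by this patch is gated with
the same check at the top of the function. A minimal sketch of that
pattern is below; example_configure_op() is an invented name used only
for illustration and is not part of the ice driver, and the exact set of
headers is an assumption.

#include <rte_common.h>    /* RTE_SET_USED */
#include <rte_eal.h>       /* rte_eal_process_type(), RTE_PROC_PRIMARY */
#include <rte_errno.h>     /* E_RTE_SECONDARY */
#include <rte_ethdev.h>    /* struct rte_eth_dev */

static int
example_configure_op(struct rte_eth_dev *dev)
{
	/* Only the primary process may modify shared device state;
	 * secondary processes reuse what the primary already configured.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	/* primary-only configuration work would go here */
	RTE_SET_USED(dev);
	return 0;
}

Since the rx/tx burst function pointers are assigned before the early
return added to ice_dev_init(), a secondary process (typically started
with the EAL option --proc-type=secondary or --proc-type=auto) still
gets a working datapath while all configuration stays with the primary.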

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 9ce730c..b2ef21f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1408,6 +1408,12 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = ice_xmit_pkts;
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_set_default_ptype_table(dev);
 	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	intr_handle = &pci_dev->intr_handle;
@@ -1574,6 +1580,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t i;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* avoid stopping again */
 	if (pf->adapter_stopped)
 		return;
@@ -1610,6 +1619,9 @@ ice_dev_close(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Since stop will make link down, then the link event will be
 	 * triggered, disable the irq firstly to avoid the port_infoe etc
 	 * resources deallocation causing the interrupt service thread
@@ -1638,6 +1650,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_flow *p_flow;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_dev_close(dev);
 
 	dev->dev_ops = NULL;
@@ -1670,6 +1685,9 @@ ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * bulk allocation or vector Rx preconditions we will reset it.
 	 */
@@ -1948,6 +1966,9 @@ ice_dev_start(struct rte_eth_dev *dev)
 	uint16_t nb_txq, i;
 	int mask, ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* program Tx queues' context in hardware */
 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
 		ret = ice_tx_queue_start(dev, nb_txq);
@@ -2031,6 +2052,9 @@ ice_dev_reset(struct rte_eth_dev *dev)
 {
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (dev->data->sriov.active)
 		return -ENOTSUP;
 
@@ -2211,6 +2235,9 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	unsigned int rep_cnt = MAX_REPEAT_TIME;
 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&link, 0, sizeof(link));
 	memset(&old, 0, sizeof(old));
 	memset(&link_status, 0, sizeof(link_status));
@@ -2350,6 +2377,8 @@ ice_dev_set_link_up(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, true);
 }
 
@@ -2358,6 +2387,8 @@ ice_dev_set_link_down(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, false);
 }
 
@@ -2368,6 +2399,9 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* check if mtu is within the allowed range */
 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
 		return -EINVAL;
@@ -2402,6 +2436,9 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
 	uint8_t flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
 		return -EINVAL;
@@ -2448,6 +2485,9 @@ ice_macaddr_add(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	ret = ice_add_mac_filter(vsi, mac_addr);
 	if (ret != ICE_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
@@ -2467,6 +2507,9 @@ ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	struct rte_ether_addr *macaddr;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	macaddr = &data->mac_addrs[index];
 	ret = ice_remove_mac_filter(vsi, macaddr);
 	if (ret) {
@@ -2484,6 +2527,9 @@ ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (on) {
 		ret = ice_add_vlan_filter(vsi, vlan_id);
 		if (ret < 0) {
@@ -2602,6 +2648,9 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct rte_eth_rxmode *rxmode;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -2639,6 +2688,9 @@ ice_vlan_tpid_set(struct rte_eth_dev *dev,
 	int qinq = dev->data->dev_conf.rxmode.offloads &
 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	switch (vlan_type) {
 	case ETH_VLAN_TYPE_OUTER:
 		if (qinq)
@@ -2749,6 +2801,9 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 	uint8_t *lut;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
@@ -2891,6 +2946,9 @@ ice_rss_hash_update(struct rte_eth_dev *dev,
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_vsi *vsi = pf->main_vsi;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* set hash key */
 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
 	if (status)
@@ -2924,6 +2982,9 @@ ice_promisc_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2943,6 +3004,9 @@ ice_promisc_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2960,6 +3024,9 @@ ice_allmulti_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
@@ -2976,6 +3043,9 @@ ice_allmulti_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	if (dev->data->promiscuous == 1)
 		return; /* must remain in all_multicast mode */
 
@@ -2995,6 +3065,9 @@ static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	uint32_t val;
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3015,6 +3088,9 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
@@ -3059,6 +3135,9 @@ ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
 	uint8_t vlan_flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!vsi || !info) {
 		PMD_DRV_LOG(ERR, "invalid parameters");
 		return -EINVAL;
@@ -3113,6 +3192,9 @@ ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 	struct ice_vsi_vlan_pvid_info info;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&info, 0, sizeof(info));
 	info.on = on;
 	if (info.on) {
@@ -3555,6 +3637,9 @@ ice_stats_reset(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Mark PF and VSI stats to update the offset, aka "reset" */
 	pf->offset_loaded = false;
 	if (pf->main_vsi)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 035ed84..2a8b888 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -337,6 +337,9 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
 		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
 			    rx_queue_id, dev->data->nb_rx_queues);
@@ -391,6 +394,9 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err;
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
@@ -421,6 +427,9 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -548,6 +557,9 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	uint32_t q_teids[1];
 	uint16_t q_handle = tx_queue_id;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -597,6 +609,9 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	int use_def_burst_func = 1;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
 	    nb_desc < ICE_MIN_RING_DESC) {
@@ -714,6 +729,9 @@ ice_rx_queue_release(void *rxq)
 {
 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
 		return;
@@ -739,6 +757,9 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint64_t offloads;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
@@ -910,6 +931,9 @@ ice_tx_queue_release(void *txq)
 {
 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
 		return;
-- 
2.7.4
