[dpdk-dev] [PATCH v8 9/9] net/ixgbe: add support for representor ports

Declan Doherty declan.doherty at intel.com
Thu Apr 26 12:41:05 CEST 2018


Add support for virtual function representor ports to the ixgbe PF
driver. When SR-IOV virtual function devices are enabled, a representor
port for each VF can be created in the process in which the ixgbe PMD
is running, by passing the representor devargs with the list of VFs for
which representor ports are to be created.

An example of the devargs which would create VF representors for
virtual functions 0, 2, 4, 5, 6 and 7 is:

-w DBDF,representor=[0,2,4-7]
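
For reference, the following is a minimal sketch (not part of this
patch) of how an application could discover the representor ports
created by the devargs above. It assumes the 18.05 ethdev API and the
net_<BDF>_representor_<vf> naming used by this patch, and would be
called after rte_eal_init():

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
show_vf_representors(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		char name[RTE_ETH_NAME_MAX_LEN];
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_get_name_by_port(port_id, name);

		/* representor ports are named net_<BDF>_representor_<vf> */
		if (strstr(name, "_representor_") == NULL)
			continue;

		rte_eth_dev_info_get(port_id, &dev_info);

		/* switch_info ties the representor back to its VF */
		printf("%s: port %u represents VF %u in switch domain %u\n",
			name, port_id, dev_info.switch_info.port_id,
			dev_info.switch_info.domain_id);
	}
}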

Signed-off-by: Declan Doherty <declan.doherty at intel.com>
Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal at intel.com>
Signed-off-by: Remy Horton <remy.horton at intel.com>
---
 doc/guides/nics/ixgbe.rst                |  14 ++
 drivers/net/ixgbe/Makefile               |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.c         |  80 ++++++++++--
 drivers/net/ixgbe/ixgbe_ethdev.h         |  14 ++
 drivers/net/ixgbe/ixgbe_pf.c             |   7 +
 drivers/net/ixgbe/ixgbe_vf_representor.c | 217 +++++++++++++++++++++++++++++++
 drivers/net/ixgbe/meson.build            |   1 +
 7 files changed, 325 insertions(+), 9 deletions(-)
 create mode 100644 drivers/net/ixgbe/ixgbe_vf_representor.c

diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 0c660f298..5512e0b08 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -228,6 +228,20 @@ For more details see the IPsec Security Gateway Sample Application and Security
 library documentation.
 
 
+Virtual Function Port Representors
+----------------------------------
+The IXGBE PF PMD supports the creation of VF port representors for the control
+and monitoring of IXGBE virtual function devices. Each port representor
+corresponds to a single virtual function of that device. Using the ``devargs``
+option ``representor``, the user can specify which virtual functions to create
+port representors for when the PF PMD is initialized, by passing the IDs of
+the required VFs, for example::
+
+  -w DBDF,representor=[0,1,4]
+
+Hot-plugging of representor ports is currently not supported, so all required
+representors must be specified when the PF port is created.
+
 Supported Chipsets and NICs
 ---------------------------
 
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index f8cad125b..7b6af3532 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -104,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
 endif
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
 
 # install this header file
 SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 73a24b88a..ea2e58b16 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -132,7 +132,7 @@
 #define IXGBE_EXVET_VET_EXT_SHIFT              16
 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
 
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
@@ -1043,7 +1043,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
  * It returns 0 on success.
  */
 static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1716,16 +1716,78 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	struct rte_pci_device *pci_dev)
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_probe(pci_dev,
-		sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+	char name[RTE_ETH_NAME_MAX_LEN];
+
+	struct rte_eth_devargs eth_da;
+	int i, retval;
+
+	retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+	if (retval)
+		return retval;
+
+	/* physical port net_bdf_port */
+	snprintf(name, sizeof(name), "net_%s_%d", pci_dev->device.name, 0);
+
+	retval = rte_eth_dev_create(&pci_dev->device, name,
+		sizeof(struct ixgbe_adapter),
+		eth_dev_pci_specific_init, pci_dev,
+		eth_ixgbe_dev_init, NULL);
+
+	if (retval || eth_da.nb_representor_ports < 1)
+		return retval;
+
+	/* probe VF representor ports */
+	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(name);
+
+	for (i = 0; i < eth_da.nb_representor_ports; i++) {
+		struct ixgbe_vf_info *vfinfo;
+		struct ixgbe_vf_representor representor;
+
+		vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+			pf_ethdev->data->dev_private);
+		if (vfinfo == NULL) {
+			PMD_DRV_LOG(ERR,
+				"no virtual functions supported by PF");
+			break;
+		}
+
+		representor.vf_id = eth_da.representor_ports[i];
+		representor.switch_domain_id = vfinfo->switch_domain_id;
+		representor.pf_ethdev = pf_ethdev;
+
+		/* representor port net_bdf_port */
+		snprintf(name, sizeof(name), "net_%s_representor_%d",
+			pci_dev->device.name,
+			eth_da.representor_ports[i]);
+
+		retval = rte_eth_dev_create(&pci_dev->device, name,
+			sizeof(struct ixgbe_vf_representor), NULL, NULL,
+			ixgbe_vf_representor_init, &representor);
+
+		if (retval)
+			PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+				"representor %s.", name);
+	}
+
+	return 0;
 }
 
 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+	struct rte_eth_dev *ethdev;
+
+	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+		return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+	else
+		return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
 }
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
@@ -2868,7 +2930,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev)
 	if (ret)
 		return ret;
 
-	ret = eth_ixgbe_dev_init(dev);
+	ret = eth_ixgbe_dev_init(dev, NULL);
 
 	return ret;
 }
@@ -3883,7 +3945,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 }
 
 /* return 0 means link status changed, -1 means not changed */
-static int
+int
 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 			    int wait_to_complete, int vf)
 {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 655077700..1947442d9 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -253,6 +253,7 @@ struct ixgbe_vf_info {
 	uint16_t vlan_count;
 	uint8_t spoofchk_enabled;
 	uint8_t api_version;
+	uint16_t switch_domain_id;
 };
 
 /*
@@ -480,6 +481,15 @@ struct ixgbe_adapter {
  	struct ixgbe_tm_conf        tm_conf;
 };
 
+struct ixgbe_vf_representor {
+	uint16_t vf_id;
+	uint16_t switch_domain_id;
+	struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
 	(&((struct ixgbe_adapter *)adapter)->hw)
 
@@ -652,6 +662,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
 
 void ixgbe_configure_dcb(struct rte_eth_dev *dev);
 
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+			    int wait_to_complete, int vf);
+
 /*
  * misc function prototypes
  */
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 4e61310af..4d199c802 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
+	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
 	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
@@ -122,6 +124,7 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct ixgbe_vf_info **vfinfo;
 	uint16_t vf_num;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -132,6 +135,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
 
+	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+	if (ret)
+		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
 	vf_num = dev_num_vf(eth_dev);
 	if (vf_num == 0)
 		return;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
new file mode 100644
index 000000000..e9edf0f67
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+	int wait_to_complete)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+	return ixgbe_dev_link_update_share(representor->pf_ethdev,
+		wait_to_complete, 0);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+	struct ether_addr *mac_addr)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+	return rte_pmd_ixgbe_set_vf_mac_addr(
+		representor->pf_ethdev->data->port_id,
+		representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+	struct rte_eth_dev_info *dev_info)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+		representor->pf_ethdev->data->dev_private);
+
+	dev_info->device = representor->pf_ethdev->device;
+
+	dev_info->min_rx_bufsize = 1024;
+	/**< Minimum size of RX buffer. */
+	dev_info->max_rx_pktlen = 9728;
+	/**< Maximum configurable length of RX pkt. */
+	dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+	/**< Maximum number of RX queues. */
+	dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+	/**< Maximum number of TX queues. */
+
+	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+	/**< Maximum number of MAC addresses. */
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+	/**< Device RX offload capabilities. */
+
+	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_TSO;
+	/**< Device TX offload capabilities. */
+
+	dev_info->speed_capa =
+		representor->pf_ethdev->data->dev_link.link_speed;
+	/**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+	dev_info->switch_info.name =
+		representor->pf_ethdev->device->name;
+	dev_info->switch_info.domain_id = representor->switch_domain_id;
+	dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+		__rte_unused struct rte_eth_dev *dev)
+{
+	return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+	__rte_unused struct rte_eth_dev *dev,
+	__rte_unused uint16_t rx_queue_id,
+	__rte_unused uint16_t nb_rx_desc,
+	__rte_unused unsigned int socket_id,
+	__rte_unused const struct rte_eth_rxconf *rx_conf,
+	__rte_unused struct rte_mempool *mb_pool)
+{
+	return 0;
+}
+
+static int ixgbe_vf_representor_tx_queue_setup(
+	__rte_unused struct rte_eth_dev *dev,
+	__rte_unused uint16_t rx_queue_id,
+	__rte_unused uint16_t nb_rx_desc,
+	__rte_unused unsigned int socket_id,
+	__rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	return 0;
+}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+	return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+	uint16_t vlan_id, int on)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+	uint64_t vf_mask = 1ULL << representor->vf_id;
+
+	return rte_pmd_ixgbe_set_vf_vlan_filter(
+		representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+	__rte_unused uint16_t rx_queue_id, int on)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+	rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+		representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+	.dev_infos_get		= ixgbe_vf_representor_dev_infos_get,
+
+	.dev_start		= ixgbe_vf_representor_dev_start,
+	.dev_configure		= ixgbe_vf_representor_dev_configure,
+	.dev_stop		= ixgbe_vf_representor_dev_stop,
+
+	.rx_queue_setup		= ixgbe_vf_representor_rx_queue_setup,
+	.tx_queue_setup		= ixgbe_vf_representor_tx_queue_setup,
+
+	.link_update		= ixgbe_vf_representor_link_update,
+
+	.vlan_filter_set	= ixgbe_vf_representor_vlan_filter_set,
+	.vlan_strip_queue_set	= ixgbe_vf_representor_vlan_strip_queue_set,
+
+	.mac_addr_set		= ixgbe_vf_representor_mac_addr_set,
+};
+
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+	struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+	struct ixgbe_vf_info *vf_data;
+	struct rte_pci_device *pci_dev;
+	struct rte_eth_link *link;
+
+	if (!representor)
+		return -ENOMEM;
+
+	representor->vf_id =
+		((struct ixgbe_vf_representor *)init_params)->vf_id;
+	representor->switch_domain_id =
+		((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+	representor->pf_ethdev =
+		((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+	pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+	if (representor->vf_id >= pci_dev->max_vfs)
+		return -ENODEV;
+
+	ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Set representor device ops */
+	ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+	/* No data-path so no RX/TX functions */
+	ethdev->rx_pkt_burst = NULL;
+	ethdev->tx_pkt_burst = NULL;
+
+	/* Setting the number queues allocated to the VF */
+	ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+	ethdev->data->nb_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+	/* Reference VF mac address from PF data structure */
+	vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+		representor->pf_ethdev->data->dev_private);
+
+	ethdev->data->mac_addrs = (struct ether_addr *)
+		vf_data[representor->vf_id].vf_mac_addresses;
+
+	/* Link state. Inherited from PF */
+	link = &representor->pf_ethdev->data->dev_link;
+
+	ethdev->data->dev_link.link_speed = link->link_speed;
+	ethdev->data->dev_link.link_duplex = link->link_duplex;
+	ethdev->data->dev_link.link_status = link->link_status;
+	ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+	return 0;
+}
+
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+	return 0;
+}
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index f649e659d..02d5ef5e4 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -19,6 +19,7 @@ sources = files(
 	'ixgbe_pf.c',
 	'ixgbe_rxtx.c',
 	'ixgbe_tm.c',
+	'ixgbe_vf_representor.c',
 	'rte_pmd_ixgbe.c'
 )
 
-- 
2.14.3


