[dpdk-dev] [PATCH 06/10] vdpa/sfc: add support for dev conf and dev close ops

Vijay Srivastava vijay.srivastava at xilinx.com
Tue Jul 6 18:44:14 CEST 2021


From: Vijay Kumar Srivastava <vsrivast at xilinx.com>

Implement vDPA ops dev_conf and dev_close to handle DMA mapping,
interrupt setup and virtqueue configuration.

Signed-off-by: Vijay Kumar Srivastava <vsrivast at xilinx.com>
---
 drivers/vdpa/sfc/sfc_vdpa.c     |   6 +
 drivers/vdpa/sfc/sfc_vdpa.h     |  43 ++++
 drivers/vdpa/sfc/sfc_vdpa_hw.c  |  70 ++++++
 drivers/vdpa/sfc/sfc_vdpa_ops.c | 527 ++++++++++++++++++++++++++++++++++++++--
 drivers/vdpa/sfc/sfc_vdpa_ops.h |  28 +++
 5 files changed, 654 insertions(+), 20 deletions(-)
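
For reviewers' context (not part of the patch): the sketch below shows how the
dev_conf and dev_close callbacks implemented here are reached through the
generic vDPA framework. The struct members and callback signatures come from
rte_vdpa_dev.h; the sketch_* names, the stub bodies and the reduced member set
are illustrative assumptions only, the driver's real ops table is registered
earlier in this series.

    /*
     * Illustrative sketch. The vhost library drives these callbacks:
     * dev_conf runs once the guest virtio driver is ready, dev_close
     * runs when the vhost-user connection is torn down.
     */
    #include <stdint.h>
    #include <rte_vdpa_dev.h>

    /* Stubs standing in for the callbacks added by this series */
    static int sketch_dev_config(int vid) { (void)vid; return 0; }
    static int sketch_dev_close(int vid) { (void)vid; return 0; }
    static int sketch_get_queue_num(struct rte_vdpa_device *dev, uint32_t *num)
    { (void)dev; *num = 1; return 0; }
    static int sketch_get_features(struct rte_vdpa_device *dev, uint64_t *feat)
    { (void)dev; *feat = 0; return 0; }

    struct rte_vdpa_dev_ops sketch_vdpa_ops = {
            .get_queue_num = sketch_get_queue_num,
            .get_features  = sketch_get_features,
            .dev_conf      = sketch_dev_config,
            .dev_close     = sketch_dev_close,
    };

In this patch sfc_vdpa_dev_config() maps guest memory for DMA, enables the
MSI-X eventfds through VFIO and starts the virtqueues; sfc_vdpa_dev_close()
stops the virtqueues, disables the interrupts and unmaps the memory again.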

diff --git a/drivers/vdpa/sfc/sfc_vdpa.c b/drivers/vdpa/sfc/sfc_vdpa.c
index 9c12dcb..ca13483 100644
--- a/drivers/vdpa/sfc/sfc_vdpa.c
+++ b/drivers/vdpa/sfc/sfc_vdpa.c
@@ -246,6 +246,8 @@ struct sfc_vdpa_ops_data *
 
 	sfc_vdpa_log_init(sva, "entry");
 
+	sfc_vdpa_adapter_lock_init(sva);
+
 	sfc_vdpa_log_init(sva, "vfio init");
 	if (sfc_vdpa_vfio_setup(sva) < 0) {
 		sfc_vdpa_err(sva, "failed to setup device %s", pci_dev->name);
@@ -280,6 +282,8 @@ struct sfc_vdpa_ops_data *
 	sfc_vdpa_vfio_teardown(sva);
 
 fail_vfio_setup:
+	sfc_vdpa_adapter_lock_fini(sva);
+
 fail_set_log_prefix:
 	rte_free(sva);
 
@@ -311,6 +315,8 @@ struct sfc_vdpa_ops_data *
 
 	sfc_vdpa_vfio_teardown(sva);
 
+	sfc_vdpa_adapter_lock_fini(sva);
+
 	rte_free(sva);
 
 	return 0;
diff --git a/drivers/vdpa/sfc/sfc_vdpa.h b/drivers/vdpa/sfc/sfc_vdpa.h
index 08075e5..b103b0a 100644
--- a/drivers/vdpa/sfc/sfc_vdpa.h
+++ b/drivers/vdpa/sfc/sfc_vdpa.h
@@ -80,10 +80,53 @@ struct sfc_vdpa_ops_data *
 void
 sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);
 
+int
+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map);
+
 static inline struct sfc_vdpa_adapter *
 sfc_vdpa_adapter_by_dev_handle(void *dev_handle)
 {
 	return (struct sfc_vdpa_adapter *)dev_handle;
 }
 
+/*
+ * Wrapper functions to acquire/release the adapter lock so that the
+ * locking scheme can be changed or removed in one place.
+ */
+static inline void
+sfc_vdpa_adapter_lock_init(struct sfc_vdpa_adapter *sva)
+{
+	rte_spinlock_init(&sva->lock);
+}
+
+static inline int
+sfc_vdpa_adapter_is_locked(struct sfc_vdpa_adapter *sva)
+{
+	return rte_spinlock_is_locked(&sva->lock);
+}
+
+static inline void
+sfc_vdpa_adapter_lock(struct sfc_vdpa_adapter *sva)
+{
+	rte_spinlock_lock(&sva->lock);
+}
+
+static inline int
+sfc_vdpa_adapter_trylock(struct sfc_vdpa_adapter *sva)
+{
+	return rte_spinlock_trylock(&sva->lock);
+}
+
+static inline void
+sfc_vdpa_adapter_unlock(struct sfc_vdpa_adapter *sva)
+{
+	rte_spinlock_unlock(&sva->lock);
+}
+
+static inline void
+sfc_vdpa_adapter_lock_fini(__rte_unused struct sfc_vdpa_adapter *sva)
+{
+	/* Just for symmetry of the API */
+}
+
 #endif  /* _SFC_VDPA_H */
diff --git a/drivers/vdpa/sfc/sfc_vdpa_hw.c b/drivers/vdpa/sfc/sfc_vdpa_hw.c
index 84e680f..047bcc4 100644
--- a/drivers/vdpa/sfc/sfc_vdpa_hw.c
+++ b/drivers/vdpa/sfc/sfc_vdpa_hw.c
@@ -8,6 +8,7 @@
 #include <rte_common.h>
 #include <rte_errno.h>
 #include <rte_vfio.h>
+#include <rte_vhost.h>
 
 #include "efx.h"
 #include "sfc_vdpa.h"
@@ -104,6 +105,75 @@
 	memset(esmp, 0, sizeof(*esmp));
 }
 
+int
+sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)
+{
+	uint32_t i, j;
+	int rc;
+	struct rte_vhost_memory *vhost_mem = NULL;
+	struct rte_vhost_mem_region *mem_reg = NULL;
+	int vfio_container_fd;
+	void *dev;
+
+	dev = ops_data->dev_handle;
+	vfio_container_fd =
+		sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;
+
+	rc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);
+	if (rc < 0) {
+		sfc_vdpa_err(dev,
+			     "failed to get VM memory layout");
+		goto error;
+	}
+
+	for (i = 0; i < vhost_mem->nregions; i++) {
+		mem_reg = &vhost_mem->regions[i];
+
+		if (do_map) {
+			rc = rte_vfio_container_dma_map(vfio_container_fd,
+						mem_reg->host_user_addr,
+						mem_reg->guest_phys_addr,
+						mem_reg->size);
+			if (rc < 0) {
+				sfc_vdpa_err(dev,
+					     "DMA map failed : %s",
+					     rte_strerror(rte_errno));
+				goto failed_vfio_dma_map;
+			}
+		} else {
+			rc = rte_vfio_container_dma_unmap(vfio_container_fd,
+						mem_reg->host_user_addr,
+						mem_reg->guest_phys_addr,
+						mem_reg->size);
+			if (rc < 0) {
+				sfc_vdpa_err(dev,
+					     "DMA unmap failed : %s",
+					     rte_strerror(rte_errno));
+				goto error;
+			}
+		}
+	}
+
+	free(vhost_mem);
+
+	return 0;
+
+failed_vfio_dma_map:
+	for (j = 0; j < i; j++) {
+		mem_reg = &vhost_mem->regions[j];
+		/* Best-effort rollback; keep the original map error in rc */
+		rte_vfio_container_dma_unmap(vfio_container_fd,
+					     mem_reg->host_user_addr,
+					     mem_reg->guest_phys_addr,
+					     mem_reg->size);
+	}
+
+error:
+	if (vhost_mem)
+		free(vhost_mem);
+
+	return rc;
+}
+
 static int
 sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
 		      const efx_bar_region_t *mem_ebrp)
diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c
index 5253adb..4228044 100644
--- a/drivers/vdpa/sfc/sfc_vdpa_ops.c
+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c
@@ -3,10 +3,13 @@
  * Copyright(c) 2020-2021 Xilinx, Inc.
  */
 
+#include <sys/ioctl.h>
+
 #include <rte_errno.h>
 #include <rte_malloc.h>
 #include <rte_vdpa.h>
 #include <rte_vdpa_dev.h>
+#include <rte_vfio.h>
 #include <rte_vhost.h>
 
 #include "efx.h"
@@ -28,24 +31,12 @@
 #define SFC_VDPA_DEFAULT_FEATURES \
 		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
 
-static int
-sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
-{
-	struct sfc_vdpa_ops_data *ops_data;
-	void *dev;
-
-	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
-	if (ops_data == NULL)
-		return -1;
-
-	dev = ops_data->dev_handle;
-	*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;
+#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \
+		(sizeof(struct vfio_irq_set) + \
+		sizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))
 
-	sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %d",
-		      *queue_num);
-
-	return 0;
-}
+/* Target VF value used when the calling function is not the PF */
+#define SFC_VDPA_VF_NULL		0xFFFF
 
 static int
 sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)
@@ -74,6 +65,438 @@
 	return 0;
 }
 
+static uint64_t
+hva_to_gpa(int vid, uint64_t hva)
+{
+	struct rte_vhost_memory *vhost_mem = NULL;
+	struct rte_vhost_mem_region *mem_reg = NULL;
+	uint32_t i;
+	uint64_t gpa = 0;
+
+	if (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)
+		goto error;
+
+	for (i = 0; i < vhost_mem->nregions; i++) {
+		mem_reg = &vhost_mem->regions[i];
+
+		if (hva >= mem_reg->host_user_addr &&
+				hva < mem_reg->host_user_addr + mem_reg->size) {
+			gpa = (hva - mem_reg->host_user_addr) +
+				mem_reg->guest_phys_addr;
+			break;
+		}
+	}
+
+error:
+	if (vhost_mem)
+		free(vhost_mem);
+	return gpa;
+}
+
+static int
+sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
+{
+	int rc;
+	int *irq_fd_ptr;
+	int vfio_dev_fd;
+	uint32_t i, num_vring;
+	struct rte_vhost_vring vring;
+	struct vfio_irq_set *irq_set;
+	struct rte_pci_device *pci_dev;
+	char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
+	void *dev;
+
+	num_vring = rte_vhost_get_vring_num(ops_data->vid);
+	dev = ops_data->dev_handle;
+	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
+	pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = num_vring + 1;
+	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+			 VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+	irq_fd_ptr = (int *)&irq_set->data;
+	irq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = pci_dev->intr_handle.fd;
+
+	for (i = 0; i < num_vring; i++) {
+		rte_vhost_get_vhost_vring(ops_data->vid, i, &vring);
+		irq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+	}
+
+	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (rc) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "error enabling MSI-X interrupts: %s",
+			     strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
+{
+	int rc;
+	int vfio_dev_fd;
+	struct vfio_irq_set *irq_set;
+	char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
+	void *dev;
+
+	dev = ops_data->dev_handle;
+	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
+
+	irq_set = (struct vfio_irq_set *)irq_set_buf;
+	irq_set->argsz = sizeof(irq_set_buf);
+	irq_set->count = 0;
+	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+	irq_set->start = 0;
+
+	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+	if (rc) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "error disabling MSI-X interrupts: %s",
+			     strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,
+			int vq_num, struct sfc_vdpa_vring_info *vring)
+{
+	int rc;
+	uint64_t gpa;
+	struct rte_vhost_vring vq;
+
+	rc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);
+	if (rc < 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "get vhost vring failed: %s", rte_strerror(rc));
+		return rc;
+	}
+
+	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
+	if (gpa == 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "fail to get GPA for descriptor ring.");
+		goto fail_vring_map;
+	}
+	vring->desc = gpa;
+
+	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);
+	if (gpa == 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "fail to get GPA for available ring.");
+		goto fail_vring_map;
+	}
+	vring->avail = gpa;
+
+	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);
+	if (gpa == 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "fail to get GPA for used ring.");
+		goto fail_vring_map;
+	}
+	vring->used = gpa;
+
+	vring->size = vq.size;
+
+	rc = rte_vhost_get_vring_base(ops_data->vid, vq_num,
+				      &vring->last_avail_idx,
+				      &vring->last_used_idx);
+
+	return rc;
+
+fail_vring_map:
+	return -1;
+}
+
+static int
+sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
+{
+	int rc;
+	efx_virtio_vq_t *vq;
+	struct sfc_vdpa_vring_info vring;
+	efx_virtio_vq_cfg_t vq_cfg;
+	efx_virtio_vq_dyncfg_t vq_dyncfg;
+
+	vq = ops_data->vq_cxt[vq_num].vq;
+	if (vq == NULL)
+		return -1;
+
+	rc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);
+	if (rc < 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "get vring info failed: %s", rte_strerror(rc));
+		goto fail_vring_info;
+	}
+
+	vq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;
+
+	/* Even virtqueue numbers are used for RX, odd ones for TX */
+	if (vq_num % 2) {
+		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;
+		sfc_vdpa_info(ops_data->dev_handle,
+			      "configure virtqueue # %d (TXQ)", vq_num);
+	} else {
+		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
+		sfc_vdpa_info(ops_data->dev_handle,
+			      "configure virtqueue # %d (RXQ)", vq_num);
+	}
+
+	vq_cfg.evvc_vq_num = vq_num;
+	vq_cfg.evvc_desc_tbl_addr   = vring.desc;
+	vq_cfg.evvc_avail_ring_addr = vring.avail;
+	vq_cfg.evvc_used_ring_addr  = vring.used;
+	vq_cfg.evvc_vq_size = vring.size;
+
+	vq_dyncfg.evvd_vq_pidx = vring.last_used_idx;
+	vq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;
+
+	/* MSI-X vector is function-relative */
+	vq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;
+	if (ops_data->vdpa_context == SFC_VDPA_AS_VF)
+		vq_cfg.evvc_pas_id = 0;
+	vq_cfg.evcc_features = ops_data->dev_features &
+			       ops_data->req_features;
+
+	/* Start virtqueue */
+	rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
+	if (rc != 0) {
+		/* destroy virtqueue */
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "virtqueue start failed: %s",
+			     rte_strerror(rc));
+		efx_virtio_qdestroy(vq);
+		goto fail_virtio_qstart;
+	}
+
+	sfc_vdpa_info(ops_data->dev_handle,
+		      "virtqueue started successfully for vq_num %d", vq_num);
+
+	ops_data->vq_cxt[vq_num].enable = B_TRUE;
+
+	return rc;
+
+fail_virtio_qstart:
+fail_vring_info:
+	return rc;
+}
+
+static int
+sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)
+{
+	int rc;
+	efx_virtio_vq_dyncfg_t vq_idx;
+	efx_virtio_vq_t *vq;
+
+	if (ops_data->vq_cxt[vq_num].enable != B_TRUE)
+		return -1;
+
+	vq = ops_data->vq_cxt[vq_num].vq;
+	if (vq == NULL)
+		return -1;
+
+	/* stop the vq */
+	rc = efx_virtio_qstop(vq, &vq_idx);
+	if (rc == 0) {
+		ops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;
+		ops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;
+	}
+	ops_data->vq_cxt[vq_num].enable = B_FALSE;
+
+	return rc;
+}
+
+static int
+sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)
+{
+	int rc, i;
+	int nr_vring;
+	int max_vring_cnt;
+	efx_virtio_vq_t *vq;
+	efx_nic_t *nic;
+	void *dev;
+
+	dev = ops_data->dev_handle;
+	nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;
+
+	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);
+
+	ops_data->state = SFC_VDPA_STATE_CONFIGURING;
+
+	nr_vring = rte_vhost_get_vring_num(ops_data->vid);
+	max_vring_cnt =
+		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
+
+	/* The vring count must not exceed the supported max virtqueue count */
+	if (nr_vring > max_vring_cnt) {
+		sfc_vdpa_err(dev,
+			     "nr_vring (%d) is > max vring count (%d)",
+			     nr_vring, max_vring_cnt);
+		goto fail_vring_num;
+	}
+
+	rc = sfc_vdpa_dma_map(ops_data, true);
+	if (rc) {
+		sfc_vdpa_err(dev,
+			     "DMA map failed: %s", rte_strerror(rc));
+		goto fail_dma_map;
+	}
+
+	for (i = 0; i < nr_vring; i++) {
+		rc = efx_virtio_qcreate(nic, &vq);
+		if ((rc != 0) || (vq == NULL)) {
+			sfc_vdpa_err(dev,
+				     "virtqueue create failed: %s",
+				     rte_strerror(rc));
+			goto fail_vq_create;
+		}
+
+		/* store created virtqueue context */
+		ops_data->vq_cxt[i].vq = vq;
+	}
+
+	ops_data->vq_count = i;
+
+	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
+
+	return 0;
+
+fail_vq_create:
+	sfc_vdpa_dma_map(ops_data, false);
+
+fail_dma_map:
+fail_vring_num:
+	ops_data->state = SFC_VDPA_STATE_INITIALIZED;
+
+	return -1;
+}
+
+static void
+sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)
+{
+	int i;
+
+	if (ops_data->state != SFC_VDPA_STATE_CONFIGURED)
+		return;
+
+	ops_data->state = SFC_VDPA_STATE_CLOSING;
+
+	for (i = 0; i < ops_data->vq_count; i++) {
+		if (ops_data->vq_cxt[i].vq == NULL)
+			continue;
+
+		efx_virtio_qdestroy(ops_data->vq_cxt[i].vq);
+	}
+
+	sfc_vdpa_dma_map(ops_data, false);
+
+	ops_data->state = SFC_VDPA_STATE_INITIALIZED;
+}
+
+static void
+sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)
+{
+	int i;
+
+	if (ops_data->state != SFC_VDPA_STATE_STARTED)
+		return;
+
+	ops_data->state = SFC_VDPA_STATE_STOPPING;
+
+	for (i = 0; i < ops_data->vq_count; i++)
+		sfc_vdpa_virtq_stop(ops_data, i);
+
+	sfc_vdpa_disable_vfio_intr(ops_data);
+
+	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
+}
+
+static int
+sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)
+{
+	int i, j;
+	int rc;
+
+	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "entry");
+
+	ops_data->state = SFC_VDPA_STATE_STARTING;
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "enable interrupts");
+	rc = sfc_vdpa_enable_vfio_intr(ops_data);
+	if (rc < 0) {
+		sfc_vdpa_err(ops_data->dev_handle,
+			     "vfio intr allocation failed: %s",
+			     rte_strerror(rc));
+		goto fail_enable_vfio_intr;
+	}
+
+	rte_vhost_get_negotiated_features(ops_data->vid,
+					  &ops_data->req_features);
+
+	sfc_vdpa_info(ops_data->dev_handle,
+		      "negotiated feature : 0x%" PRIx64,
+		      ops_data->req_features);
+
+	for (i = 0; i < ops_data->vq_count; i++) {
+		sfc_vdpa_log_init(ops_data->dev_handle,
+				  "starting vq# %d", i);
+		rc = sfc_vdpa_virtq_start(ops_data, i);
+		if (rc != 0)
+			goto fail_vq_start;
+	}
+
+	ops_data->state = SFC_VDPA_STATE_STARTED;
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "done");
+
+	return 0;
+
+fail_vq_start:
+	/* stop already started virtqueues */
+	for (j = 0; j < i; j++)
+		sfc_vdpa_virtq_stop(ops_data, j);
+	sfc_vdpa_disable_vfio_intr(ops_data);
+
+fail_enable_vfio_intr:
+	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
+
+	return rc;
+}
+
+static int
+sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
+{
+	struct sfc_vdpa_ops_data *ops_data;
+	void *dev;
+
+	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+	if (ops_data == NULL)
+		return -1;
+
+	dev = ops_data->dev_handle;
+	*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;
+
+	sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %d",
+		      *queue_num);
+
+	return 0;
+}
+
 static int
 sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)
 {
@@ -114,7 +537,53 @@
 static int
 sfc_vdpa_dev_config(int vid)
 {
-	RTE_SET_USED(vid);
+	struct rte_vdpa_device *vdpa_dev;
+	int rc;
+	struct sfc_vdpa_ops_data *ops_data;
+
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+	if (ops_data == NULL) {
+		/* Cannot log via ops_data here since it is NULL */
+		return -1;
+	}
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "entry");
+
+	ops_data->vid = vid;
+
+	sfc_vdpa_adapter_lock(ops_data->dev_handle);
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "configuring");
+	rc = sfc_vdpa_configure(ops_data);
+	if (rc != 0)
+		goto fail_vdpa_config;
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "starting");
+	rc = sfc_vdpa_start(ops_data);
+	if (rc != 0)
+		goto fail_vdpa_start;
+
+	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
+	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
+		sfc_vdpa_info(ops_data->dev_handle,
+			      "vDPA (%s): software relay for notify is used.",
+			      vdpa_dev->device->name);
+
+	sfc_vdpa_log_init(ops_data->dev_handle, "done");
+
+	return 0;
+
+fail_vdpa_start:
+	sfc_vdpa_close(ops_data);
+
+fail_vdpa_config:
+	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
 
 	return -1;
 }
@@ -122,9 +591,27 @@
 static int
 sfc_vdpa_dev_close(int vid)
 {
-	RTE_SET_USED(vid);
+	struct rte_vdpa_device *vdpa_dev;
+	struct sfc_vdpa_ops_data *ops_data;
 
-	return -1;
+	vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+	if (ops_data == NULL) {
+		/* Cannot log via ops_data here since it is NULL */
+		return -1;
+	}
+
+	sfc_vdpa_adapter_lock(ops_data->dev_handle);
+
+	sfc_vdpa_stop(ops_data);
+	sfc_vdpa_close(ops_data);
+
+	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+
+	return 0;
 }
 
 static int
diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h
index 21cbb73..8d553c5 100644
--- a/drivers/vdpa/sfc/sfc_vdpa_ops.h
+++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h
@@ -18,17 +18,45 @@ enum sfc_vdpa_context {
 enum sfc_vdpa_state {
 	SFC_VDPA_STATE_UNINITIALIZED = 0,
 	SFC_VDPA_STATE_INITIALIZED,
+	SFC_VDPA_STATE_CONFIGURING,
+	SFC_VDPA_STATE_CONFIGURED,
+	SFC_VDPA_STATE_CLOSING,
+	SFC_VDPA_STATE_CLOSED,
+	SFC_VDPA_STATE_STARTING,
+	SFC_VDPA_STATE_STARTED,
+	SFC_VDPA_STATE_STOPPING,
 	SFC_VDPA_STATE_NSTATES
 };
 
+struct sfc_vdpa_vring_info {
+	uint64_t	desc;
+	uint64_t	avail;
+	uint64_t	used;
+	uint64_t	size;
+	uint16_t	last_avail_idx;
+	uint16_t	last_used_idx;
+};
+
+typedef struct sfc_vdpa_vq_context_s {
+	uint8_t				enable;
+	uint32_t			pidx;
+	uint32_t			cidx;
+	efx_virtio_vq_t			*vq;
+} sfc_vdpa_vq_context_t;
+
 struct sfc_vdpa_ops_data {
 	void				*dev_handle;
+	int				vid;
 	struct rte_vdpa_device		*vdpa_dev;
 	enum sfc_vdpa_context		vdpa_context;
 	enum sfc_vdpa_state		state;
 
 	uint64_t			dev_features;
 	uint64_t			drv_features;
+	uint64_t			req_features;
+
+	uint16_t			vq_count;
+	struct sfc_vdpa_vq_context_s	vq_cxt[SFC_VDPA_MAX_QUEUE_PAIRS * 2];
 };
 
 struct sfc_vdpa_ops_data *
-- 
1.8.3.1
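
Postscript, illustration only (not part of the patch): a minimal host
application sketch, loosely modelled on examples/vdpa, showing when the
framework ends up calling the new ops. The socket path and PCI device name
are hypothetical and error handling is trimmed.

    #include <stdint.h>
    #include <unistd.h>

    #include <rte_eal.h>
    #include <rte_vdpa.h>
    #include <rte_vhost.h>

    int
    main(int argc, char **argv)
    {
            struct rte_vdpa_device *vdpa_dev;
            uint64_t features;
            const char *sock = "/tmp/vdpa.sock";   /* hypothetical socket */
            const char *name = "0000:03:00.1";     /* hypothetical VF BDF */

            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            /* vDPA device probed by the sfc driver */
            vdpa_dev = rte_vdpa_find_device_by_name(name);
            if (vdpa_dev == NULL)
                    return -1;

            /* Expose it over vhost-user and advertise the device features */
            if (rte_vhost_driver_register(sock, 0) != 0 ||
                rte_vhost_driver_attach_vdpa_device(sock, vdpa_dev) != 0 ||
                rte_vdpa_get_features(vdpa_dev, &features) != 0 ||
                rte_vhost_driver_set_features(sock, features) != 0 ||
                rte_vhost_driver_start(sock) != 0)
                    return -1;

            /*
             * When QEMU connects and the guest driver becomes ready, the
             * vhost library calls dev_conf (sfc_vdpa_dev_config here); on
             * disconnect it calls dev_close (sfc_vdpa_dev_close).
             */
            for (;;)
                    sleep(1);
    }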


