[PATCH v7 06/18] vdpa/ifc: add block device SW live-migration

Xia, Chenbo chenbo.xia at intel.com
Thu May 12 14:55:02 CEST 2022


> -----Original Message-----
> From: Pei, Andy <andy.pei at intel.com>
> Sent: Wednesday, April 27, 2022 4:30 PM
> To: dev at dpdk.org
> Cc: Xia, Chenbo <chenbo.xia at intel.com>; maxime.coquelin at redhat.com; Cao,
> Gang <gang.cao at intel.com>; Liu, Changpeng <changpeng.liu at intel.com>
> Subject: [PATCH v7 06/18] vdpa/ifc: add block device SW live-migration
> 
> Add SW live-migration support to block device.
> 
> Signed-off-by: Andy Pei <andy.pei at intel.com>
> ---
>  drivers/vdpa/ifc/ifcvf_vdpa.c | 33 +++++++++++++++++++++++++++++----
>  1 file changed, 29 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
> index 07fc3ca..8a260b7 100644
> --- a/drivers/vdpa/ifc/ifcvf_vdpa.c
> +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
> @@ -312,6 +312,7 @@ struct rte_vdpa_dev_info {
>  vdpa_ifcvf_stop(struct ifcvf_internal *internal)
>  {
>  	struct ifcvf_hw *hw = &internal->hw;
> +	struct rte_vhost_vring vq;
>  	uint32_t i;
>  	int vid;
>  	uint64_t features = 0;
> @@ -319,6 +320,22 @@ struct rte_vdpa_dev_info {
>  	uint64_t len;
> 
>  	vid = internal->vid;
> +
> +	/* to make sure no packet is lost for blk device
> +	 * do not stop until last_avail_idx == last_used_idx
> +	 */
> +	if (internal->device_type == IFCVF_BLK) {
> +		for (i = 0; i < hw->nr_vring; i++) {
> +			rte_vhost_get_vhost_vring(internal->vid, i, &vq);
> +			while (vq.avail->idx != vq.used->idx) {
> +				ifcvf_notify_queue(hw, i);
> +				usleep(10);
> +			}
> +			hw->vring[i].last_avail_idx = vq.avail->idx;
> +			hw->vring[i].last_used_idx = vq.used->idx;
> +		}
> +	}
> +

This does not seem to match the above comment about avoiding in-flight
packet loss. But the change in patch 17 looks good. Why not just use the
implementation from patch 17?
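
To spell the hunk out: it is essentially a per-queue drain loop. A minimal
sketch of the idea (blk_vring_drain() is an illustrative name only; the
fields and the rte_vhost_get_vhost_vring()/ifcvf_notify_queue() helpers are
the ones the patch already uses):

    /* Sketch: drain one split virtqueue before stopping the device.
     * Everything the guest submitted has been completed once the used
     * index written by the device catches up with the guest's avail index.
     */
    static void
    blk_vring_drain(struct ifcvf_internal *internal, uint16_t qid)
    {
            struct ifcvf_hw *hw = &internal->hw;
            struct rte_vhost_vring vq;

            if (rte_vhost_get_vhost_vring(internal->vid, qid, &vq) < 0)
                    return;

            while (vq.avail->idx != vq.used->idx) {
                    ifcvf_notify_queue(hw, qid);  /* kick in case the device went idle */
                    usleep(10);
            }

            /* record the final indexes for the migration state */
            hw->vring[qid].last_avail_idx = vq.avail->idx;
            hw->vring[qid].last_used_idx = vq.used->idx;
    }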

Thanks,
Chenbo

>  	ifcvf_stop_hw(hw);
> 
>  	for (i = 0; i < hw->nr_vring; i++)
> @@ -642,8 +659,10 @@ struct rte_vdpa_dev_info {
>  		}
>  		hw->vring[i].avail = gpa;
> 
> -		/* Direct I/O for Tx queue, relay for Rx queue */
> -		if (i & 1) {
> +		/* NET: Direct I/O for Tx queue, relay for Rx queue
> +		 * BLK: relay every queue
> +		 */
> +		if ((internal->device_type == IFCVF_NET) && (i & 1)) {
>  			gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
>  			if (gpa == 0) {
>  				DRV_LOG(ERR, "Fail to get GPA for used ring.");
> @@ -693,8 +712,12 @@ struct rte_vdpa_dev_info {
> 
>  	for (i = 0; i < hw->nr_vring; i++) {
>  		/* synchronize remaining new used entries if any */
> -		if ((i & 1) == 0)
> +		if (internal->device_type == IFCVF_NET) {
> +			if ((i & 1) == 0)
> +				update_used_ring(internal, i);
> +		} else if (internal->device_type == IFCVF_BLK) {
>  			update_used_ring(internal, i);
> +		}
> 
>  		rte_vhost_get_vhost_vring(vid, i, &vq);
>  		len = IFCVF_USED_RING_LEN(vq.size);
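
A note on the device-type checks in this hunk and the previous one: they
reduce to a single rule. A queue is SW-relayed always for a blk device, and
only when it is an even-indexed (Rx) queue for a net device. As a sketch
(the vring_is_relayed() helper is an illustrative name, not in the driver):

    /* Sketch: returns non-zero when queue qid goes through the SW relay. */
    static inline int
    vring_is_relayed(struct ifcvf_internal *internal, uint16_t qid)
    {
            if (internal->device_type == IFCVF_BLK)
                    return 1;
            return (qid & 1) == 0;  /* net: Rx (even) queues only */
    }

With that, the loop above is just
"if (vring_is_relayed(internal, i)) update_used_ring(internal, i);".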
> @@ -756,7 +779,9 @@ struct rte_vdpa_dev_info {
>  		}
>  	}
> 
> -	for (qid = 0; qid < q_num; qid += 2) {
> +	for (qid = 0; qid < q_num; qid += 1) {
> +		if ((internal->device_type == IFCVF_NET) && (qid & 1))
> +			continue;
>  		ev.events = EPOLLIN | EPOLLPRI;
>  		/* leave a flag to mark it's for interrupt */
>  		ev.data.u64 = 1 | qid << 1 |
> --
> 1.8.3.1
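
The same rule applies to the interrupt-relay registration in the last hunk:
it now walks every queue and skips only the net Tx (odd) queues, i.e. it
registers an epoll entry exactly for the relayed queues (sketch only,
reusing the illustrative vring_is_relayed() from above):

    for (qid = 0; qid < q_num; qid++) {
            if (!vring_is_relayed(internal, qid))
                    continue;  /* net Tx queues keep direct interrupts */
            /* ... add the relay fd to epoll as the patch does ... */
    }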


