|WARNING| pw108263-108268 [PATCH] [7/7] vdpa/mlx5: make statistics counter persistent

dpdklab at iol.unh.edu
Thu Feb 24 14:49:17 CET 2022


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/108263

_apply patch failure_

Submitter: Xueming Li <xuemingl at nvidia.com>
Date: Thursday, February 24, 2022 13:28:20
Applied on: CommitID:ee05a93e1e6633d0fdec409faf09f12a2e05b991
Apply patch set 108263-108268 failed:

Checking patch drivers/vdpa/mlx5/mlx5_vdpa.c...
error: while searching for:
	DRV_LOG(DEBUG, "no traffic max is %u.", priv->no_traffic_max);
}

static int
mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
		    struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_vdpa_priv *priv = NULL;
	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
	int retry;

	if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "

error: patch failed: drivers/vdpa/mlx5/mlx5_vdpa.c:507
Hunk #7 succeeded at 534 (offset -79 lines).
error: while searching for:
	}
	mlx5_vdpa_config_get(mkvlist, priv);
	SLIST_INIT(&priv->mr_list);
	pthread_mutex_init(&priv->vq_config_lock, NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;

error:
	if (priv) {
		if (priv->var)
			mlx5_glue->dv_free_var(priv->var);
		rte_intr_instance_free(priv->err_intr_handle);
		rte_free(priv);
	}
	return -rte_errno;
}


error: patch failed: drivers/vdpa/mlx5/mlx5_vdpa.c:564
Hunk #9 succeeded at 578 (offset -73 lines).
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.h...
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_event.c...
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_mem.c...
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_steer.c...
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_virtq.c...
Applying patch drivers/vdpa/mlx5/mlx5_vdpa.c with 2 rejects...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Rejected hunk #6.
Hunk #7 applied cleanly.
Rejected hunk #8.
Hunk #9 applied cleanly.
Applied patch drivers/vdpa/mlx5/mlx5_vdpa.h cleanly.
Applied patch drivers/vdpa/mlx5/mlx5_vdpa_event.c cleanly.
Applied patch drivers/vdpa/mlx5/mlx5_vdpa_mem.c cleanly.
Applied patch drivers/vdpa/mlx5/mlx5_vdpa_steer.c cleanly.
Applied patch drivers/vdpa/mlx5/mlx5_vdpa_virtq.c cleanly.
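
Hunks #6 and #8 were rejected because the context git searched for (quoted above after each "error: while searching for:") is no longer present at lines 507 and 564 of drivers/vdpa/mlx5/mlx5_vdpa.c in the baseline commit; the neighboring hunks only landed at offsets of -79 and -73 lines. Both symptoms point to the file having drifted since the series was generated. The two rejected hunks are dumped below.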
diff a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c	(rejected hunks)
@@ -507,13 +508,88 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
 	DRV_LOG(DEBUG, "no traffic max is %u.", priv->no_traffic_max);
 }
 
+static int
+mlx5_vdpa_create_dev_resources(struct mlx5_vdpa_priv *priv)
+{
+	struct mlx5_devx_tis_attr tis_attr = {0};
+	struct ibv_context *ctx = priv->cdev->ctx;
+	uint32_t i;
+	int retry;
+
+	for (retry = 0; retry < 7; retry++) {
+		priv->var = mlx5_glue->dv_alloc_var(ctx, 0);
+		if (priv->var != NULL)
+			break;
+		DRV_LOG(WARNING, "Failed to allocate VAR, retry %d.", retry);
+		/* Wait Qemu release VAR during vdpa restart, 0.1 sec based. */
+		usleep(100000U << retry);
+	}
+	if (!priv->var) {
+		DRV_LOG(ERR, "Failed to allocate VAR %u.", errno);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	/* Always map the entire page. */
+	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
+				   PROT_WRITE, MAP_SHARED, ctx->cmd_fd,
+				   priv->var->mmap_off);
+	if (priv->virtq_db_addr == MAP_FAILED) {
+		DRV_LOG(ERR, "Failed to map doorbell page %u.", errno);
+		priv->virtq_db_addr = NULL;
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
+		priv->virtq_db_addr);
+	priv->td = mlx5_devx_cmd_create_td(ctx);
+	if (!priv->td) {
+		DRV_LOG(ERR, "Failed to create transport domain.");
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	tis_attr.transport_domain = priv->td->id;
+	for (i = 0; i < priv->num_lag_ports; i++) {
+		/* 0 is auto affinity, non-zero value to propose port. */
+		tis_attr.lag_tx_port_affinity = i + 1;
+		priv->tiss[i] = mlx5_devx_cmd_create_tis(ctx, &tis_attr);
+		if (!priv->tiss[i]) {
+			DRV_LOG(ERR, "Failed to create TIS %u.", i);
+			return -rte_errno;
+		}
+	}
+	priv->null_mr = mlx5_glue->alloc_null_mr(priv->cdev->pd);
+	if (!priv->null_mr) {
+		DRV_LOG(ERR, "Failed to allocate null MR.");
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "Dump fill Mkey = %u.", priv->null_mr->lkey);
+	priv->steer.domain = mlx5_glue->dr_create_domain(ctx,
+					MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
+	if (!priv->steer.domain) {
+		DRV_LOG(ERR, "Failed to create Rx domain.");
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
+	if (!priv->steer.tbl) {
+		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	if (mlx5_vdpa_err_event_setup(priv) != 0)
+		return -rte_errno;
+	if (mlx5_vdpa_event_qp_global_prepare(priv))
+		return -rte_errno;
+	return 0;
+}
+
 static int
 mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		    struct mlx5_kvargs_ctrl *mkvlist)
 {
 	struct mlx5_vdpa_priv *priv = NULL;
 	struct mlx5_hca_attr *attr = &cdev->config.hca_attr;
-	int retry;
 
 	if (!attr->vdpa.valid || !attr->vdpa.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Not enough capabilities to support vdpa, maybe "
@@ -564,19 +625,13 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 	}
 	mlx5_vdpa_config_get(mkvlist, priv);
 	SLIST_INIT(&priv->mr_list);
-	pthread_mutex_init(&priv->vq_config_lock, NULL);
 	pthread_mutex_lock(&priv_list_lock);
 	TAILQ_INSERT_TAIL(&priv_list, priv, next);
 	pthread_mutex_unlock(&priv_list_lock);
 	return 0;
-
 error:
-	if (priv) {
-		if (priv->var)
-			mlx5_glue->dv_free_var(priv->var);
-		rte_intr_instance_free(priv->err_intr_handle);
-		rte_free(priv);
-	}
+	if (priv)
+		mlx5_vdpa_dev_release(priv);
 	return -rte_errno;
 }
 
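For context, the core of rejected hunk #6 is the VAR allocation retry: the driver waits for QEMU to release the VAR across a vdpa restart by doubling the sleep between attempts. Below is a minimal standalone sketch of that backoff pattern; try_alloc_var() is a hypothetical stand-in for mlx5_glue->dv_alloc_var(), not a real API.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for mlx5_glue->dv_alloc_var(): fails the
 * first few calls, as if QEMU still held the VAR across a restart. */
static void *
try_alloc_var(void)
{
	static int calls;
	static int dummy_var;

	return (++calls < 3) ? NULL : &dummy_var;
}

/* Exponential backoff as in hunk #6: up to 7 attempts, sleeping
 * 0.1 s, 0.2 s, 0.4 s, ... between them (~12.7 s worst case). */
static void *
alloc_var_with_backoff(void)
{
	void *var = NULL;
	int retry;

	for (retry = 0; retry < 7; retry++) {
		var = try_alloc_var();
		if (var != NULL)
			break;
		fprintf(stderr, "VAR busy, retry %d\n", retry);
		usleep(100000U << retry); /* microseconds, doubled each try */
	}
	return var; /* NULL if all attempts were exhausted */
}

int
main(void)
{
	printf("VAR %s\n", alloc_var_with_backoff() ? "acquired" : "unavailable");
	return 0;
}

The shift keeps the total worst-case wait bounded (about 12.7 seconds over seven attempts) while still returning quickly when the resource frees up early.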
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.c...
error: drivers/vdpa/mlx5/mlx5_vdpa.c: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.h...
error: drivers/vdpa/mlx5/mlx5_vdpa.h: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_mem.c...
error: drivers/vdpa/mlx5/mlx5_vdpa_mem.c: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_virtq.c...
error: drivers/vdpa/mlx5/mlx5_vdpa_virtq.c: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.c...
error: drivers/vdpa/mlx5/mlx5_vdpa.c: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.h...
error: drivers/vdpa/mlx5/mlx5_vdpa.h: does not match index
Checking patch doc/guides/vdpadevs/mlx5.rst...
Hunk #1 succeeded at 182 (offset 73 lines).
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.c...
error: drivers/vdpa/mlx5/mlx5_vdpa.c: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa.h...
error: drivers/vdpa/mlx5/mlx5_vdpa.h: does not match index
Checking patch drivers/vdpa/mlx5/mlx5_vdpa_virtq.c...
error: drivers/vdpa/mlx5/mlx5_vdpa_virtq.c: does not match index
Applied patch doc/guides/vdpadevs/mlx5.rst cleanly.
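
The "does not match index" errors on the remaining patches of the set appear to be fallout from the partial apply above: with hunks #6 and #8 of mlx5_vdpa.c rejected, the tree no longer matches the preimage the later patches record, so git refuses to attempt them at all. Only the documentation change (doc/guides/vdpadevs/mlx5.rst) still applied cleanly; the series as a whole needs a rebase onto the lab's baseline (commit ee05a93e1e6633d0fdec409faf09f12a2e05b991) or newer.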

https://lab.dpdk.org/results/dashboard/patchsets/21254/

UNH-IOL DPDK Community Lab

