[dpdk-dev] [PATCH v5 11/22] event/dlb: add port setup

Eads, Gage gage.eads at intel.com
Tue Oct 20 22:06:50 CEST 2020


[...]

> +static int
> +dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
> +		       struct dlb_create_ldb_port_args *cfg,
> +		       enum dlb_cq_poll_modes poll_mode)
> +{
> +	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> +	struct dlb_cmd_response response = {0};
> +	int ret;
> +	uint8_t *port_base;
> +	const struct rte_memzone *mz;
> +	int alloc_sz, qe_sz, cq_alloc_depth;
> +	rte_iova_t pp_dma_base;
> +	rte_iova_t pc_dma_base;
> +	rte_iova_t cq_dma_base;
> +	int is_dir = false;
> +
> +	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> +
> +	if (poll_mode == DLB_CQ_POLL_MODE_STD)
> +		qe_sz = sizeof(struct dlb_dequeue_qe);
> +	else
> +		qe_sz = RTE_CACHE_LINE_SIZE;
> +
> +	/* The hardware always uses a CQ depth of at least
> +	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
> +	 * perspective we support a depth as low as 1 for LDB ports.
> +	 */
> +	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
> +
> +	/* Calculate the port memory required, including two cache lines for
> +	 * credit pop counts. Round up to the nearest cache line.
> +	 */
> +	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
> +	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
> +
> +	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
> +					       alloc_sz, PAGE_SIZE);
> +	if (port_base == NULL)
> +		return -ENOMEM;
> +
> +	/* Lock the page in memory */
> +	ret = rte_mem_lock_page(port_base);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
> +		goto create_port_err;
> +	}
> +
> +	memset(port_base, 0, alloc_sz);
> +	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
> +
> +	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
> +				     handle->domain_id,
> +				     cfg,
> +				     pc_dma_base,
> +				     cq_dma_base,
> +				     &response);
> +	if (ret)
> +		goto create_port_err;
> +
> +	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
> +	dlb_port[response.id][DLB_LDB].pp_addr =
> +		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
> +
> +	dlb_port[response.id][DLB_LDB].cq_base =
> +		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
> +
> +	dlb_port[response.id][DLB_LDB].ldb_popcount =
> +		(void *)(uintptr_t)port_base;
> +	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
> +		(port_base + RTE_CACHE_LINE_SIZE);
> +	dlb_port[response.id][DLB_LDB].mz = mz;
> +
> +	*(struct dlb_cmd_response *)cfg->response = response;
> +
> +	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> +
> +create_port_err:

Need to free the memzone if the PMD jumps to this label, otherwise the port memory leaks on the error paths.
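
For example (untested sketch), taking advantage of the fact that ret is
already zero when the success path falls through to the label:

create_port_err:

	if (ret)
		rte_memzone_free(mz);

	return ret;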

> +
> +	return ret;
> +}
> +
> +static int
> +dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
> +		       struct dlb_create_dir_port_args *cfg,
> +		       enum dlb_cq_poll_modes poll_mode)
> +{
> +	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
> +	struct dlb_cmd_response response = {0};
> +	int ret;
> +	uint8_t *port_base;
> +	const struct rte_memzone *mz;
> +	int alloc_sz, qe_sz;
> +	rte_iova_t pp_dma_base;
> +	rte_iova_t pc_dma_base;
> +	rte_iova_t cq_dma_base;
> +	int is_dir = true;
> +
> +	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
> +
> +	if (poll_mode == DLB_CQ_POLL_MODE_STD)
> +		qe_sz = sizeof(struct dlb_dequeue_qe);
> +	else
> +		qe_sz = RTE_CACHE_LINE_SIZE;
> +
> +	/* Calculate the port memory required, including two cache lines for
> +	 * credit pop counts. Round up to the nearest cache line.
> +	 */
> +	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
> +	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
> +
> +	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
> +					       alloc_sz, PAGE_SIZE);
> +	if (port_base == NULL)
> +		return -ENOMEM;
> +
> +	/* Lock the page in memory */
> +	ret = rte_mem_lock_page(port_base);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
> +		goto create_port_err;
> +	}
> +
> +	memset(port_base, 0, alloc_sz);
> +	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
> +
> +	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
> +				     handle->domain_id,
> +				     cfg,
> +				     pc_dma_base,
> +				     cq_dma_base,
> +				     &response);
> +	if (ret)
> +		goto create_port_err;
> +
> +	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
> +	dlb_port[response.id][DLB_DIR].pp_addr =
> +		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
> +
> +	dlb_port[response.id][DLB_DIR].cq_base =
> +		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
> +
> +	dlb_port[response.id][DLB_DIR].ldb_popcount =
> +		(void *)(uintptr_t)port_base;
> +	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
> +		(port_base + RTE_CACHE_LINE_SIZE);
> +	dlb_port[response.id][DLB_DIR].mz = mz;
> +
> +	*(struct dlb_cmd_response *)cfg->response = response;
> +
> +	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> +
> +create_port_err:
> +

Ditto
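
The same sketch applies here in dlb_pf_dir_port_create(): check ret at the
create_port_err label and call rte_memzone_free(mz) on the error path.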

Thanks,
Gage

