[dpdk-dev] [PATCH v2 1/4] common/cnxk: add DPI DMA support
fengchengwen
fengchengwen at huawei.com
Tue Nov 2 12:45:16 CET 2021
On 2021/11/2 11:40, Radha Mohan Chintakuntla wrote:
> Add base support as a ROC (Rest of Chip) API which will be used by
> the dmadev PMD driver.
>
> This patch adds routines to init, fini and configure the DPI DMA device
> found in Marvell's CN9k and CN10k SoC families.
>
> Signed-off-by: Radha Mohan Chintakuntla <radhac at marvell.com>
> ---
...
> --- /dev/null
> +++ b/drivers/common/cnxk/hw/dpi.h
> @@ -0,0 +1,141 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +/**
> + * DPI device HW definitions.
> + */
> +#ifndef __DEV_DPI_HW_H__
> +#define __DEV_DPI_HW_H__
Suggest removing the "__" prefix/suffix: identifiers containing double
underscores are reserved for the implementation (by the C standard, and
gcc uses them internally), so they may conflict. See the sketch below.
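For example (untested sketch, same guard with the reserved underscores
dropped):

#ifndef DEV_DPI_HW_H
#define DEV_DPI_HW_H

/* DPI device HW definitions. */

#endif /* DEV_DPI_HW_H */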
...
> +
> +int
> +roc_dpi_configure(struct roc_dpi *roc_dpi)
> +{
> + struct plt_pci_device *pci_dev;
> + const struct plt_memzone *dpi_mz;
> + dpi_mbox_msg_t mbox_msg;
> + struct npa_pool_s pool;
> + struct npa_aura_s aura;
> + int rc, count, buflen;
> + uint64_t aura_handle;
> + plt_iova_t iova;
> + char name[32];
> +
> + if (!roc_dpi) {
> + plt_err("roc_dpi is NULL");
> + return -EINVAL;
> + }
> +
> + pci_dev = roc_dpi->pci_dev;
> + memset(&pool, 0, sizeof(struct npa_pool_s));
> + pool.nat_align = 1;
> +
> + memset(&aura, 0, sizeof(aura));
> + rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,
> + DPI_CMD_QUEUE_BUFS, &aura, &pool);
> + if (rc) {
> + plt_err("Failed to create NPA pool, err %d\n", rc);
> + return rc;
> + }
> +
> + snprintf(name, sizeof(name), "dpimem%d", roc_dpi->vfid);
> + buflen = DPI_CMD_QUEUE_SIZE * DPI_CMD_QUEUE_BUFS;
> + dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0,
> + DPI_CMD_QUEUE_SIZE);
> + if (dpi_mz == NULL) {
> + plt_err("dpi memzone reserve failed");
> + rc = -ENOMEM;
> + goto err1;
Currently this "goto err1" will call plt_memzone_free() on dpi_mz, which
is still NULL here, and it leaks the NPA pool created above. It should
invoke roc_npa_pool_destroy() instead; see the suggested unwind order
after the function below.
> + }
> +
> + roc_dpi->mz = dpi_mz;
> + iova = dpi_mz->iova;
> + for (count = 0; count < DPI_CMD_QUEUE_BUFS; count++) {
> + roc_npa_aura_op_free(aura_handle, 0, iova);
> + iova += DPI_CMD_QUEUE_SIZE;
> + }
> +
> + roc_dpi->chunk_base = (void *)roc_npa_aura_op_alloc(aura_handle, 0);
> + if (!roc_dpi->chunk_base) {
> + plt_err("Failed to alloc buffer from NPA aura");
> + rc = -ENOMEM;
> + goto err2;
> + }
> +
> + roc_dpi->chunk_next = (void *)roc_npa_aura_op_alloc(aura_handle, 0);
> + if (!roc_dpi->chunk_next) {
> + plt_err("Failed to alloc buffer from NPA aura");
> + rc = -ENOMEM;
> + goto err2;
> + }
> +
> + roc_dpi->aura_handle = aura_handle;
> + /* subtract 2 as they have already been alloc'ed above */
> + roc_dpi->pool_size_m1 = (DPI_CMD_QUEUE_SIZE >> 3) - 2;
> +
> + plt_write64(0x0, roc_dpi->rbase + DPI_VDMA_REQQ_CTL);
> + plt_write64(((uint64_t)(roc_dpi->chunk_base) >> 7) << 7,
> + roc_dpi->rbase + DPI_VDMA_SADDR);
> + mbox_msg.u[0] = 0;
> + mbox_msg.u[1] = 0;
> + /* DPI PF driver expects vfid to start from index 0 */
> + mbox_msg.s.vfid = roc_dpi->vfid;
> + mbox_msg.s.cmd = DPI_QUEUE_OPEN;
> + mbox_msg.s.csize = DPI_CMD_QUEUE_SIZE;
> + mbox_msg.s.aura = roc_npa_aura_handle_to_aura(aura_handle);
> + mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
> + mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
> +
> + rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
> + sizeof(dpi_mbox_msg_t));
> + if (rc < 0) {
> + plt_err("Failed to send mbox message %d to DPI PF, err %d",
> + mbox_msg.s.cmd, rc);
> + goto err2;
> + }
> +
> + return rc;
> +
> +err2:
> + roc_npa_pool_destroy(aura_handle);
> +err1:
> + plt_memzone_free(dpi_mz);
> + return rc;
> +}
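
One way to fix the unwind order is to release resources in reverse
order of acquisition, e.g. (untested sketch, label names kept from the
patch):

	if (dpi_mz == NULL) {
		plt_err("dpi memzone reserve failed");
		rc = -ENOMEM;
		goto err1;	/* only the NPA pool exists at this point */
	}
	...
err2:
	plt_memzone_free(dpi_mz);
	/* fall through */
err1:
	roc_npa_pool_destroy(aura_handle);
	return rc;

With this, the later failure paths (chunk alloc, mbox send) keep using
"goto err2" so that both the memzone and the pool get released.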
> +
...
>