<div dir="ltr">Hi Nic,<div><br></div><div>Yes, i will split this into smaller patches and take care of the TODO's. I will have a v1 patch set ready with these changes within a couple of days. Given the 4/10 deadline for 22.07 it would seem that 22.11 is our target.</div><div><br></div><div>Thank you,</div><div>-John</div><div><br></div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Wed, Apr 27, 2022 at 2:38 PM Chautru, Nicolas <<a href="mailto:nicolas.chautru@intel.com">nicolas.chautru@intel.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Hi John, <br>
<br>
Do you think this one can be split into a few incremental commits?<br>
<br>
There are a few TODOs; is this v1 ready for review? Also, you are targeting 22.11, right?<br>
<br>
Thanks<br>
Nic<br>
<br>
> -----Original Message-----<br>
> From: John Miller <<a href="mailto:john.miller@atomicrules.com" target="_blank">john.miller@atomicrules.com</a>><br>
> Sent: Thursday, April 21, 2022 8:19 AM<br>
> To: <a href="mailto:dev@dpdk.org" target="_blank">dev@dpdk.org</a><br>
> Cc: <a href="mailto:ferruh.yigit@xilinx.com" target="_blank">ferruh.yigit@xilinx.com</a>; <a href="mailto:ed.czeck@atomicrules.com" target="_blank">ed.czeck@atomicrules.com</a>; John Miller<br>
> <<a href="mailto:john.miller@atomicrules.com" target="_blank">john.miller@atomicrules.com</a>><br>
> Subject: [PATCH 05/10] baseband/ark: add ark baseband device<br>
> <br>
> Add new ark baseband device.<br>
> <br>
> Signed-off-by: John Miller <<a href="mailto:john.miller@atomicrules.com" target="_blank">john.miller@atomicrules.com</a>><br>
> ---<br>
> drivers/baseband/ark/ark_bbdev.c | 1064 +++++++++++++++++++++++<br>
> drivers/baseband/ark/ark_bbdev_common.c | 125 +++<br>
> drivers/baseband/ark/ark_bbdev_common.h | 92 ++<br>
> drivers/baseband/ark/ark_bbdev_custom.c | 201 +++++<br>
> drivers/baseband/ark/ark_bbdev_custom.h | 30 +<br>
> drivers/baseband/ark/meson.build | 11 +<br>
> drivers/baseband/ark/version.map | 3 +<br>
> 7 files changed, 1526 insertions(+)<br>
> create mode 100644 drivers/baseband/ark/ark_bbdev.c<br>
> create mode 100644 drivers/baseband/ark/ark_bbdev_common.c<br>
> create mode 100644 drivers/baseband/ark/ark_bbdev_common.h<br>
> create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c<br>
> create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h<br>
> create mode 100644 drivers/baseband/ark/meson.build<br>
> create mode 100644 drivers/baseband/ark/version.map<br>
> <br>
> diff --git a/drivers/baseband/ark/ark_bbdev.c<br>
> b/drivers/baseband/ark/ark_bbdev.c<br>
> new file mode 100644<br>
> index 0000000000..b23bbd44d1<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/ark_bbdev.c<br>
> @@ -0,0 +1,1064 @@<br>
> +/* SPDX-License-Identifier: BSD-3-Clause<br>
> + * Copyright(c) 2016-2021 Atomic Rules LLC */<br>
> +<br>
> +#include "ark_common.h"<br>
> +#include "ark_bbdev_common.h"<br>
> +#include "ark_bbdev_custom.h"<br>
> +#include "ark_ddm.h"<br>
> +#include "ark_mpu.h"<br>
> +#include "ark_rqp.h"<br>
> +#include "ark_udm.h"<br>
> +<br>
> +#include <rte_bbdev.h><br>
> +#include <rte_bbdev_pmd.h><br>
> +#include <rte_bus_pci.h><br>
> +#include <rte_common.h><br>
> +#include <rte_devargs.h><br>
> +#include <rte_malloc.h><br>
> +#include <rte_ring.h><br>
> +<br>
> +#include <unistd.h><br>
> +<br>
> +#define DRIVER_NAME baseband_ark<br>
> +<br>
> +RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);<br>
> +<br>
> +#define ARK_SYSCTRL_BASE 0x0<br>
> +#define ARK_PKTGEN_BASE 0x10000<br>
> +#define ARK_MPU_RX_BASE 0x20000<br>
> +#define ARK_UDM_BASE 0x30000<br>
> +#define ARK_MPU_TX_BASE 0x40000<br>
> +#define ARK_DDM_BASE 0x60000<br>
> +#define ARK_PKTDIR_BASE 0xa0000<br>
> +#define ARK_PKTCHKR_BASE 0x90000<br>
> +#define ARK_RCPACING_BASE 0xb0000<br>
> +#define ARK_MPU_QOFFSET 0x00100<br>
> +<br>
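> +/* Each enqueued mbuf requires 2 to 4 TX meta descriptors, so the TX<br>
> + * meta ring is sized BB_ARK_TX_Q_FACTOR (4) times the op queue size.<br>
> + */<br>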
> +#define BB_ARK_TX_Q_FACTOR 4<br>
> +<br>
> +/* TODO move to UDM, verify configuration */<br>
> +#define ARK_RX_META_SIZE 32<br>
> +#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)<br>
> +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)<br>
> +<br>
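> +/* The hardware places a struct ark_rx_meta block in the mbuf headroom<br>
> + * at ARK_RX_META_OFFSET, just below the default data offset; the<br>
> + * dequeue paths read it via RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET).<br>
> + */<br>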
> +static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE,<br>
> + "Unexpected struct size ark_rx_meta");<br>
> +static_assert(sizeof(union ark_tx_meta) == 8,<br>
> + "Unexpected struct size ark_tx_meta");<br>
> +<br>
> +static struct rte_pci_id pci_id_ark[] = {<br>
> + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},<br>
> + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},<br>
> + {.device_id = 0},<br>
> +};<br>
> +<br>
> +static const struct ark_dev_caps<br>
> +ark_device_caps[] = {<br>
> + SET_DEV_CAPS(0x1015, true),<br>
> + SET_DEV_CAPS(0x1016, true),<br>
> + {.device_id = 0,}<br>
> +};<br>
> +<br>
> +<br>
> +/* Forward declarations */<br>
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops;<br>
> +<br>
> +<br>
> +/* queue */<br>
> +struct ark_bbdev_queue {<br>
> + struct rte_ring *active_ops; /* Ring for processed packets */<br>
> +<br>
> + /* RX components */<br>
> + /* array of physical addresses of the mbuf data pointer */<br>
> + rte_iova_t *rx_paddress_q;<br>
> + struct ark_udm_t *udm;<br>
> + struct ark_mpu_t *rx_mpu;<br>
> +<br>
> + /* TX components */<br>
> + union ark_tx_meta *tx_meta_q;<br>
> + struct ark_mpu_t *tx_mpu;<br>
> + struct ark_ddm_t *ddm;<br>
> +<br>
> + /* */<br>
> + uint32_t tx_queue_mask;<br>
> + uint32_t rx_queue_mask;<br>
> +<br>
> + int32_t rx_seed_index; /* step 1 set with empty mbuf */<br>
> + int32_t rx_cons_index; /* step 3 consumed by driver */<br>
> +<br>
> + /* 3 indexes to the paired data rings. */<br>
> + int32_t tx_prod_index; /* where to put the next one */<br>
> + int32_t tx_free_index; /* local copy of tx_cons_index */<br>
> +<br>
> + /* separate cache line -- written by FPGA -- RX announce */<br>
> + RTE_MARKER cacheline1 __rte_cache_min_aligned;<br>
> + volatile int32_t rx_prod_index; /* step 2 filled by FPGA */<br>
> +<br>
> + /* Separate cache line -- written by FPGA -- RX completion */<br>
> + RTE_MARKER cacheline2 __rte_cache_min_aligned;<br>
> + volatile int32_t tx_cons_index; /* hw is done, can be freed */<br>
> +} __rte_cache_aligned;<br>
> +<br>
> +static int<br>
> +ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t queue_size)<br>
> +{<br>
> + struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;<br>
> +<br>
> + rte_iova_t queue_base;<br>
> + rte_iova_t phys_addr_q_base;<br>
> + rte_iova_t phys_addr_prod_index;<br>
> + rte_iova_t phys_addr_cons_index;<br>
> +<br>
> + uint32_t write_interval_ns = 500; /* TODO this seems big */<br>
> +<br>
> + if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {<br>
> + ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX<br>
> queue");<br>
> + return -1;<br>
> + }<br>
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",<br>
> + bbdev->data->dev_id, q_id);<br>
> +<br>
> + /* RX MPU */<br>
> + phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);<br>
> + /* Force TX mode on MPU to match bbdev behavior */<br>
> + ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);<br>
> + ark_mpu_reset_stats(q->rx_mpu);<br>
> + ark_mpu_start(q->rx_mpu);<br>
> +<br>
> + /* UDM */<br>
> + queue_base = rte_malloc_virt2iova(q);<br>
> + phys_addr_prod_index = queue_base +<br>
> + offsetof(struct ark_bbdev_queue, rx_prod_index);<br>
> + ark_udm_write_addr(q->udm, phys_addr_prod_index);<br>
> + ark_udm_queue_enable(q->udm, 1);<br>
> +<br>
> + /* TX MPU */<br>
> + phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);<br>
> + ark_mpu_configure(q->tx_mpu, phys_addr_q_base,<br>
> + BB_ARK_TX_Q_FACTOR * queue_size, 1);<br>
> + ark_mpu_start(q->tx_mpu);<br>
> +<br>
> + /* DDM */<br>
> + phys_addr_cons_index = queue_base +<br>
> + offsetof(struct ark_bbdev_queue, tx_cons_index);<br>
> + ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* Setup a queue */<br>
> +static int<br>
> +ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,<br>
> + const struct rte_bbdev_queue_conf *queue_conf) {<br>
> + struct ark_bbdev_queue *q;<br>
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;<br>
> +<br>
> + const uint32_t queue_size = queue_conf->queue_size;<br>
> + const int socket_id = queue_conf->socket;<br>
> + const uint64_t pg_sz = sysconf(_SC_PAGESIZE);<br>
> + char ring_name[RTE_RING_NAMESIZE];<br>
> +<br>
> + /* Configuration checks */<br>
> + if (!rte_is_power_of_2(queue_size)) {<br>
> + ARK_BBDEV_LOG(ERR,<br>
> + "Configuration queue size"<br>
> + " must be power of two %u",<br>
> + queue_size);<br>
> + return -EINVAL;<br>
> + }<br>
> +<br>
> + if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {<br>
> + ARK_BBDEV_LOG(ERR,<br>
> + "Error: Ark bbdev requires head room > %d bytes<br>
> (%s)",<br>
> + ARK_RX_META_SIZE, __func__);<br>
> + return -EINVAL;<br>
> + }<br>
> +<br>
> + /* Allocate the queue data structure. */<br>
> + q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),<br>
> + RTE_CACHE_LINE_SIZE, queue_conf->socket);<br>
> + if (q == NULL) {<br>
> + ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");<br>
> + return -ENOMEM;<br>
> + }<br>
> + bbdev->data->queues[q_id].queue_private = q;<br>
> +<br>
> + /* RING */<br>
> + snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",<br>
> + bbdev->data->dev_id, q_id);<br>
> + q->active_ops = rte_ring_create(ring_name,<br>
> + queue_size,<br>
> + queue_conf->socket,<br>
> + RING_F_SP_ENQ | RING_F_SC_DEQ);<br>
> + if (q->active_ops == NULL) {<br>
> + ARK_BBDEV_LOG(ERR, "Failed to create ring");<br>
> + goto free_all;<br>
> + }<br>
> +<br>
> + q->rx_queue_mask = queue_size - 1;<br>
> + q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;<br>
> +<br>
> + /* Each mbuf requires 2 to 4 objects, factor by BB_ARK_TX_Q_FACTOR */<br>
> + q->tx_meta_q =<br>
> + rte_zmalloc_socket("Ark_bb_txqueue meta",<br>
> + queue_size * BB_ARK_TX_Q_FACTOR *<br>
> + sizeof(union ark_tx_meta),<br>
> + pg_sz,<br>
> + socket_id);<br>
> +<br>
> + if (q->tx_meta_q == 0) {<br>
> + ARK_BBDEV_LOG(ERR, "Failed to allocate "<br>
> + "queue memory in %s", __func__);<br>
> + goto free_all;<br>
> + }<br>
> +<br>
> + q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id * ARK_DDM_QOFFSET);<br>
> + q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id * ARK_MPU_QOFFSET);<br>
> +<br>
> + q->rx_paddress_q =<br>
> + rte_zmalloc_socket("ark_bb_rx_paddress_q",<br>
> + queue_size * sizeof(rte_iova_t),<br>
> + pg_sz,<br>
> + socket_id);<br>
> +<br>
> + if (q->rx_paddress_q == 0) {<br>
> + ARK_BBDEV_LOG(ERR,<br>
> + "Failed to allocate queue memory in %s",<br>
> + __func__);<br>
> + goto free_all;<br>
> + }<br>
> + q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id * ARK_UDM_QOFFSET);<br>
> + q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id * ARK_MPU_QOFFSET);<br>
> +<br>
> + /* Structures have been configured, set up the hardware */<br>
> + return ark_bb_hw_q_setup(bbdev, q_id, queue_size);<br>
> +<br>
> +free_all:<br>
> + rte_free(q->tx_meta_q);<br>
> + rte_free(q->rx_paddress_q);<br>
> + rte_free(q);<br>
> + return -EFAULT;<br>
> +}<br>
> +<br>
> +/* Release queue */<br>
> +static int<br>
> +ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id) {<br>
> + struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;<br>
> +<br>
> + /* TODO Wait for ddm to send out all packets in flight,<br>
> + * Is this only called after q stop?<br>
> + */<br>
> +<br>
> + ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);<br>
> + ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);<br>
> +<br>
> + rte_ring_free(q->active_ops);<br>
> + rte_free(q->tx_meta_q);<br>
> + rte_free(q->rx_paddress_q);<br>
> + rte_free(q);<br>
> + bbdev->data->queues[q_id].queue_private = NULL;<br>
> +<br>
> + ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",<br>
> + bbdev->data->dev_id, q_id);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bbdev_start(struct rte_bbdev *bbdev) {<br>
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;<br>
> +<br>
> + ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data-<br>
> >dev_id);<br>
> + if (ark_bb->started)<br>
> + return 0;<br>
> +<br>
> + /* start UDM */<br>
> + ark_udm_start(ark_bb->udm.v);<br>
> +<br>
> + /* start DDM */<br>
> + ark_ddm_start(ark_bb->ddm.v);<br>
> +<br>
> + ark_bb->started = 1;<br>
> +<br>
> + if (ark_bb->start_pg)<br>
> + ark_pktchkr_run(ark_bb->pc);<br>
> +<br>
> + if (ark_bb->start_pg) {<br>
> + pthread_t thread;<br>
> +<br>
> + /* Delay packet generator start to allow the hardware to be ready.<br>
> + * This is only used for sanity checking with the internal generator.<br>
> + */<br>
> + if (pthread_create(&thread, NULL,<br>
> + ark_pktgen_delay_start, ark_bb->pg)) {<br>
> + ARK_BBDEV_LOG(ERR, "Could not create pktgen "<br>
> + "starter thread");<br>
> + return -1;<br>
> + }<br>
> + }<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static void<br>
> +ark_bbdev_stop(struct rte_bbdev *bbdev) {<br>
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;<br>
> + struct ark_mpu_t *mpu;<br>
> + unsigned int i;<br>
> + int status;<br>
> +<br>
> + ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data-<br>
> >dev_id);<br>
> +<br>
> + if (!ark_bb->started)<br>
> + return;<br>
> +<br>
> + /* Stop the packet generator */<br>
> + if (ark_bb->start_pg)<br>
> + ark_pktgen_pause(ark_bb->pg);<br>
> +<br>
> + /* Stop DDM */<br>
> + /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */<br>
> + for (i = 0; i < 10; i++) {<br>
> + status = ark_ddm_stop(ark_bb->ddm.v, 1);<br>
> + if (status == 0)<br>
> + break;<br>
> + }<br>
> + if (status || i != 0) {<br>
> + ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"<br>
> + " %d iter: %u. (%s)",<br>
> + status,<br>
> + i,<br>
> + __func__);<br>
> + ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");<br>
> +<br>
> + mpu = ark_bb->mputx.v;<br>
> + for (i = 0; i < ark_bb->max_nb_queues; i++) {<br>
> + ark_mpu_dump(mpu, "DDM failure dump", i);<br>
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);<br>
> + }<br>
> + }<br>
> + ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");<br>
> +<br>
> + /* STOP RX Side */<br>
> + /* Stop UDM multiple tries attempted */<br>
> + for (i = 0; i < 10; i++) {<br>
> + status = ark_udm_stop(ark_bb->udm.v, 1);<br>
> + if (status == 0)<br>
> + break;<br>
> + }<br>
> + if (status || i != 0) {<br>
> + ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d<br>
> iter: %u. (%s)",<br>
> + status, i, __func__);<br>
> + ark_udm_dump(ark_bb->udm.v, "Stop anomaly");<br>
> +<br>
> + mpu = ark_bb->mpurx.v;<br>
> + for (i = 0; i < ark_bb->max_nb_queues; i++) {<br>
> + ark_mpu_dump(mpu, "UDM Stop anomaly", i);<br>
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);<br>
> + }<br>
> + }<br>
> +<br>
> + ark_udm_dump_stats(ark_bb->udm.v, "Post stop");<br>
> + ark_udm_dump_perf(ark_bb->udm.v, "Post stop");<br>
> +<br>
> + /* Stop the packet checker if it is running */<br>
> + if (ark_bb->start_pg) {<br>
> + ark_pktchkr_dump_stats(ark_bb->pc);<br>
> + ark_pktchkr_stop(ark_bb->pc);<br>
> + }<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id) {<br>
> + struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;<br>
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data->dev_id, q_id);<br>
> + ark_mpu_start(q->tx_mpu);<br>
> + ark_mpu_start(q->rx_mpu);<br>
> + return 0;<br>
> +}<br>
> +static int<br>
> +ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id) {<br>
> + struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;<br>
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data->dev_id, q_id);<br>
> + ark_mpu_stop(q->tx_mpu);<br>
> + ark_mpu_stop(q->rx_mpu);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* ************************************************************** */<br>
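> +/*<br>
> + * Each mbuf is described by 2 to 4 TX meta descriptors: a header<br>
> + * descriptor (flags, data length, first 4 bytes of user meta), zero<br>
> + * to two user-meta descriptors, and a physical-address descriptor.<br>
> + */<br>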
> +/* Common function for all enqueue and dequeue ops */<br>
> +static inline void<br>
> +ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,<br>
> + struct rte_mbuf *mbuf,<br>
> + uint16_t offset, /* Extra offset */<br>
> + uint8_t flags,<br>
> + uint32_t *meta,<br>
> + uint8_t meta_cnt /* 0, 1 or 2 */<br>
> + )<br>
> +{<br>
> + union ark_tx_meta *tx_meta;<br>
> + int32_t tx_idx;<br>
> + uint8_t m;<br>
> +<br>
> + /* Header */<br>
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;<br>
> + tx_meta = &q->tx_meta_q[tx_idx];<br>
> + tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;<br>
> + tx_meta->flags = flags;<br>
> + tx_meta->meta_cnt = meta_cnt;<br>
> + tx_meta->user1 = *meta++;<br>
> + q->tx_prod_index++;<br>
> +<br>
> + for (m = 0; m < meta_cnt; m++) {<br>
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;<br>
> + tx_meta = &q->tx_meta_q[tx_idx];<br>
> + tx_meta->usermeta0 = *meta++;<br>
> + tx_meta->usermeta1 = *meta++;<br>
> + q->tx_prod_index++;<br>
> + }<br>
> +<br>
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;<br>
> + tx_meta = &q->tx_meta_q[tx_idx];<br>
> + tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;<br>
> + q->tx_prod_index++;<br>
> +}<br>
> +<br>
> +static inline void<br>
> +ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,<br>
> + struct rte_mbuf *mbuf,<br>
> + uint16_t offset,<br>
> + uint32_t *meta, uint8_t meta_cnt) {<br>
> + struct rte_mbuf *next;<br>
> + uint8_t flags = ARK_DDM_SOP;<br>
> +<br>
> + while (mbuf != NULL) {<br>
> + next = mbuf->next;<br>
> + flags |= (next == NULL) ? ARK_DDM_EOP : 0;<br>
> +<br>
> + ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,<br>
> + meta, meta_cnt);<br>
> +<br>
> + flags &= ~ARK_DDM_SOP; /* drop SOP flags */<br>
> + meta_cnt = 0;<br>
> + offset = 0;<br>
> +<br>
> + mbuf = next;<br>
> + }<br>
> +}<br>
> +<br>
> +static inline int<br>
> +ark_bb_enqueue_common(struct ark_bbdev_queue *q,<br>
> + struct rte_mbuf *m_in, struct rte_mbuf *m_out,<br>
> + uint16_t offset,<br>
> + uint32_t *meta, uint8_t meta_cnt) {<br>
> + int32_t free_queue_space;<br>
> + int32_t rx_idx;<br>
> +<br>
> + /* TX side limit */<br>
> + free_queue_space = q->tx_queue_mask -<br>
> + (q->tx_prod_index - q->tx_free_index);<br>
> + if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))<br>
> + return 1;<br>
> +<br>
> + /* RX side limit */<br>
> + free_queue_space = q->rx_queue_mask -<br>
> + (q->rx_seed_index - q->rx_cons_index);<br>
> + if (unlikely(free_queue_space < m_out->nb_segs))<br>
> + return 1;<br>
> +<br>
> + if (unlikely(m_in->nb_segs > 1))<br>
> + ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta, meta_cnt);<br>
> + else<br>
> + ark_bb_enqueue_desc_fill(q, m_in, offset,<br>
> + ARK_DDM_SOP | ARK_DDM_EOP,<br>
> + meta, meta_cnt);<br>
> +<br>
> + /* We assume that the return mbuf has exactly enough segments for<br>
> + * return data, which is 2048 bytes per segment.<br>
> + */<br>
> + do {<br>
> + rx_idx = q->rx_seed_index & q->rx_queue_mask;<br>
> + q->rx_paddress_q[rx_idx] = m_out->buf_iova;<br>
> + q->rx_seed_index++;<br>
> + m_out = m_out->next;<br>
> + } while (m_out);<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static inline void<br>
> +ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,<br>
> + struct ark_bbdev_queue *q,<br>
> + void **ops,<br>
> + uint16_t nb_ops, uint16_t nb)<br>
> +{<br>
> + /* BBDEV global stats */<br>
> + /* These are not really errors, not sure why bbdev counts these. */<br>
> + q_data->queue_stats.enqueue_err_count += nb_ops - nb;<br>
> + q_data->queue_stats.enqueued_count += nb;<br>
> +<br>
> + /* Notify HW only if something was actually enqueued */<br>
> + if (unlikely(nb == 0))<br>
> + return;<br>
> +<br>
> + ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);<br>
> + ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);<br>
> +<br>
> + /* Queue info for dequeue-side processing */<br>
> + rte_ring_enqueue_burst(q->active_ops,<br>
> + (void **)ops, nb, NULL);<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,<br>
> + int32_t *prx_cons_index,<br>
> + uint16_t pkt_len<br>
> + )<br>
> +{<br>
> + struct rte_mbuf *mbuf;<br>
> + uint16_t data_len;<br>
> + uint16_t remaining;<br>
> + uint16_t segments = 1;<br>
> +<br>
> + data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);<br>
> + remaining = pkt_len - data_len;<br>
> +<br>
> + mbuf = mbuf0;<br>
> + mbuf0->data_len = data_len;<br>
> + while (remaining) {<br>
> + segments += 1;<br>
> + mbuf = mbuf->next;<br>
> + if (unlikely(mbuf == 0)) {<br>
> + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with<br>
> "<br>
> + "at least %d segments for dequeue "<br>
> + "of packet length %d",<br>
> + segments, pkt_len);<br>
> + return 1;<br>
> + }<br>
> +<br>
> + data_len = RTE_MIN(remaining,<br>
> + RTE_MBUF_DEFAULT_DATAROOM);<br>
> + remaining -= data_len;<br>
> +<br>
> + mbuf->data_len = data_len;<br>
> + *prx_cons_index += 1;<br>
> + }<br>
> +<br>
> + if (mbuf->next != 0) {<br>
> + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "<br>
> + "at exactly %d segments for dequeue "<br>
> + "of packet length %d. Found %d "<br>
> + "segments",<br>
> + segments, pkt_len, mbuf0->nb_segs);<br>
> + return 1;<br>
> + }<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* ************************************************************** */<br>
> +/* LDPC Decode ops */<br>
> +static int16_t<br>
> +ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,<br>
> + struct rte_bbdev_dec_op *this_op) {<br>
> + struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;<br>
> + struct rte_mbuf *m_in = ldpc_dec_op->input.data;<br>
> + struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;<br>
> + uint16_t offset = ldpc_dec_op->input.offset;<br>
> + uint32_t meta[5] = {0};<br>
> + uint8_t meta_cnt = 0;<br>
> +<br>
> + /* User's meta move from bbdev op to Arkville HW */<br>
> + if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {<br>
> + ARK_BBDEV_LOG(ERR, "%s failed", __func__);<br>
> + return 1;<br>
> + }<br>
> +<br>
> + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);<br>
> +}<br>
> +<br>
> +/* Enqueue LDPC Decode -- burst */<br>
> +static uint16_t<br>
> +ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,<br>
> + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {<br>
> + struct ark_bbdev_queue *q = q_data->queue_private;<br>
> + unsigned int max_enq;<br>
> + uint16_t nb;<br>
> +<br>
> + max_enq = rte_ring_free_count(q->active_ops);<br>
> + max_enq = RTE_MIN(max_enq, nb_ops);<br>
> + for (nb = 0; nb < max_enq; nb++) {<br>
> + if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))<br>
> + break;<br>
> + }<br>
> +<br>
> + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);<br>
> + return nb;<br>
> +}<br>
> +<br>
> +/* ************************************************************** */<br>
> +/* Dequeue LDPC Decode -- burst */<br>
> +static uint16_t<br>
> +ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,<br>
> + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {<br>
> + struct ark_bbdev_queue *q = q_data->queue_private;<br>
> + struct rte_mbuf *mbuf;<br>
> + struct rte_bbdev_dec_op *this_op;<br>
> + struct ark_rx_meta *meta;<br>
> + uint32_t *usermeta;<br>
> +<br>
> + uint16_t nb = 0;<br>
> + int32_t prod_index = q->rx_prod_index;<br>
> + int32_t cons_index = q->rx_cons_index;<br>
> +<br>
> + q->tx_free_index = q->tx_cons_index;<br>
> +<br>
> + while ((prod_index - cons_index) > 0) {<br>
> + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {<br>
> + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",<br>
> + __func__);<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + break;<br>
> + }<br>
> + ops[nb] = this_op;<br>
> +<br>
> + mbuf = this_op->ldpc_dec.hard_output.data;<br>
> +<br>
> + /* META DATA embedded in headroom */<br>
> + meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);<br>
> +<br>
> + mbuf->pkt_len = meta->pkt_len;<br>
> + mbuf->data_len = meta->pkt_len;<br>
> +<br>
> + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {<br>
> + if (ark_bb_dequeue_segmented(mbuf, &cons_index,<br>
> + meta->pkt_len))<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + } else if (mbuf->next != 0) {<br>
> + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "<br>
> + "exactly 1 segment for dequeue "<br>
> + "of packet length %d. Found %d "<br>
> + "segments",<br>
> + meta->pkt_len, mbuf->nb_segs);<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + }<br>
> +<br>
> + usermeta = meta->user_meta;<br>
> + /* User's meta move from Arkville HW to bbdev OP */<br>
> + ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);<br>
> + nb++;<br>
> + cons_index++;<br>
> + if (nb >= nb_ops)<br>
> + break;<br>
> + }<br>
> +<br>
> + q->rx_cons_index = cons_index;<br>
> +<br>
> + /* BBdev stats */<br>
> + q_data->queue_stats.dequeued_count += nb;<br>
> +<br>
> + return nb;<br>
> +}<br>
> +<br>
> +/**************************************************************************/<br>
> +/* Enqueue LDPC Encode */<br>
> +static int16_t<br>
> +ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,<br>
> + struct rte_bbdev_enc_op *this_op) {<br>
> + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;<br>
> + struct rte_mbuf *m_in = ldpc_enc_op->input.data;<br>
> + struct rte_mbuf *m_out = ldpc_enc_op->output.data;<br>
> + uint16_t offset = ldpc_enc_op->input.offset;<br>
> + uint32_t meta[5] = {0};<br>
> + uint8_t meta_cnt = 0;<br>
> +<br>
> + /* User's meta move from bbdev op to Arkville HW */<br>
> + if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {<br>
> + ARK_BBDEV_LOG(ERR, "%s failed", __func__);<br>
> + return 1;<br>
> + }<br>
> +<br>
> + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);<br>
> +}<br>
> +<br>
> +/* Enqueue LDPC Encode -- burst */<br>
> +static uint16_t<br>
> +ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,<br>
> + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {<br>
> + struct ark_bbdev_queue *q = q_data->queue_private;<br>
> + unsigned int max_enq;<br>
> + uint16_t nb;<br>
> +<br>
> + max_enq = rte_ring_free_count(q->active_ops);<br>
> + max_enq = RTE_MIN(max_enq, nb_ops);<br>
> + for (nb = 0; nb < max_enq; nb++) {<br>
> + if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))<br>
> + break;<br>
> + }<br>
> +<br>
> + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);<br>
> + return nb;<br>
> +}<br>
> +<br>
> +/* Dequeue LDPC Encode -- burst */<br>
> +static uint16_t<br>
> +ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,<br>
> + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {<br>
> + struct ark_bbdev_queue *q = q_data->queue_private;<br>
> + struct rte_mbuf *mbuf;<br>
> + struct rte_bbdev_enc_op *this_op;<br>
> + struct ark_rx_meta *meta;<br>
> + uint32_t *usermeta;<br>
> +<br>
> + uint16_t nb = 0;<br>
> + int32_t prod_index = q->rx_prod_index;<br>
> + int32_t cons_index = q->rx_cons_index;<br>
> +<br>
> + q->tx_free_index = q->tx_cons_index;<br>
> +<br>
> + while ((prod_index - cons_index) > 0) {<br>
> + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {<br>
> + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",<br>
> + __func__);<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + break;<br>
> + }<br>
> + ops[nb] = this_op;<br>
> +<br>
> + mbuf = this_op->ldpc_enc.output.data;<br>
> +<br>
> + /* META DATA embedded in headroom */<br>
> + meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);<br>
> +<br>
> + mbuf->pkt_len = meta->pkt_len;<br>
> + mbuf->data_len = meta->pkt_len;<br>
> + usermeta = meta->user_meta;<br>
> +<br>
> + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {<br>
> + if (ark_bb_dequeue_segmented(mbuf, &cons_index,<br>
> + meta->pkt_len))<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + } else if (mbuf->next != 0) {<br>
> + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "<br>
> + "exactly 1 segment for dequeue "<br>
> + "of packet length %d. Found %d "<br>
> + "segments",<br>
> + meta->pkt_len, mbuf->nb_segs);<br>
> + q_data->queue_stats.dequeue_err_count += 1;<br>
> + }<br>
> +<br>
> + /* User's meta move from Arkville HW to bbdev OP */<br>
> + ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);<br>
> + nb++;<br>
> + cons_index++;<br>
> + if (nb >= nb_ops)<br>
> + break;<br>
> + }<br>
> +<br>
> + q->rx_cons_index = cons_index;<br>
> +<br>
> + /* BBdev stats */<br>
> + q_data->queue_stats.dequeued_count += nb;<br>
> +<br>
> + return nb;<br>
> +}<br>
> +<br>
> +/**************************************************************************/<br>
> +/*<br>
> + * Initial device hardware configuration when the device is opened;<br>
> + * set up the DDM and UDM; called once per PCIe device.<br>
> + */<br>
> +static int<br>
> +ark_bb_config_device(struct ark_bbdevice *ark_bb)<br>
> +{<br>
> + uint16_t num_q, i;<br>
> + struct ark_mpu_t *mpu;<br>
> +<br>
> + /*<br>
> + * Make sure that the packet director, generator and checker are in a<br>
> + * known state<br>
> + */<br>
> + ark_bb->start_pg = 0;<br>
> + ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);<br>
> + if (ark_bb->pg == NULL)<br>
> + return -1;<br>
> + ark_pktgen_reset(ark_bb->pg);<br>
> + ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);<br>
> + if (ark_bb->pc == NULL)<br>
> + return -1;<br>
> + ark_pktchkr_stop(ark_bb->pc);<br>
> + ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);<br>
> + if (ark_bb->pd == NULL)<br>
> + return -1;<br>
> +<br>
> + /* Verify HW */<br>
> + if (ark_udm_verify(ark_bb->udm.v))<br>
> + return -1;<br>
> + if (ark_ddm_verify(ark_bb->ddm.v))<br>
> + return -1;<br>
> +<br>
> + /* UDM */<br>
> + if (ark_udm_reset(ark_bb->udm.v)) {<br>
> + ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");<br>
> + return -1;<br>
> + }<br>
> + /* Keep in reset until the MPU are cleared */<br>
> +<br>
> + /* MPU reset */<br>
> + mpu = ark_bb->mpurx.v;<br>
> + num_q = ark_api_num_queues(mpu);<br>
> + ark_bb->max_nb_queues = num_q;<br>
> +<br>
> + for (i = 0; i < num_q; i++) {<br>
> + ark_mpu_reset(mpu);<br>
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);<br>
> + }<br>
> +<br>
> + /* Only 1 queue supported in the udm */<br>
> + ark_udm_stop(ark_bb->udm.v, 0);<br>
> + ark_udm_configure(ark_bb->udm.v,<br>
> + RTE_PKTMBUF_HEADROOM,<br>
> + RTE_MBUF_DEFAULT_DATAROOM,<br>
> + ARK_RX_WRITE_TIME_NS);<br>
> +<br>
> +<br>
> + ark_udm_stats_reset(ark_bb->udm.v);<br>
> + ark_udm_stop(ark_bb->udm.v, 0);<br>
> +<br>
> + /* TX -- DDM */<br>
> + if (ark_ddm_stop(ark_bb->ddm.v, 1))<br>
> + ARK_BBDEV_LOG(ERR, "Unable to stop DDM");<br>
> +<br>
> + mpu = ark_bb->mputx.v;<br>
> + num_q = ark_api_num_queues(mpu);<br>
> + for (i = 0; i < num_q; i++) {<br>
> + ark_mpu_reset(mpu);<br>
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);<br>
> + }<br>
> +<br>
> + ark_ddm_reset(ark_bb->ddm.v);<br>
> + ark_ddm_stats_reset(ark_bb->ddm.v);<br>
> +<br>
> + ark_ddm_stop(ark_bb->ddm.v, 0);<br>
> + if (ark_bb->rqpacing)<br>
> + ark_rqp_stats_reset(ark_bb->rqpacing);<br>
> +<br>
> + ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb-<br>
> >pkt_dir_v);<br>
> + ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);<br>
> +<br>
> + if (ark_bb->pkt_gen_args[0]) {<br>
> + ARK_BBDEV_LOG(INFO, "Setting up the packet generator");<br>
> + ark_pktgen_parse(ark_bb->pkt_gen_args);<br>
> + ark_pktgen_reset(ark_bb->pg);<br>
> + ark_pktgen_setup(ark_bb->pg);<br>
> + ark_bb->start_pg = 1;<br>
> + }<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)<br>
> +{<br>
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;<br>
> + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);<br>
> + bool rqpacing = false;<br>
> + int p;<br>
> +<br>
> + RTE_SET_USED(pci_drv);<br>
> +<br>
> + ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;<br>
> + ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;<br>
> +<br>
> + ark_bb->sysctrl.v = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];<br>
> + ark_bb->mpurx.v = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];<br>
> + ark_bb->udm.v = (void *)&ark_bb->bar0[ARK_UDM_BASE];<br>
> + ark_bb->mputx.v = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];<br>
> + ark_bb->ddm.v = (void *)&ark_bb->bar0[ARK_DDM_BASE];<br>
> + ark_bb->pktdir.v = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];<br>
> + ark_bb->pktgen.v = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];<br>
> + ark_bb->pktchkr.v = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];<br>
> +<br>
> + p = 0;<br>
> + while (ark_device_caps[p].device_id != 0) {<br>
> + if (pci_dev->id.device_id == ark_device_caps[p].device_id) {<br>
> + rqpacing = ark_device_caps[p].caps.rqpacing;<br>
> + break;<br>
> + }<br>
> + p++;<br>
> + }<br>
> +<br>
> + if (rqpacing)<br>
> + ark_bb->rqpacing =<br>
> + (struct ark_rqpace_t *)(ark_bb->bar0 + ARK_RCPACING_BASE);<br>
> + else<br>
> + ark_bb->rqpacing = NULL;<br>
> +<br>
> + ark_bb->started = 0;<br>
> +<br>
> + ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID:<br>
> %08x",<br>
> + ark_bb->sysctrl.t32[4],<br>
> + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));<br>
> + ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",<br>
> + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));<br>
> +<br>
> + /* If HW sanity test fails, return an error */<br>
> + if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {<br>
> + ARK_BBDEV_LOG(ERR,<br>
> + "HW Sanity test has failed, expected constant"<br>
> + " 0x%x, read 0x%x (%s)",<br>
> + 0xcafef00d,<br>
> + ark_bb->sysctrl.t32[4], __func__);<br>
> + return -1;<br>
> + }<br>
> +<br>
> + return ark_bb_config_device(ark_bb);<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bbdev_uninit(struct rte_bbdev *bbdev) {<br>
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;<br>
> +<br>
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)<br>
> + return 0;<br>
> +<br>
> + ark_pktgen_uninit(ark_bb->pg);<br>
> + ark_pktchkr_uninit(ark_bb->pc);<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static int<br>
> +ark_bbdev_probe(struct rte_pci_driver *pci_drv,<br>
> + struct rte_pci_device *pci_dev)<br>
> +{<br>
> + struct rte_bbdev *bbdev = NULL;<br>
> + char dev_name[RTE_BBDEV_NAME_MAX_LEN];<br>
> + struct ark_bbdevice *ark_bb;<br>
> +<br>
> + if (pci_dev == NULL)<br>
> + return -EINVAL;<br>
> +<br>
> + rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));<br>
> +<br>
> + /* Allocate memory to be used privately by drivers */<br>
> + bbdev = rte_bbdev_allocate(pci_dev->device.name);<br>
> + if (bbdev == NULL)<br>
> + return -ENODEV;<br>
> +<br>
> + /* allocate device private memory */<br>
> + bbdev->data->dev_private = rte_zmalloc_socket(dev_name,<br>
> + sizeof(struct ark_bbdevice),<br>
> + RTE_CACHE_LINE_SIZE,<br>
> + pci_dev->device.numa_node);<br>
> +<br>
> + if (bbdev->data->dev_private == NULL) {<br>
> + ARK_BBDEV_LOG(CRIT,<br>
> + "Allocate of %zu bytes for device \"%s\"<br>
> failed",<br>
> + sizeof(struct ark_bbdevice), dev_name);<br>
> + rte_bbdev_release(bbdev);<br>
> + return -ENOMEM;<br>
> + }<br>
> + ark_bb = bbdev->data->dev_private;<br>
> + /* Initialize ark_bb */<br>
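> + /* Default packet-director bitmap; may be overridden by the<br>
> + * Pkt_dir devarg parsed in parse_ark_bbdev_params() below.<br>
> + */<br>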
> + ark_bb->pkt_dir_v = 0x00110110;<br>
> +<br>
> + /* Fill HW specific part of device structure */<br>
> + bbdev->device = &pci_dev->device;<br>
> + bbdev->intr_handle = NULL;<br>
> + bbdev->data->socket_id = pci_dev->device.numa_node;<br>
> + bbdev->dev_ops = &ark_bbdev_pmd_ops;<br>
> + if (pci_dev->device.devargs)<br>
> + parse_ark_bbdev_params(pci_dev->device.devargs->args, ark_bb);<br>
> +<br>
> +<br>
> + /* Device specific initialization */<br>
> + if (ark_bbdev_init(bbdev, pci_drv))<br>
> + return -EIO;<br>
> + if (ark_bbdev_start(bbdev))<br>
> + return -EIO;<br>
> +<br>
> + /* Core operations LDPC encode and decode */<br>
> + bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;<br>
> + bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;<br>
> + bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;<br>
> + bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;<br>
> +<br>
> + ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",<br>
> + bbdev->data->dev_id, dev_name);<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* Uninitialize device */<br>
> +static int<br>
> +ark_bbdev_remove(struct rte_pci_device *pci_dev) {<br>
> + struct rte_bbdev *bbdev;<br>
> + int ret;<br>
> +<br>
> + if (pci_dev == NULL)<br>
> + return -EINVAL;<br>
> +<br>
> + /* Find device */<br>
> + bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);<br>
> + if (bbdev == NULL) {<br>
> + ARK_BBDEV_LOG(CRIT,<br>
> + "Couldn't find HW dev \"%s\" to Uninitialize<br>
> it",<br>
> + pci_dev-><a href="http://device.name" rel="noreferrer" target="_blank">device.name</a>);<br>
> + return -ENODEV;<br>
> + }<br>
> +<br>
> + /* Arkville device close */<br>
> + ark_bbdev_uninit(bbdev);<br>
> + rte_free(bbdev->data->dev_private);<br>
> +<br>
> + /* Close device */<br>
> + ret = rte_bbdev_close(bbdev->data->dev_id);<br>
> + if (ret < 0)<br>
> + ARK_BBDEV_LOG(ERR,<br>
> + "Device %i failed to close during remove: %i",<br>
> + bbdev->data->dev_id, ret);<br>
> +<br>
> + return rte_bbdev_release(bbdev);<br>
> +}<br>
> +<br>
> +/* Operation for the PMD */<br>
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {<br>
> + .info_get = ark_bbdev_info_get,<br>
> + .start = ark_bbdev_start,<br>
> + .stop = ark_bbdev_stop,<br>
> + .queue_setup = ark_bb_q_setup,<br>
> + .queue_release = ark_bb_q_release,<br>
> + .queue_start = ark_bb_q_start,<br>
> + .queue_stop = ark_bb_q_stop,<br>
> +};<br>
> +<br>
> +<br>
> +<br>
> +static struct rte_pci_driver ark_bbdev_pmd_drv = {<br>
> + .probe = ark_bbdev_probe,<br>
> + .remove = ark_bbdev_remove,<br>
> + .id_table = pci_id_ark,<br>
> + .drv_flags = RTE_PCI_DRV_NEED_MAPPING<br>
> +};<br>
> +<br>
> +RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);<br>
> +RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);<br>
> +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,<br>
> + ARK_BBDEV_PKTGEN_ARG "=<filename> "<br>
> + ARK_BBDEV_PKTCHKR_ARG "=<filename> "<br>
> + ARK_BBDEV_PKTDIR_ARG "=<bitmap>"<br>
> + );<br>
> diff --git a/drivers/baseband/ark/ark_bbdev_common.c<br>
> b/drivers/baseband/ark/ark_bbdev_common.c<br>
> new file mode 100644<br>
> index 0000000000..6ef0f43654<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/ark_bbdev_common.c<br>
> @@ -0,0 +1,125 @@<br>
> +/* SPDX-License-Identifier: BSD-3-Clause<br>
> + * Copyright(c) 2016-2021 Atomic Rules LLC */<br>
> +<br>
> +#include <string.h><br>
> +<br>
> +#include <rte_kvargs.h><br>
> +#include <rte_log.h><br>
> +<br>
> +#include "ark_bbdev_common.h"<br>
> +<br>
> +static const char * const ark_bbdev_valid_params[] = {<br>
> + ARK_BBDEV_PKTDIR_ARG,<br>
> + ARK_BBDEV_PKTGEN_ARG,<br>
> + ARK_BBDEV_PKTCHKR_ARG,<br>
> + NULL<br>
> +};<br>
> +<br>
> +/* Parse 16-bit integer from string argument */<br>
> +static inline int<br>
> +parse_u16_arg(const char *key, const char *value, void *extra_args)<br>
> +{<br>
> + uint16_t *u16 = extra_args;<br>
> + unsigned int long result;<br>
> +<br>
> + if ((value == NULL) || (extra_args == NULL))<br>
> + return -EINVAL;<br>
> + errno = 0;<br>
> + result = strtoul(value, NULL, 0);<br>
> + if ((result >= (1 << 16)) || (errno != 0)) {<br>
> + ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s",<br>
> result, key);<br>
> + return -ERANGE;<br>
> + }<br>
> + *u16 = (uint16_t)result;<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static inline int<br>
> +process_pktdir_arg(const char *key, const char *value,<br>
> + void *extra_args)<br>
> +{<br>
> + uint32_t *u32 = extra_args;<br>
> + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);<br>
> +<br>
> + *u32 = strtol(value, NULL, 0);<br>
> + ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +static inline int<br>
> +process_file_args(const char *key, const char *value, void *extra_args)<br>
> +{<br>
> + char *args = (char *)extra_args;<br>
> + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);<br>
> +<br>
> + /* Open the configuration file */<br>
> + FILE *file = fopen(value, "r");<br>
> + char line[ARK_MAX_ARG_LEN];<br>
> + int size = 0;<br>
> + int first = 1;<br>
> +<br>
> + if (file == NULL) {<br>
> + ARK_BBDEV_LOG(ERR, "Unable to open config file %s",<br>
> + value);<br>
> + return -1;<br>
> + }<br>
> +<br>
> + while (fgets(line, sizeof(line), file)) {<br>
> + size += strlen(line);<br>
> + if (size >= ARK_MAX_ARG_LEN) {<br>
> + ARK_BBDEV_LOG(ERR, "Unable to parse file %s args,<br>
> "<br>
> + "parameter list is too long", value);<br>
> + fclose(file);<br>
> + return -1;<br>
> + }<br>
> + if (first) {<br>
> + strncpy(args, line, ARK_MAX_ARG_LEN);<br>
> + first = 0;<br>
> + } else {<br>
> + strncat(args, line, ARK_MAX_ARG_LEN);<br>
> + }<br>
> + }<br>
> + ARK_BBDEV_LOG(DEBUG, "file = %s", args);<br>
> + fclose(file);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +<br>
> +/* Parse parameters used to create device */<br>
> +int<br>
> +parse_ark_bbdev_params(const char *input_args,<br>
> + struct ark_bbdevice *ark_bb)<br>
> +{<br>
> + struct rte_kvargs *kvlist = NULL;<br>
> + int ret = 0;<br>
> +<br>
> + if (ark_bb == NULL)<br>
> + return -EINVAL;<br>
> + if (input_args == NULL)<br>
> + return ret;<br>
> +<br>
> + kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);<br>
> + if (kvlist == NULL)<br>
> + return -EFAULT;<br>
> +<br>
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,<br>
> + &process_pktdir_arg, &ark_bb->pkt_dir_v);<br>
> + if (ret < 0)<br>
> + goto exit;<br>
> +<br>
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,<br>
> + &process_file_args, &ark_bb->pkt_gen_args);<br>
> + if (ret < 0)<br>
> + goto exit;<br>
> +<br>
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,<br>
> + &process_file_args, &ark_bb->pkt_chkr_args);<br>
> + if (ret < 0)<br>
> + goto exit;<br>
> +<br>
> + exit:<br>
> + if (kvlist)<br>
> + rte_kvargs_free(kvlist);<br>
> + return ret;<br>
> +}<br>
> diff --git a/drivers/baseband/ark/ark_bbdev_common.h<br>
> b/drivers/baseband/ark/ark_bbdev_common.h<br>
> new file mode 100644<br>
> index 0000000000..670e7e86d6<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/ark_bbdev_common.h<br>
> @@ -0,0 +1,92 @@<br>
> +/* SPDX-License-Identifier: BSD-3-Clause<br>
> + * Copyright(c) 2016-2021 Atomic Rules LLC */<br>
> +<br>
> +#ifndef _ARK_BBDEV_COMMON_H_<br>
> +#define _ARK_BBDEV_COMMON_H_<br>
> +<br>
> +#include "ark_pktchkr.h"<br>
> +#include "ark_pktdir.h"<br>
> +#include "ark_pktgen.h"<br>
> +<br>
> +#define ARK_MAX_ARG_LEN 256<br>
> +<br>
> +/* Acceptable params for ark BBDEV devices */<br>
> +/*<br>
> + * The packet generator is a functional block used to generate packet<br>
> + * patterns for testing. It is not intended for nominal use.<br>
> + */<br>
> +#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"<br>
> +<br>
> +/*<br>
> + * The packet checker is a functional block used to verify packet<br>
> + * patterns for testing. It is not intended for nominal use.<br>
> + */<br>
> +#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"<br>
> +<br>
> +/*<br>
> + * The packet director is used to select the internal ingress and<br>
> + * egress packets paths during testing. It is not intended for<br>
> + * nominal use.<br>
> + */<br>
> +#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"<br>
> +<br>
> +<br>
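> +/* Union of differently sized pointer types used to address a hardware block */<br>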
> +#define def_ptr(type, name) \<br>
> + union type { \<br>
> + uint64_t *t64; \<br>
> + uint32_t *t32; \<br>
> + uint16_t *t16; \<br>
> + uint8_t *t8; \<br>
> + void *v; \<br>
> + } name<br>
> +<br>
> +/*<br>
> + * Structure to store private data for each PF/VF instance.<br>
> + */<br>
> +struct ark_bbdevice {<br>
> + /* Our Bar 0 */<br>
> + uint8_t *bar0;<br>
> +<br>
> + /* Application Bar needed for extensions */<br>
> + uint8_t *a_bar;<br>
> +<br>
> + /* Arkville hardware block offsets */<br>
> + def_ptr(sys_ctrl, sysctrl);<br>
> + def_ptr(pkt_gen, pktgen);<br>
> + def_ptr(mpu_rx, mpurx);<br>
> + def_ptr(UDM, udm);<br>
> + def_ptr(mpu_tx, mputx);<br>
> + def_ptr(DDM, ddm);<br>
> + def_ptr(pkt_dir, pktdir);<br>
> + def_ptr(pkt_chkr, pktchkr);<br>
> + struct ark_rqpace_t *rqpacing;<br>
> +<br>
> + /* Pointers to packet generator and checker */<br>
> + int start_pg;<br>
> + ark_pkt_gen_t pg;<br>
> + ark_pkt_chkr_t pc;<br>
> + ark_pkt_dir_t pd;<br>
> +<br>
> + /* Packet generator/checker args */<br>
> + char pkt_gen_args[ARK_MAX_ARG_LEN];<br>
> + char pkt_chkr_args[ARK_MAX_ARG_LEN];<br>
> + uint32_t pkt_dir_v;<br>
> +<br>
> + int started;<br>
> + unsigned int max_nb_queues; /**< Max number of queues */<br>
> +<br>
> +};<br>
> +<br>
> +<br>
> +/* Log message for PMD */<br>
> +extern int ark_bbdev_logtype;<br>
> +<br>
> +/* Helper macro for logging */<br>
> +#define ARK_BBDEV_LOG(level, fmt, ...) \<br>
> + rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \<br>
> + "ARK_BBD: " fmt "\n", ##__VA_ARGS__)<br>
> +<br>
> +int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);<br>
> +<br>
> +#endif<br>
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.c<br>
> b/drivers/baseband/ark/ark_bbdev_custom.c<br>
> new file mode 100644<br>
> index 0000000000..6b1553abe1<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/ark_bbdev_custom.c<br>
> @@ -0,0 +1,201 @@<br>
> +/* SPDX-License-Identifier: BSD-3-Clause<br>
> + * Copyright(c) 2016-2021 Atomic Rules LLC */<br>
> +<br>
> +#include <rte_bbdev.h><br>
> +#include <rte_bbdev_pmd.h><br>
> +<br>
> +#include <rte_mbuf.h><br>
> +#include <rte_hexdump.h> /* For debug */<br>
> +<br>
> +<br>
> +#include "ark_bbdev_common.h"<br>
> +#include "ark_bbdev_custom.h"<br>
> +<br>
> +/* It is expected that functions in this file will be modified based on<br>
> + * specifics of the FPGA hardware beyond the core Arkville<br>
> + * components.<br>
> + */<br>
> +<br>
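> +/* The first 4 bytes of user meta travel in the header descriptor;<br>
> + * each additional descriptor carries 8 bytes, so (bytes + 3) / 8<br>
> + * gives the number of extra meta descriptors required.<br>
> + */<br>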
> +/* bytes must be in the range of 0 to 20 */<br>
> +static inline<br>
> +uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes) {<br>
> + return (bytes + 3) / 8;<br>
> +}<br>
> +<br>
> +void<br>
> +ark_bbdev_info_get(struct rte_bbdev *dev,<br>
> + struct rte_bbdev_driver_info *dev_info) {<br>
> + struct ark_bbdevice *ark_bb = dev->data->dev_private;<br>
> +<br>
> + static const struct rte_bbdev_op_cap bbdev_capabilities[] = {<br>
> + {<br>
> + .type = RTE_BBDEV_OP_LDPC_DEC,<br>
> + .cap.ldpc_dec = {<br>
> + .capability_flags =<br>
> + RTE_BBDEV_LDPC_CRC_24B_ATTACH |<br>
> + RTE_BBDEV_LDPC_RATE_MATCH,<br>
> + .num_buffers_src =<br>
> + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,<br>
> + .num_buffers_hard_out =<br>
> + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS<br>
> + }<br>
> + },<br>
> + {<br>
> + .type = RTE_BBDEV_OP_LDPC_ENC,<br>
> + .cap.ldpc_enc = {<br>
> + .capability_flags =<br>
> + RTE_BBDEV_LDPC_CRC_24B_ATTACH |<br>
> + RTE_BBDEV_LDPC_RATE_MATCH,<br>
> + .num_buffers_src =<br>
> + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,<br>
> + .num_buffers_dst =<br>
> + RTE_BBDEV_LDPC_MAX_CODE_BLOCKS<br>
> + }<br>
> + },<br>
> + RTE_BBDEV_END_OF_CAPABILITIES_LIST(),<br>
> + };<br>
> +<br>
> + static struct rte_bbdev_queue_conf default_queue_conf = {<br>
> + .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,<br>
> + };<br>
> +<br>
> + default_queue_conf.socket = dev->data->socket_id;<br>
> +<br>
> + dev_info->driver_name = RTE_STR(DRIVER_NAME);<br>
> + dev_info->max_num_queues = ark_bb->max_nb_queues;<br>
> + dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;<br>
> + dev_info->hardware_accelerated = true;<br>
> + dev_info->max_dl_queue_priority = 0;<br>
> + dev_info->max_ul_queue_priority = 0;<br>
> + dev_info->default_queue_conf = default_queue_conf;<br>
> + dev_info->capabilities = bbdev_capabilities;<br>
> + dev_info->cpu_flag_reqs = NULL;<br>
> + dev_info->min_alignment = 4;<br>
> +<br>
> +}<br>
> +<br>
> +/* Structure defining layout of the ldpc command struct */<br>
> +struct ark_bb_ldpc_enc_meta {<br>
> + uint16_t header;<br>
> + uint8_t rv_index:2,<br>
> + basegraph:1,<br>
> + code_block_mode:1,<br>
> + rfu_71_68:4;<br>
> +<br>
> + uint8_t q_m;<br>
> + uint32_t e_ea;<br>
> + uint32_t eb;<br>
> + uint8_t c;<br>
> + uint8_t cab;<br>
> + uint16_t n_cb;<br>
> + uint16_t pad;<br>
> + uint16_t trailer;<br>
> +} __rte_packed;<br>
> +<br>
> +/* The size must be no more than 20 bytes */<br>
> +static_assert(sizeof(struct ark_bb_ldpc_enc_meta) <= 20, "struct size");<br>
> +<br>
> +/* Custom operation on enqueue ldpc operation */<br>
> +/* Do these functions need a queue number? */<br>
> +/* Maximum of 20 bytes */<br>
> +int<br>
> +ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,<br>
> + uint32_t *meta, uint8_t *meta_cnt) {<br>
> + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;<br>
> + struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta *)meta;<br>
> +<br>
> + src->header = 0x4321; /* For testing */<br>
> + src->trailer = 0xFEDC;<br>
> +<br>
> + src->rv_index = ldpc_enc_op->rv_index;<br>
> + src->basegraph = ldpc_enc_op->basegraph;<br>
> + src->code_block_mode = ldpc_enc_op->code_block_mode;<br>
> +<br>
> + src->q_m = ldpc_enc_op->q_m;<br>
> + src->e_ea = 0xABCD;<br>
> + src->eb = ldpc_enc_op->tb_params.eb;<br>
> + src->c = ldpc_enc_op->tb_params.c;<br>
> + src->cab = ldpc_enc_op->tb_params.cab;<br>
> +<br>
> + src->n_cb = 0;<br>
> +<br>
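> + /* Note: these constant patterns overwrite the structured meta<br>
> + * filled in above; the dequeue path regenerates and compares<br>
> + * the same values as a loopback check.<br>
> + */<br>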
> + meta[0] = 0x11111110;<br>
> + meta[1] = 0x22222220;<br>
> + meta[2] = 0x33333330;<br>
> + meta[3] = 0x44444440;<br>
> + meta[4] = 0x55555550;<br>
> +<br>
> + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(<br>
> + sizeof(struct ark_bb_ldpc_enc_meta));<br>
> + return 0;<br>
> +}<br>
> +<br>
> +/* Custom operation on dequeue ldpc operation */<br>
> +int<br>
> +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,<br>
> + const uint32_t *usermeta)<br>
> +{<br>
> + static int dump; /* = 0 */<br>
> + /* Just compare with what was sent? */<br>
> + uint32_t meta_in[5] = {0};<br>
> + uint8_t meta_cnt;<br>
> +<br>
> + ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);<br>
> + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {<br>
> + fprintf(stderr,<br>
> + "------------------------------------------\n");<br>
> + rte_hexdump(stdout, "meta difference for lpdc_enc IN",<br>
> + meta_in, 20);<br>
> + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",<br>
> + usermeta, 20);<br>
> + } else if (dump) {<br>
> + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);<br>
> + dump--;<br>
> + }<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> +<br>
> +/* LDPC decode op callbacks for user meta data */<br>
> +int<br>
> +ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,<br>
> + uint32_t *meta, uint8_t *meta_cnt) {<br>
> + RTE_SET_USED(enc_op);<br>
> + meta[0] = 0xF1111110;<br>
> + meta[1] = 0xF2222220;<br>
> + meta[2] = 0xF3333330;<br>
> + meta[3] = 0xF4444440;<br>
> + meta[4] = 0xF5555550;<br>
> +<br>
> + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);<br>
> + return 0;<br>
> +}<br>
> +<br>
> +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,<br>
> + const uint32_t *usermeta)<br>
> +{<br>
> + static int dump; /* = 0 */<br>
> + /* Just compare with what was sent? */<br>
> + uint32_t meta_in[5] = {0};<br>
> + uint8_t meta_cnt;<br>
> +<br>
> + RTE_SET_USED(enc_op);<br>
> +<br>
> + ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);<br>
> + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {<br>
> + fprintf(stderr,<br>
> + "------------------------------------------\n");<br>
> + rte_hexdump(stdout, "meta difference for lpdc_enc IN",<br>
> + meta_in, 20);<br>
> + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",<br>
> + usermeta, 20);<br>
> + } else if (dump) {<br>
> + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);<br>
> + dump--;<br>
> + }<br>
> + return 0;<br>
> +}<br>
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.h<br>
> b/drivers/baseband/ark/ark_bbdev_custom.h<br>
> new file mode 100644<br>
> index 0000000000..32a2ef6bb6<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/ark_bbdev_custom.h<br>
> @@ -0,0 +1,30 @@<br>
> +/* SPDX-License-Identifier: BSD-3-Clause<br>
> + * Copyright(c) 2016-2021 Atomic Rules LLC */<br>
> +<br>
> +#ifndef _ARK_BBDEV_CUSTOM_H_<br>
> +#define _ARK_BBDEV_CUSTOM_H_<br>
> +<br>
> +#include <stdint.h><br>
> +<br>
> +/* Forward declarations */<br>
> +struct rte_bbdev;<br>
> +struct rte_bbdev_driver_info;<br>
> +struct rte_bbdev_enc_op;<br>
> +struct rte_bbdev_dec_op;<br>
> +struct rte_mbuf;<br>
> +<br>
> +void ark_bbdev_info_get(struct rte_bbdev *dev,<br>
> + struct rte_bbdev_driver_info *dev_info);<br>
> +<br>
> +int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,<br>
> + uint32_t *meta, uint8_t *meta_cnt);<br>
> +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,<br>
> + const uint32_t *usermeta);<br>
> +<br>
> +int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,<br>
> + uint32_t *meta, uint8_t *meta_cnt);<br>
> +int ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,<br>
> + const uint32_t *usermeta);<br>
> +<br>
> +#endif<br>
> diff --git a/drivers/baseband/ark/meson.build<br>
> b/drivers/baseband/ark/meson.build<br>
> new file mode 100644<br>
> index 0000000000..b876f05c6e<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/meson.build<br>
> @@ -0,0 +1,11 @@<br>
> +# SPDX-License-Identifier: BSD-3-Clause<br>
> +# Copyright(c) 2018 Luca Boccassi <<a href="mailto:bluca@debian.org" target="_blank">bluca@debian.org</a>><br>
> +<br>
> +deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring']<br>
> +sources = files(<br>
> + 'ark_bbdev.c',<br>
> + 'ark_bbdev_common.c',<br>
> + 'ark_bbdev_custom.c'<br>
> + )<br>
> +<br>
> +includes += include_directories('../../common/ark')<br>
> diff --git a/drivers/baseband/ark/version.map<br>
> b/drivers/baseband/ark/version.map<br>
> new file mode 100644<br>
> index 0000000000..4a76d1d52d<br>
> --- /dev/null<br>
> +++ b/drivers/baseband/ark/version.map<br>
> @@ -0,0 +1,3 @@<br>
> +DPDK_21 {<br>
> + local: *;<br>
> +};<br>
> --<br>
> 2.25.1<br>
<br>
</blockquote></div>