[dpdk-dev] [RFC Patch 25/39] net/dpaa: add support for Tx and Rx queue setup

Shreyansh Jain shreyansh.jain at nxp.com
Sat May 27 12:25:21 CEST 2017

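Add Rx and Tx queue setup/release operations for the DPAA PMD, along
with the Rx and Tx burst handlers. Rx frame queues are carved out of
the PCD FQID range (their count can be overridden through the
DPAA_NUM_RX_QUEUES environment variable) and one Tx frame queue is
created per lcore.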

Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain at nxp.com>
---
 doc/guides/nics/features/dpaa.ini |   1 +
 drivers/net/dpaa/Makefile         |   4 +
 drivers/net/dpaa/dpaa_ethdev.c    | 271 ++++++++++++++++++++++++++++++++-
 drivers/net/dpaa/dpaa_ethdev.h    |   7 +
 drivers/net/dpaa/dpaa_rxtx.c      | 312 ++++++++++++++++++++++++++++++++++++++
 drivers/net/dpaa/dpaa_rxtx.h      | 216 ++++++++++++++++++++++++++
 mk/rte.app.mk                     |   1 +
 7 files changed, 807 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/dpaa/dpaa_rxtx.c
 create mode 100644 drivers/net/dpaa/dpaa_rxtx.h

diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
index 9e8befc..29ba47e 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -4,5 +4,6 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Queue start/stop     = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 8fcde26..06b63fc 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -44,11 +44,13 @@ else
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 endif
+CFLAGS += -Wno-pointer-arith
 
 CFLAGS += -I$(RTE_SDK_DPAA)/
 CFLAGS += -I$(RTE_SDK_DPAA)/include
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
 
@@ -58,7 +60,9 @@ LIBABIVER := 1
 
 # Interfaces with DPDK
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
 
 LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 53c8277..b93f781 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -62,8 +62,16 @@
 
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
 
 #include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
+
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -79,20 +87,104 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
 	PMD_INIT_FUNC_TRACE();
 
 	/* Change tx callback to the real one */
-	dev->tx_pkt_burst = NULL;
+	dev->tx_pkt_burst = dpaa_eth_queue_tx;
+	fman_if_enable_rx(dpaa_intf->fif);
 
 	return 0;
 }
 
 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
-	dev->tx_pkt_burst = NULL;
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	fman_if_disable_rx(dpaa_intf->fif);
+	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+}
+
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	dpaa_eth_dev_stop(dev);
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+static int
+dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc __rte_unused,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_rxconf *rx_conf __rte_unused,
+			struct rte_mempool *mp)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	PMD_DRV_LOG(INFO, "Rx queue setup for queue index: %d", queue_idx);
+
+	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+		struct fman_if_ic_params icp;
+		uint32_t fd_offset;
+		uint32_t bp_size;
+
+		if (!mp->pool_data) {
+			PMD_DRV_LOG(ERR, "not an offloaded buffer pool");
+			return -1;
+		}
+		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+		memset(&icp, 0, sizeof(icp));
+		/* set the internal context (IC) copy parameters to defaults */
+		icp.iciof = DEFAULT_ICIOF;
+		icp.iceof = DEFAULT_RX_ICEOF;
+		icp.icsz = DEFAULT_ICSZ;
+		fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
+		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+		/* Buffer pool size should be equal to the dataroom size */
+		bp_size = rte_pktmbuf_data_room_size(mp);
+		fman_if_set_bp(dpaa_intf->fif, mp->size,
+			       dpaa_intf->bp_info->bpid, bp_size);
+		dpaa_intf->valid = 1;
+		PMD_DRV_LOG(INFO, "if: %s, fd_offset: %d, fman fd_offset: %d",
+			    dpaa_intf->name, fd_offset,
+			    fman_if_get_fdoff(dpaa_intf->fif));
+	}
+	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+
+	return 0;
+}
+
+static void
+dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc __rte_unused,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	PMD_DRV_LOG(INFO, "Tx queue setup for queue index: %d", queue_idx);
+	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+	return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
 {
 	PMD_INIT_FUNC_TRACE();
 }
@@ -102,28 +194,197 @@ static struct eth_dev_ops dpaa_devops = {
 	.dev_start		  = dpaa_eth_dev_start,
 	.dev_stop		  = dpaa_eth_dev_stop,
 	.dev_close		  = dpaa_eth_dev_close,
+
+	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
+	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
+	.rx_queue_release	  = dpaa_eth_rx_queue_release,
+	.tx_queue_release	  = dpaa_eth_tx_queue_release,
 };
 
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq,
+			      uint32_t fqid)
+{
+	struct qm_mcc_initfq opts;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = qman_reserve_fqid(fqid);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "reserve rx fqid %d failed with ret: %d",
+			fqid, ret);
+		return -EINVAL;
+	}
+	PMD_DRV_LOG(DEBUG, "creating rx fq %p, fqid %d", fq, fqid);
+	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "create rx fqid %d failed with ret: %d",
+			fqid, ret);
+		return ret;
+	}
+
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_TDTHRESH;
+
+	opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+	opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+			   QM_FQCTRL_PREFERINCACHE | QM_FQCTRL_TDE;
+	opts.fqd.context_a.stashing.exclusive = 0;
+	opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
+	opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+	opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+
+	qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
+
+	ret = qman_init_fq(fq, 0, &opts);
+	if (ret)
+		PMD_DRV_LOG(ERR, "init rx fqid %d failed with ret: %d",
+			fqid, ret);
+	return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+			      struct fman_if *fman_intf)
+{
+	struct qm_mcc_initfq opts;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "create tx fq failed with ret: %d", ret);
+		return ret;
+	}
+	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+	opts.fqd.dest.channel = fman_intf->tx_channel_id;
+	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+	opts.fqd.context_b = 0;
+	/* no tx-confirmation */
+	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+	PMD_DRV_LOG(DEBUG, "init tx fq %p, fqid %d", fq, fq->fqid);
+	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+	if (ret)
+		PMD_DRV_LOG(ERR, "init tx fqid %d failed %d", fq->fqid, ret);
+	return ret;
+}
+
 /* Initialise a network interface */
-static int dpaa_eth_dev_init(struct rte_eth_dev *eth_dev __rte_unused)
+static int dpaa_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
+	int num_cores, num_rx_fqs, fqid;
+	int loop, ret = 0;
 	int dev_id;
 	struct rte_dpaa_device *dpaa_device;
 	struct dpaa_if *dpaa_intf;
+	struct fm_eth_port_cfg *cfg;
+	struct fman_if *fman_intf;
+	struct fman_if_bpool *bp, *tmp_bp;
 
 	PMD_INIT_FUNC_TRACE();
 
 	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
 	dev_id = dpaa_device->id.dev_id;
 	dpaa_intf = eth_dev->data->dev_private;
+	cfg = &dpaa_netcfg->port_cfg[dev_id];
+	fman_intf = cfg->fman_if;
 
 	dpaa_intf->name = dpaa_device->name;
 
+	/* save fman_if & cfg in the interface structure */
+	dpaa_intf->fif = fman_intf;
 	dpaa_intf->ifid = dev_id;
+	dpaa_intf->cfg = cfg;
+
+	/* Initialize Rx FQs */
+	if (getenv("DPAA_NUM_RX_QUEUES"))
+		num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+	else
+		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+
+	dpaa_intf->rx_queues = rte_zmalloc(NULL,
+		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+	if (!dpaa_intf->rx_queues)
+		return -ENOMEM;
+	for (loop = 0; loop < num_rx_fqs; loop++) {
+		fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+			DPAA_PCD_FQID_MULTIPLIER + loop;
+		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+		if (ret)
+			return ret;
+		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+	}
+	dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+	/* Initialise Tx FQs. Have as many Tx FQs as there are cores */
+	num_cores = rte_lcore_count();
+	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+		num_cores, MAX_CACHELINE);
+	if (!dpaa_intf->tx_queues)
+		return -ENOMEM;
+
+	for (loop = 0; loop < num_cores; loop++) {
+		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+					 fman_intf);
+		if (ret)
+			return ret;
+		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+	}
+	dpaa_intf->nb_tx_queues = num_cores;
 
+	PMD_DRV_LOG(DEBUG, "all fqs created");
+
+	/* reset bpool list, initialize bpool dynamically */
+	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+		list_del(&bp->node);
+		rte_free(bp);
+	}
+
+	/* Populate ethdev structure */
 	eth_dev->dev_ops = &dpaa_devops;
+	eth_dev->data->nb_rx_queues = dpaa_intf->nb_rx_queues;
+	eth_dev->data->nb_tx_queues = dpaa_intf->nb_tx_queues;
+	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+		ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+						"store MAC addresses",
+				ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+		return -ENOMEM;
+	}
 
-	return -1;
+	/* copy the primary MAC address */
+	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
+		fman_intf->mac_addr.addr_bytes,
+		ETHER_ADDR_LEN);
+
+	PMD_DRV_LOG(DEBUG, "interface %s macaddr:", dpaa_device->name);
+	for (loop = 0; loop < ETHER_ADDR_LEN; loop++) {
+		if (loop != (ETHER_ADDR_LEN - 1))
+			printf("%02x:", fman_intf->mac_addr.addr_bytes[loop]);
+		else
+			printf("%02x\n", fman_intf->mac_addr.addr_bytes[loop]);
+	}
+
+	/* Disable RX mode */
+	fman_if_discard_rx_errors(fman_intf);
+	fman_if_disable_rx(fman_intf);
+	/* Disable promiscuous mode */
+	fman_if_promiscuous_disable(fman_intf);
+	/* Disable multicast */
+	fman_if_reset_mcast_filter_table(fman_intf);
+	/* Reset interface statistics */
+	fman_if_stats_reset(fman_intf);
+
+	return 0;
 }
 
 static int
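
For context, the rx/tx queue setup ops added above are reached through
the standard ethdev API. A minimal application-side sketch (illustrative
only, not part of this patch; the mempool must be a DPAA-offloaded pool,
since rx_queue_setup rejects pools without pool_data):

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int setup_port(uint8_t port, struct rte_mempool *mbuf_pool)
    {
        struct rte_eth_conf conf;
        int ret;

        memset(&conf, 0, sizeof(conf));
        /* one Rx and one Tx queue */
        ret = rte_eth_dev_configure(port, 1, 1, &conf);
        if (ret < 0)
            return ret;
        /* nb_desc and socket_id are accepted but unused by this PMD */
        ret = rte_eth_rx_queue_setup(port, 0, 128, 0, NULL, mbuf_pool);
        if (ret < 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port, 0, 512, 0, NULL);
        if (ret < 0)
            return ret;
        return rte_eth_dev_start(port);
    }
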
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 7c1295e..076faf5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -38,6 +38,13 @@
 #include <rte_ethdev.h>
 
 #include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
 
 #define FSL_CLASS_ID		0
 #define FSL_VENDOR_ID		0x1957
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
new file mode 100644
index 0000000..5978090
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -0,0 +1,312 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright 2017 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <rte_config.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "dpaa_ethdev.h"
+#include "dpaa_rxtx.h"
+#include <rte_dpaa_bus.h>
+#include <dpaa_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
+	do { \
+		(_fd)->cmd = 0; \
+		(_fd)->opaque_addr = 0; \
+		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
+		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
+		(_fd)->opaque |= (_mbuf)->pkt_len; \
+		(_fd)->addr = (_mbuf)->buf_physaddr; \
+		(_fd)->bpid = _bpid; \
+	} while (0)
+
+static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
+							uint32_t ifid)
+{
+	struct pool_info_entry *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+	struct rte_mbuf *mbuf;
+	void *ptr;
+	uint16_t offset =
+		(fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+	uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+	PMD_RX_LOG(DEBUG, " FD--->MBUF");
+
+	/* Ignoring the case when format != qm_fd_contig */
+	ptr = rte_dpaa_mem_ptov(fd->addr);
+	/* Ignoring the case when ptr would be NULL. That is only possible
+	 * in case of a corrupted packet.
+	 */
+
+	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+	/* Prefetch the Parse results and packet data to L1 */
+	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+	rte_prefetch0((void *)((uint8_t *)ptr + offset));
+
+	mbuf->data_off = offset;
+	mbuf->data_len = length;
+	mbuf->pkt_len = length;
+
+	mbuf->port = ifid;
+	mbuf->nb_segs = 1;
+	mbuf->ol_flags = 0;
+	mbuf->next = NULL;
+	rte_mbuf_refcnt_set(mbuf, 1);
+
+	return mbuf;
+}
+
+uint16_t dpaa_eth_queue_rx(void *q,
+			   struct rte_mbuf **bufs,
+			   uint16_t nb_bufs)
+{
+	struct qman_fq *fq = q;
+	struct qm_dqrr_entry *dq;
+	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+	int ret;
+
+	ret = rte_dpaa_portal_init((void *)0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failure in affining portal");
+		return 0;
+	}
+
+	ret = qman_set_vdq(fq, nb_bufs);
+	if (ret)
+		return 0;
+
+	do {
+		dq = qman_dequeue(fq);
+		if (!dq)
+			continue;
+		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
+		qman_dqrr_consume(fq, dq);
+	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+	return num_rx;
+}
+
+static void *dpaa_get_pktbuf(struct pool_info_entry *bp_info)
+{
+	int ret;
+	uint64_t buf = 0;
+	struct bm_buffer bufs;
+
+	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
+	if (ret <= 0) {
+		PMD_DRV_LOG(WARNING, "Failed to allocate buffers %d", ret);
+		return (void *)buf;
+	}
+
+	PMD_RX_LOG(DEBUG, "got buffer 0x%llx from pool %d",
+		    bufs.addr, bufs.bpid);
+
+	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
+
+	return (void *)buf;
+}
+
+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
+					     struct dpaa_if *dpaa_intf)
+{
+	struct rte_mbuf *dpaa_mbuf;
+
+	/* allocate pktbuffer on bpid for dpaa port */
+	dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
+	if (!dpaa_mbuf)
+		return NULL;
+
+	memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+		((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
+
+	/* Copy only the required fields */
+	dpaa_mbuf->data_off = mbuf->data_off;
+	dpaa_mbuf->pkt_len = mbuf->pkt_len;
+	dpaa_mbuf->ol_flags = mbuf->ol_flags;
+	dpaa_mbuf->packet_type = mbuf->packet_type;
+	dpaa_mbuf->tx_offload = mbuf->tx_offload;
+	rte_pktmbuf_free(mbuf);
+	return dpaa_mbuf;
+}
+
+uint16_t
+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+	struct rte_mbuf *mbuf, *mi = NULL;
+	struct rte_mempool *mp;
+	struct pool_info_entry *bp_info;
+	struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, loop, i = 0;
+	int ret;
+
+	ret = rte_dpaa_portal_init((void *)0);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failure in affining portal");
+		return 0;
+	}
+
+	PMD_TX_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
+
+	while (nb_bufs) {
+		frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
+		for (loop = 0; loop < frames_to_send; loop++, i++) {
+			mbuf = bufs[i];
+			if (RTE_MBUF_DIRECT(mbuf)) {
+				mp = mbuf->pool;
+			} else {
+				mi = rte_mbuf_from_indirect(mbuf);
+				mp = mi->pool;
+			}
+
+			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+			if (mp->ops_index == bp_info->dpaa_ops_index) {
+				PMD_TX_LOG(DEBUG, "BMAN offloaded buffer, "
+					"mbuf: %p", mbuf);
+				if (mbuf->nb_segs == 1) {
+					if (RTE_MBUF_DIRECT(mbuf)) {
+						if (rte_mbuf_refcnt_read(mbuf) > 1) {
+							DPAA_MBUF_TO_CONTIG_FD(mbuf,
+								&fd_arr[loop], 0xff);
+							rte_mbuf_refcnt_update(mbuf, -1);
+						} else {
+							DPAA_MBUF_TO_CONTIG_FD(mbuf,
+								&fd_arr[loop], bp_info->bpid);
+						}
+					} else {
+						if (rte_mbuf_refcnt_read(mi) > 1) {
+							DPAA_MBUF_TO_CONTIG_FD(mbuf,
+								&fd_arr[loop], 0xff);
+						} else {
+							rte_mbuf_refcnt_update(mi, 1);
+							DPAA_MBUF_TO_CONTIG_FD(mbuf,
+								&fd_arr[loop], bp_info->bpid);
+						}
+						rte_pktmbuf_free(mbuf);
+					}
+				} else {
+					PMD_DRV_LOG(DEBUG, "Multi-segment packets not supported");
+					/* Set frames_to_send & nb_bufs so that
+					 * packets are transmitted till
+					 * previous frame.
+					 */
+					frames_to_send = loop;
+					nb_bufs = loop;
+					goto send_pkts;
+				}
+			} else {
+				struct qman_fq *txq = q;
+				struct dpaa_if *dpaa_intf = txq->dpaa_intf;
+
+				PMD_TX_LOG(DEBUG, "Non-BMAN offloaded buffer. "
+					"Allocating an offloaded buffer");
+				mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
+				if (!mbuf) {
+					PMD_DRV_LOG(DEBUG, "no dpaa buffers.");
+					/* Set frames_to_send & nb_bufs so that
+					 * packets are transmitted till
+					 * previous frame.
+					 */
+					frames_to_send = loop;
+					nb_bufs = loop;
+					goto send_pkts;
+				}
+
+				DPAA_MBUF_TO_CONTIG_FD(mbuf, &fd_arr[loop],
+						dpaa_intf->bp_info->bpid);
+			}
+		}
+
+send_pkts:
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qman_enqueue_multi(q, &fd_arr[loop],
+					frames_to_send - loop);
+		}
+		nb_bufs -= frames_to_send;
+	}
+
+	PMD_TX_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);
+
+	return i;
+}
+
+uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
+			      struct rte_mbuf **bufs __rte_unused,
+		uint16_t nb_bufs __rte_unused)
+{
+	PMD_TX_LOG(DEBUG, "Drop all packets");
+
+	/* Drop all incoming packets. No need to free packets here
+	 * because the rte_eth framework frees them through the tx_buffer
+	 * callback when this function returns a count less than nb_bufs.
+	 */
+	return 0;
+}
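
The two burst handlers above are what rte_eth_rx_burst()/rte_eth_tx_burst()
end up calling once the device is started. A rough forwarding loop
(sketch only; note that dpaa_eth_queue_tx() can return fewer frames than
requested, e.g. on buffer exhaustion or a multi-segment mbuf, so the
caller must free the unsent mbufs):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static void fwd_loop(uint8_t port) /* port_id is a uint8_t here */
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx;

        for (;;) {
            nb_rx = rte_eth_rx_burst(port, 0, pkts, BURST_SIZE);
            if (nb_rx == 0)
                continue;
            nb_tx = rte_eth_tx_burst(port, 0, pkts, nb_rx);
            /* free whatever the PMD did not accept */
            while (nb_tx < nb_rx)
                rte_pktmbuf_free(pkts[nb_tx++]);
        }
    }
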
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
new file mode 100644
index 0000000..8858736
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -0,0 +1,216 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright 2017 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPDK_RXTX_H__
+#define __DPDK_RXTX_H__
+
+#define L2_ERROR_MASK	  0x001f  /* bits 11:15 */
+#define L3_ERROR_MASK	  0x0200 /* bit 6 */
+#define L4_ERROR_MASK	  0x10	 /* bit 3 */
+#define ETH_LEN_ERR	  2
+#define VLAN_LEN_ERR	  4
+
+#define ETH_PRESENT_MASK  0x8000 /* bit 0 */
+#define L2_BIT_POS 15		/* bit 0 */
+#define ETH_BIT_POS L2_BIT_POS	/* bit 0 */
+#define VLAN_PRESENT_MASK 0x4000 /* bit 1 */
+#define VLAN_BIT_POS (ETH_BIT_POS - 1) /* bit 1 */
+#define QINQ_PRESENT_MASK 0x100 /* bit 7 */
+#define VLAN_QINQ_BIT_POS (ETH_BIT_POS - 7) /* bit 7 */
+
+#define FIRST_IPV4_PRESENT_MASK 0x8000 /* bit 0 */
+#define L3_BIT_POS 15		/* bit 0 */
+#define FIRST_IPV4_BIT_POS 15		/* bit 0 */
+#define FIRST_IPV6_PRESENT_MASK 0x4000 /* bit 1 */
+#define FIRST_IPV6_BIT_POS (FIRST_IPV4_BIT_POS - 1) /* bit 1 */
+#define UNKNOWN_PROTO_MASK	0x0080 /* bit 8 */
+#define UNKNOWN_PROTO_BIT_POS	7 /* bit 8 */
+#define IPOPT_MASK		0x0100 /* bit 7 */
+#define IPOPT_BIT_POS		8 /* bit 7 */
+#define IPFRAG_MASK		0x0040 /* bit 9 */
+#define IPFRAG_BIT_POS		6 /* bit 9 */
+
+#define L4_TYPE_MASK	0xe0 /* bits 0:2 */
+#define L4_BIT_POS 6		/* bit 1 */
+#define L4_TYPE_SHIFT	5
+#define TCP_PRESENT	1
+#define UDP_PRESENT	2
+#define IPSEC_PRESENT	3
+#define SCTP_PRESENT	4
+
+/* internal offset from where IC is copied to the packet buffer */
+#define DEFAULT_ICIOF          32
+/* IC transfer size */
+#define DEFAULT_ICSZ	48
+
+/* IC offsets from buffer header address */
+#define DEFAULT_RX_ICEOF	16
+#define DEFAULT_TX_ICEOF	16
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define DPAA_L3_PARSE_RESULT_IPV4 0x80
+/* L3 Type field: First IP Present IPv6 */
+#define DPAA_L3_PARSE_RESULT_IPV6	0x40
+/* Values for the L4R field of the FM Parse Results
+ * See section 8.8.4.7.20 - L4 HXS - L4 Results in the DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define DPAA_L4_PARSE_RESULT_UDP	0x40
+/* L4 Type field: TCP */
+#define DPAA_L4_PARSE_RESULT_TCP	0x20
+
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
+/* Parsing mask (Little Endian) - 0x00E044EC00800000
+ *	Classification Plan ID 0x00
+ *	L4R 0xE0 -
+ *		0x20 - TCP
+ *		0x40 - UDP
+ *		0x80 - SCTP
+ *	L3R 0xEC44 (in Big Endian) -
+ *		0x8000 - IPv4
+ *		0x4000 - IPv6
+ *		0x8040 - IPv4 Ext
+ *		0x4040 - IPv6 Ext
+ *	L2R 0x8000 (in Big Endian) -
+ *		0x8000 - Ethernet type
+ *	ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK		0x00E044EC00800000
+#define DPAA_PARSE_VLAN_MASK		0x0000000000700000
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE		0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER		0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4		0x0000008000800000
+#define DPAA_PKT_TYPE_IPV6		0x0000004000800000
+#define DPAA_PKT_TYPE_IPV4_EXT		0x0000408000800000
+#define DPAA_PKT_TYPE_IPV6_EXT		0x0000404000800000
+#define DPAA_PKT_TYPE_IPV4_TCP		0x0020008000800000
+#define DPAA_PKT_TYPE_IPV6_TCP		0x0020004000800000
+#define DPAA_PKT_TYPE_IPV4_UDP		0x0040008000800000
+#define DPAA_PKT_TYPE_IPV6_UDP		0x0040004000800000
+#define DPAA_PKT_TYPE_IPV4_SCTP	0x0080008000800000
+#define DPAA_PKT_TYPE_IPV6_SCTP	0x0080004000800000
+#define DPAA_PKT_L3_LEN_SHIFT	7
+
+/* FD structure masks and offset */
+#define DPAA_FD_FORMAT_MASK 0xE0000000
+#define DPAA_FD_OFFSET_MASK 0x1FF00000
+#define DPAA_FD_LENGTH_MASK 0xFFFFF
+#define DPAA_FD_FORMAT_SHIFT 29
+#define DPAA_FD_OFFSET_SHIFT 20
+
+
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+	 uint8_t     lpid;		 /**< Logical port id */
+	 uint8_t     shimr;		 /**< Shim header result  */
+	 uint16_t    l2r;		 /**< Layer 2 result */
+	 uint16_t    l3r;		 /**< Layer 3 result */
+	 uint8_t     l4r;		 /**< Layer 4 result */
+	 uint8_t     cplan;		 /**< Classification plan id */
+	 uint16_t    nxthdr;		 /**< Next Header  */
+	 uint16_t    cksum;		 /**< Checksum */
+	 uint32_t    lcv;		 /**< LCV */
+	 uint8_t     shim_off[3];	 /**< Shim offset */
+	 uint8_t     eth_off;		 /**< ETH offset */
+	 uint8_t     llc_snap_off;	 /**< LLC_SNAP offset */
+	 uint8_t     vlan_off[2];	 /**< VLAN offset */
+	 uint8_t     etype_off;		 /**< ETYPE offset */
+	 uint8_t     pppoe_off;		 /**< PPP offset */
+	 uint8_t     mpls_off[2];	 /**< MPLS offset */
+	 uint8_t     ip_off[2];		 /**< IP offset */
+	 uint8_t     gre_off;		 /**< GRE offset */
+	 uint8_t     l4_off;		 /**< Layer 4 offset */
+	 uint8_t     nxthdr_off;	 /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* This structure holds the data prepended to the frame, used by FMAN */
+struct annotations_t {
+	uint8_t reserved[DEFAULT_RX_ICEOF];
+	struct dpaa_eth_parse_results_t parse;	/**< Parse results */
+	uint64_t reserved1;
+	uint64_t hash;			/**< Hash Result */
+};
+
+#define GET_ANNOTATIONS(_buf) \
+	(struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_RX_ICEOF)
+
+#define GET_TX_PRS(_buf) \
+	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_TX_ICEOF)
+
+#define L2_ETH_MAC_PRESENT(prs) \
+	(rte_be_to_cpu_16((prs)->l2r) & ETH_PRESENT_MASK)
+
+#define L3_IPV4_PRESENT(prs) \
+	(rte_be_to_cpu_16((prs)->l3r) & FIRST_IPV4_PRESENT_MASK)
+
+#define L3_IPV6_PRESENT(prs) \
+	(rte_be_to_cpu_16((prs)->l3r) & FIRST_IPV6_PRESENT_MASK)
+
+#define L3_OPT_PRESENT(prs) \
+	(rte_be_to_cpu_16((prs)->l3r) & IPOPT_MASK)
+
+#define L4_UDP_PRESENT(prs) \
+	((((prs)->l4r & L4_TYPE_MASK) >> L4_TYPE_SHIFT) == UDP_PRESENT)
+#define L4_TCP_PRESENT(prs) \
+	((((prs)->l4r & L4_TYPE_MASK) >> L4_TYPE_SHIFT) == TCP_PRESENT)
+#define L4_IPSEC_PRESENT(prs) \
+	((((prs)->l4r & L4_TYPE_MASK) >> L4_TYPE_SHIFT) == IPSEC_PRESENT)
+#define L4_SCTP_PRESENT(prs) \
+	((((prs)->l4r & L4_TYPE_MASK) >> L4_TYPE_SHIFT) == SCTP_PRESENT)
+
+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
+			      struct rte_mbuf **bufs __rte_unused,
+			      uint16_t nb_bufs __rte_unused);
+
+int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
+			   struct qm_fd *fd,
+			   uint32_t bpid);
+
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid);
+
+#endif
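
The FD masks above pack the format, buffer offset and frame length of a
contiguous frame descriptor into its 32-bit "opaque" word. A decode
helper mirroring what dpaa_eth_fd_to_mbuf() does inline would look like
(sketch, using only the macros from this header):

    #include <stdint.h>

    static inline void dpaa_fd_decode(uint32_t opaque, uint32_t *format,
                                      uint32_t *offset, uint32_t *length)
    {
        /* bits 31:29 - frame format (QM_FD_CONTIG etc.) */
        *format = (opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
        /* bits 28:20 - offset of the frame within the buffer */
        *offset = (opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
        /* bits 19:0 - frame length */
        *length = opaque & DPAA_FD_LENGTH_MASK;
    }
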
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 80e5530..6939bc5 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -181,6 +181,7 @@ endif # CONFIG_RTE_LIBRTE_DPAA2_PMD
 
 ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)       += -lrte_bus_dpaa
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)       += -lrte_mempool_dpaa
 endif
 
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
-- 
2.7.4


