[dpdk-dev] [PATCH v2 08/13] net/ppfe: add queue setup and release operations

Gagandeep Singh g.singh at nxp.com
Wed Aug 28 13:08:44 CEST 2019


This patch adds the Rx/Tx queue setup and release operations
and advertises the supported checksum offloads.
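
For reference, an application could request these offloads at
configure time roughly as below. This is only a sketch, not part
of the patch; "port_id" is assumed to be a valid probed port and
error handling is omitted:

	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
			       DEV_RX_OFFLOAD_UDP_CKSUM |
			       DEV_RX_OFFLOAD_TCP_CKSUM;
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_UDP_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM;
	/* one Rx and one Tx queue, using the offloads advertised above */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);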

Signed-off-by: Gagandeep Singh <g.singh at nxp.com>
Acked-by: Nipun Gupta <nipun.gupta at nxp.com>
Acked-by: Akhil Goyal <akhil.goyal at nxp.com>
---
 doc/guides/nics/features/ppfe.ini |   2 +
 doc/guides/nics/ppfe.rst          |   1 +
 drivers/net/ppfe/pfe_hif.c        | 114 ++++++++++++++++++++++++++++++
 drivers/net/ppfe/pfe_hif.h        |   1 +
 drivers/net/ppfe/pfe_hif_lib.c    |  48 +++++++++++++
 drivers/net/ppfe/ppfe_ethdev.c    | 100 +++++++++++++++++++++++++-
 6 files changed, 263 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ppfe.ini b/doc/guides/nics/features/ppfe.ini
index cd5f836a3..4e38ffd24 100644
--- a/doc/guides/nics/features/ppfe.ini
+++ b/doc/guides/nics/features/ppfe.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Linux VFIO           = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/doc/guides/nics/ppfe.rst b/doc/guides/nics/ppfe.rst
index 5c58c7837..6313229cb 100644
--- a/doc/guides/nics/ppfe.rst
+++ b/doc/guides/nics/ppfe.rst
@@ -91,6 +91,7 @@ pfe.ko is required for PHY initialisation.
 PPFE Features
 ~~~~~~~~~~~~~~
 
+- L3/L4 checksum offload
 - ARMv8
 
 Supported PPFE SoCs
diff --git a/drivers/net/ppfe/pfe_hif.c b/drivers/net/ppfe/pfe_hif.c
index 3a044bc04..fc1a0fd91 100644
--- a/drivers/net/ppfe/pfe_hif.c
+++ b/drivers/net/ppfe/pfe_hif.c
@@ -41,6 +41,120 @@ static void pfe_hif_free_descr(struct pfe_hif *hif)
 	rte_free(hif->descr_baseaddr_v);
 }
 
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * fills the Rx ring with buffers.
+ */
+int pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc	*desc, *first_desc_p;
+	uint32_t i = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Check that enough Rx buffers are available in the shared memory */
+	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+		return -ENOMEM;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Rx descriptors */
+	desc = hif->rx_base;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		/* Initialize Rx buffers from the shared memory */
+		struct rte_mbuf *mbuf =
+			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];
+
+		/* The PPFE mbuf layout is as follows:
+		 * ----------------------------------------------------------+
+		 * | mbuf  | priv | headroom (annotation + PPFE data) | data |
+		 * ----------------------------------------------------------+
+		 *
+		 * The PPFE block delivers additional information (parse
+		 * results, eth id, queue id) along with the packet data, so
+		 * extra memory must be provided with each packet handed to
+		 * the HIF Rx rings, giving the PPFE block room to write its
+		 * headers. The data pointer given to the HIF rings is
+		 * therefore calculated as:
+		 * mbuf->data pointer - PFE_PKT_HEADER_SZ
+		 *
+		 * The HEADROOM area is used to receive the PPFE block
+		 * headers. On packet reception, the HIF driver uses the PPFE
+		 * header information to select the client and to fill in the
+		 * parse results; after that the application can use or
+		 * overwrite the HEADROOM area.
+		 */
+		hif->rx_buf_vaddr[i] =
+			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
+					PFE_PKT_HEADER_SZ);
+		hif->rx_buf_addr[i] =
+			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
+					PFE_PKT_HEADER_SZ);
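+		/* The buffer length advertised to the ring excludes the
+		 * standard mbuf headroom.
+		 */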
+		hif->rx_buf_len[i] =  mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+
+		hif->shm->rx_buf_pool[i] = NULL;
+
+		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
+					&desc->data);
+		writel(0, &desc->status);
+
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		rte_wmb();
+
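+		/* Setting BD_CTRL_DESC_EN below hands ownership of this
+		 * buffer to the hardware.
+		 */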
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+			| BD_CTRL_DIR | BD_CTRL_DESC_EN
+			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
+
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		desc++;
+	}
+
+	/* Overwrite the last descriptor to chain it back to the first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+	hif->rxtoclean_index = 0;
+
+	/* Initialize the Rx buffer descriptor ring base address */
+	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
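+	/* The Tx descriptor ring is laid out immediately after the Rx ring
+	 * in the same contiguous descriptor area.
+	 */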
+	hif->tx_base = hif->rx_base + hif->rx_ring_size;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+				hif->rx_ring_size;
+	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Tx descriptors */
+	desc = hif->tx_base;
+
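+	/* Leave ctrl at zero (BD_CTRL_DESC_EN not set): the hardware owns
+	 * no Tx descriptor until a packet is actually queued.
+	 */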
+	for (i = 0; i < hif->tx_ring_size; i++) {
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+
+	/* Overwrite the last descriptor to chain it back to the first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+	hif->txavail = hif->tx_ring_size;
+	hif->txtosend = 0;
+	hif->txtoclean = 0;
+	hif->txtoflush = 0;
+
+	/* Initialize the Tx buffer descriptor ring base address */
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+	return 0;
+}
+
 /*
  * pfe_hif_client_register
  *
diff --git a/drivers/net/ppfe/pfe_hif.h b/drivers/net/ppfe/pfe_hif.h
index 483db75da..80f78551c 100644
--- a/drivers/net/ppfe/pfe_hif.h
+++ b/drivers/net/ppfe/pfe_hif.h
@@ -143,5 +143,6 @@ void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
 int pfe_hif_init(struct pfe *pfe);
 void pfe_hif_exit(struct pfe *pfe);
 void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_init_buffers(struct pfe_hif *hif);
 
 #endif /* _PFE_HIF_H_ */
diff --git a/drivers/net/ppfe/pfe_hif_lib.c b/drivers/net/ppfe/pfe_hif_lib.c
index 4c19108eb..d86f3ecaf 100644
--- a/drivers/net/ppfe/pfe_hif_lib.c
+++ b/drivers/net/ppfe/pfe_hif_lib.c
@@ -15,6 +15,54 @@ unsigned int emac_txq_cnt;
 /*HIF shared memory Global variable */
 struct hif_shm ghif_shm;
 
+/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit().
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ */
+void pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+	unsigned int i;
+	void *pkt;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		pkt = hif_shm->rx_buf_pool[i];
+		if (pkt)
+			rte_pktmbuf_free((struct rte_mbuf *)pkt);
+	}
+}
+
+/* Initialize the shared memory used between the HIF driver and clients;
+ * allocate the rx_buffer_pool required for the HIF Rx descriptors.
+ * This function should be called before initializing the HIF driver.
+ *
+ * @param[in] hif_shm		Shared memory address location in DDR
+ * @return			0 on success, <0 on failure to initialize
+ */
+int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+
+	memset(hif_shm, 0, sizeof(struct hif_shm));
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf)
+			hif_shm->rx_buf_pool[i] = mbuf;
+		else
+			goto err0;
+	}
+
+	return 0;
+
+err0:
+	PFE_PMD_ERR("Low memory");
+	pfe_hif_shm_clean(hif_shm);
+	return -ENOMEM;
+}
+
 /*This function sends indication to HIF driver
  *
  * @param[in] hif	hif context
diff --git a/drivers/net/ppfe/ppfe_ethdev.c b/drivers/net/ppfe/ppfe_ethdev.c
index bb4d40077..be9a7fec7 100644
--- a/drivers/net/ppfe/ppfe_ethdev.c
+++ b/drivers/net/ppfe/ppfe_ethdev.c
@@ -13,15 +13,28 @@
 #define PPFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
 #define PPFE_VDEV_GEM_ID_ARG	("intf")
 
-struct pfe_vdev_init_params {
-	int8_t	gem_id;
-};
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 static void *cbus_emac_base[3];
 static void *cbus_gpi_base[3];
 struct pfe *g_pfe;
 unsigned int pfe_svr = SVR_LS1012A_REV1;
 
+struct pfe_vdev_init_params {
+	int8_t	gem_id;
+};
+
 /* pfe_gemac_init
  */
 static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
@@ -287,6 +300,83 @@ pfe_eth_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
+}
+
+/* Only the mb_pool given on the first call of this API is used for
+ * the whole system; nb_rx_desc and rx_conf are unused parameters.
+ */
+static int pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		__rte_unused uint16_t nb_rx_desc,
+		__rte_unused unsigned int socket_id,
+		__rte_unused const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mb_pool)
+{
+	int rc = 0;
+	struct pfe *pfe;
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	pfe = priv->pfe;
+
+	if (queue_idx >= EMAC_RXQ_CNT) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+				queue_idx, EMAC_RXQ_CNT);
+		return -1;
+	}
+
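+	/* The HIF rings and buffers are shared by all queues, so set them
+	 * up only once, on the first Rx queue setup.
+	 */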
+	if (!pfe->hif.setuped) {
+		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+		if (rc) {
+			PFE_PMD_ERR("Could not allocate buffer descriptors");
+			return -1;
+		}
+
+		pfe->hif.shm->pool = mb_pool;
+		if (pfe_hif_init_buffers(&pfe->hif)) {
+			PFE_PMD_ERR("Could not initialize buffer descriptors");
+			return -1;
+		}
+		hif_init();
+		hif_rx_enable();
+		hif_tx_enable();
+		pfe->hif.setuped = 1;
+	}
+	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+	priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+	return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
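+	/* Rx queues point into statically allocated client state
+	 * (priv->client.rx_q), so there is nothing to free here.
+	 */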
+	PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   __rte_unused uint16_t nb_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	if (queue_idx >= emac_txq_cnt) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+				queue_idx, emac_txq_cnt);
+		return -1;
+	}
+	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+	priv->client.tx_q[queue_idx].queue_id = queue_idx;
+	return 0;
 }
 
 static const struct eth_dev_ops ops = {
@@ -295,6 +385,10 @@ static const struct eth_dev_ops ops = {
 	.dev_close = pfe_eth_close,
 	.dev_configure = pfe_eth_configure,
 	.dev_infos_get = pfe_eth_info,
+	.rx_queue_setup = pfe_rx_queue_setup,
+	.rx_queue_release = pfe_rx_queue_release,
+	.tx_queue_setup = pfe_tx_queue_setup,
+	.tx_queue_release = pfe_tx_queue_release,
 };
 
 /* pfe_eth_init_one
-- 
2.17.1


