[dpdk-dev] [PATCH v6 23/27] net/thunderx: add Rx queue start and stop support

Jerin Jacob jerin.jacob at caviumnetworks.com
Fri Jun 17 15:29:50 CEST 2016

Add rx_queue_start and rx_queue_stop callbacks to the ethdev ops.
Starting a queue points it at the shared RBDR and configures its RQ
and CQ in the QSET; stopping reclaims the RQ, frees any mbufs still
pending in the completion queue, refills the RBDR and reclaims the CQ.
Both paths then reprogram the CPI and the RSS RETA so that traffic is
steered only to queues that are started.

Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
Signed-off-by: Maciej Czekaj <maciej.czekaj at caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski at caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak at semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek at semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad at semihalf.com>
---
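
A note on the RETA rebuild, with illustrative queue ids (not taken
from the patch): if only queues 0 and 2 are started,
nicvf_configure_rss_reta() collects qmap = {0, 2} and fills every
entry of the indirection table round-robin,

	default_reta[i] = qmap[i % 2];	/* -> 0, 2, 0, 2, ... */

so the RSS hash can only select a queue that is actually running.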
 drivers/net/thunderx/nicvf_ethdev.c | 171 ++++++++++++++++++++++++++++++++++++
 drivers/net/thunderx/nicvf_rxtx.c   |  18 ++++
 drivers/net/thunderx/nicvf_rxtx.h   |   1 +
 3 files changed, 190 insertions(+)
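
For context, once these callbacks are registered an application can
quiesce and restart an individual Rx queue at runtime through the
ethdev API. A minimal sketch, assuming port 0 is already configured
and started and queue 1 exists; toggle_rx_queue() is a hypothetical
application-side helper and error handling is kept short:

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	toggle_rx_queue(uint8_t port, uint16_t qidx)
	{
		/* Stop: the driver reclaims the RQ/CQ, drains pending
		 * mbufs and drops the queue from CPI/RETA steering. */
		if (rte_eth_dev_rx_queue_stop(port, qidx) != 0)
			printf("rx_queue_stop(%d) failed\n", qidx);

		/* Start: the driver reconfigures the RQ/CQ and steers
		 * traffic to the queue again. */
		if (rte_eth_dev_rx_queue_start(port, qidx) != 0)
			printf("rx_queue_start(%d) failed\n", qidx);
	}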

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 33d5fba..ed69147 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -562,6 +562,58 @@ nicvf_tx_queue_reset(struct nicvf_txq *txq)
 	txq->xmit_bufs = 0;
 }
 
+
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	uint16_t qidx, qcnt;
+	int ret;
+
+	/* Count started rx queues */
+	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
+		if (dev->data->rx_queue_state[qidx] ==
+		    RTE_ETH_QUEUE_STATE_STARTED)
+			qcnt++;
+
+	nic->cpi_alg = CPI_ALG_NONE;
+	ret = nicvf_mbox_config_cpi(nic, qcnt);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+	return ret;
+}
+
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	unsigned int idx, qmap_size;
+	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+	if (nic->cpi_alg != CPI_ALG_NONE)
+		return -EINVAL;
+
+	/* Prepare queue map */
+	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+		if (dev->data->rx_queue_state[idx] ==
+				RTE_ETH_QUEUE_STATE_STARTED)
+			qmap[qmap_size++] = idx;
+	}
+
+	/* All queues stopped: keep RETA as-is, avoid modulo by zero */
+	if (qmap_size == 0)
+		return 0;
+
+	/* Update default RSS RETA */
+	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+		default_reta[idx] = qmap[idx % qmap_size];
+
+	return nicvf_rss_reta_update(nic, default_reta,
+				     NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
 static void
 nicvf_dev_tx_queue_release(void *sq)
 {
@@ -687,6 +739,33 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	return 0;
 }
 
+static inline void
+nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
+{
+	uint32_t rxq_cnt;
+	uint32_t nb_pkts, released_pkts = 0;
+	uint32_t refill_cnt = 0;
+	struct rte_eth_dev *dev = rxq->nic->eth_dev;
+	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+	if (dev->rx_pkt_burst == NULL)
+		return;
+
+	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+					NICVF_MAX_RX_FREE_THRESH);
+		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
+		while (nb_pkts) {
+			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+			released_pkts++;
+		}
+	}
+
+	refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
+		    released_pkts, refill_cnt);
+}
+
 static void
 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
 {
@@ -695,6 +774,69 @@ nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
 	rxq->recv_buffers = 0;
 }
 
+static inline int
+nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	struct nicvf_rxq *rxq;
+	int ret;
+
+	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	/* Update rbdr pointer to all rxq */
+	rxq = dev->data->rx_queues[qidx];
+	rxq->shared_rbdr = nic->rbdr;
+
+	ret = nicvf_qset_rq_config(nic, qidx, rxq);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+		goto config_rq_error;
+	}
+	ret = nicvf_qset_cq_config(nic, qidx, rxq);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+		goto config_cq_error;
+	}
+
+	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+	return 0;
+
+config_cq_error:
+	nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+	nicvf_qset_rq_reclaim(nic, qidx);
+	return ret;
+}
+
+static inline int
+nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	struct nicvf_rxq *rxq;
+	int ret, other_error;
+
+	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+		return 0;
+
+	ret = nicvf_qset_rq_reclaim(nic, qidx);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);
+
+	other_error = ret;
+	rxq = dev->data->rx_queues[qidx];
+	nicvf_rx_queue_release_mbufs(rxq);
+	nicvf_rx_queue_reset(rxq);
+
+	ret = nicvf_qset_cq_reclaim(nic, qidx);
+	if (ret)
+		PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);
+
+	other_error |= ret;
+	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	return other_error;
+}
+
 static void
 nicvf_dev_rx_queue_release(void *rx_queue)
 {
@@ -707,6 +849,33 @@ nicvf_dev_rx_queue_release(void *rx_queue)
 }
 
 static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	int ret;
+
+	ret = nicvf_start_rx_queue(dev, qidx);
+	if (ret)
+		return ret;
+
+	ret = nicvf_configure_cpi(dev);
+	if (ret)
+		return ret;
+
+	return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+	int ret;
+
+	ret = nicvf_stop_rx_queue(dev, qidx);
+	ret |= nicvf_configure_cpi(dev);
+	ret |= nicvf_configure_rss_reta(dev);
+	return ret;
+}
+
+static int
 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			 uint16_t nb_desc, unsigned int socket_id,
 			 const struct rte_eth_rxconf *rx_conf,
@@ -933,6 +1102,8 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
+	.rx_queue_start           = nicvf_dev_rx_queue_start,
+	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
 	.rx_queue_release         = nicvf_dev_rx_queue_release,
 	.rx_queue_count           = nicvf_dev_rx_queue_count,
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 1c6d6a8..eb51a72 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -579,3 +579,21 @@ nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 	rxq = dev->data->rx_queues[queue_idx];
 	return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
 }
+
+uint32_t
+nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct nicvf_rxq *rxq;
+	uint32_t to_process;
+	uint32_t rx_free;
+
+	rxq = dev->data->rx_queues[queue_idx];
+	to_process = rxq->recv_buffers;
+	while (rxq->recv_buffers > 0) {
+		rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
+	}
+
+	assert(rxq->recv_buffers == 0);
+	return to_process;
+}
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index ded87f3..9dad8a5 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -85,6 +85,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
 #endif
 
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
 
 uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
 uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-- 
2.5.5