[PATCH 05/14] net/nfp: rename function and struct
Jin Liu
jin.liu at corigine.com
Thu Jun 2 03:52:55 CEST 2022
Add 'nfd3' into the names of the functions and structs of the ethdev driver
for the NFD3 firmware, in preparation for the following patches, which will
add support for another firmware version (NFDK).
Signed-off-by: Jin Liu <jin.liu at corigine.com>
Signed-off-by: Diana Wang <na.wang at corigine.com>
Signed-off-by: Peng Zhang <peng.zhang at corigine.com>
Signed-off-by: Chaoyong He <chaoyong.he at corigine.com>
Signed-off-by: Niklas Söderlund <niklas.soderlund at corigine.com>
---
drivers/net/nfp/nfp_ethdev.c | 28 +++++++++----------
drivers/net/nfp/nfp_ethdev_vf.c | 26 +++++++++---------
drivers/net/nfp/nfp_rxtx.c | 48 ++++++++++++++++-----------------
drivers/net/nfp/nfp_rxtx.h | 12 ++++-----
4 files changed, 57 insertions(+), 57 deletions(-)
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index e491eee99a..0d5caf94ea 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -179,7 +179,7 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
-nfp_net_stop(struct rte_eth_dev *dev)
+nfp_net_nfd3_stop(struct rte_eth_dev *dev)
{
int i;
struct nfp_net_hw *hw;
@@ -195,7 +195,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
- nfp_net_reset_tx_queue(this_tx_q);
+ nfp_net_nfd3_reset_tx_queue(this_tx_q);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -251,7 +251,7 @@ nfp_net_set_link_down(struct rte_eth_dev *dev)
/* Reset and stop device. The device can not be restarted. */
static int
-nfp_net_close(struct rte_eth_dev *dev)
+nfp_net_nfd3_close(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
struct rte_pci_device *pci_dev;
@@ -279,8 +279,8 @@ nfp_net_close(struct rte_eth_dev *dev)
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
- nfp_net_reset_tx_queue(this_tx_q);
- nfp_net_tx_queue_release(dev, i);
+ nfp_net_nfd3_reset_tx_queue(this_tx_q);
+ nfp_net_nfd3_tx_queue_release(dev, i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -329,13 +329,13 @@ nfp_net_close(struct rte_eth_dev *dev)
}
/* Initialise and register driver with DPDK Application */
-static const struct eth_dev_ops nfp_net_eth_dev_ops = {
+static const struct eth_dev_ops nfp_net_nfd3_eth_dev_ops = {
.dev_configure = nfp_net_configure,
.dev_start = nfp_net_start,
- .dev_stop = nfp_net_stop,
+ .dev_stop = nfp_net_nfd3_stop,
.dev_set_link_up = nfp_net_set_link_up,
.dev_set_link_down = nfp_net_set_link_down,
- .dev_close = nfp_net_close,
+ .dev_close = nfp_net_nfd3_close,
.promiscuous_enable = nfp_net_promisc_enable,
.promiscuous_disable = nfp_net_promisc_disable,
.link_update = nfp_net_link_update,
@@ -352,8 +352,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .tx_queue_setup = nfp_net_tx_queue_setup,
- .tx_queue_release = nfp_net_tx_queue_release,
+ .tx_queue_setup = nfp_net_nfd3_tx_queue_setup,
+ .tx_queue_release = nfp_net_nfd3_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
@@ -401,10 +401,10 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
"NFP internal port number: %d", port, hw->nfp_idx);
- eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -970,10 +970,10 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
return -ENODEV;
}
eth_dev->process_private = cpp;
- eth_dev->dev_ops = &nfp_net_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_net_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
rte_eth_dev_probing_finish(eth_dev);
}
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index aa156ad162..7d79a08041 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -143,7 +143,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)
}
static int
-nfp_netvf_stop(struct rte_eth_dev *dev)
+nfp_netvf_nfd3_stop(struct rte_eth_dev *dev)
{
struct nfp_net_txq *this_tx_q;
struct nfp_net_rxq *this_rx_q;
@@ -156,7 +156,7 @@ nfp_netvf_stop(struct rte_eth_dev *dev)
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
- nfp_net_reset_tx_queue(this_tx_q);
+ nfp_net_nfd3_reset_tx_queue(this_tx_q);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -182,7 +182,7 @@ nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
/* Reset and stop device. The device can not be restarted. */
static int
-nfp_netvf_close(struct rte_eth_dev *dev)
+nfp_netvf_nfd3_close(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
struct nfp_net_txq *this_tx_q;
@@ -206,8 +206,8 @@ nfp_netvf_close(struct rte_eth_dev *dev)
/* Clear queues */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
- nfp_net_reset_tx_queue(this_tx_q);
- nfp_net_tx_queue_release(dev, i);
+ nfp_net_nfd3_reset_tx_queue(this_tx_q);
+ nfp_net_nfd3_tx_queue_release(dev, i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -236,13 +236,13 @@ nfp_netvf_close(struct rte_eth_dev *dev)
}
/* Initialise and register VF driver with DPDK Application */
-static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
+static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
.dev_configure = nfp_net_configure,
.dev_start = nfp_netvf_start,
- .dev_stop = nfp_netvf_stop,
+ .dev_stop = nfp_netvf_nfd3_stop,
.dev_set_link_up = nfp_netvf_set_link_up,
.dev_set_link_down = nfp_netvf_set_link_down,
- .dev_close = nfp_netvf_close,
+ .dev_close = nfp_netvf_nfd3_close,
.promiscuous_enable = nfp_net_promisc_enable,
.promiscuous_disable = nfp_net_promisc_disable,
.link_update = nfp_net_link_update,
@@ -259,8 +259,8 @@ static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
.rss_hash_conf_get = nfp_net_rss_hash_conf_get,
.rx_queue_setup = nfp_net_rx_queue_setup,
.rx_queue_release = nfp_net_rx_queue_release,
- .tx_queue_setup = nfp_net_tx_queue_setup,
- .tx_queue_release = nfp_net_tx_queue_release,
+ .tx_queue_setup = nfp_net_nfd3_tx_queue_setup,
+ .tx_queue_release = nfp_net_nfd3_tx_queue_release,
.rx_queue_intr_enable = nfp_rx_queue_intr_enable,
.rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
@@ -291,10 +291,10 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
+ eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
eth_dev->rx_queue_count = nfp_net_rx_queue_count;
eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
- eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
+ eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -467,7 +467,7 @@ static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
/* VF cleanup, just free private port data */
- return nfp_netvf_close(eth_dev);
+ return nfp_netvf_nfd3_close(eth_dev);
}
static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index cd7faa2c58..2c9875e829 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -618,7 +618,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
}
static void
-nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
+nfp_net_nfd3_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
unsigned int i;
@@ -634,12 +634,12 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
}
void
-nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_nfd3_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
if (txq) {
- nfp_net_tx_queue_release_mbufs(txq);
+ nfp_net_nfd3_tx_queue_release_mbufs(txq);
rte_eth_dma_zone_free(dev, "tx_ring", queue_idx);
rte_free(txq->txbufs);
rte_free(txq);
@@ -647,15 +647,15 @@ nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
}
void
-nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
+nfp_net_nfd3_reset_tx_queue(struct nfp_net_txq *txq)
{
- nfp_net_tx_queue_release_mbufs(txq);
+ nfp_net_nfd3_tx_queue_release_mbufs(txq);
txq->wr_p = 0;
txq->rd_p = 0;
}
int
-nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
@@ -670,7 +670,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
PMD_INIT_FUNC_TRACE();
/* Validating number of descriptors */
- tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
+ tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
nb_desc > NFP_NET_MAX_TX_DESC ||
nb_desc < NFP_NET_MIN_TX_DESC) {
@@ -698,7 +698,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (dev->data->tx_queues[queue_idx]) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
queue_idx);
- nfp_net_tx_queue_release(dev, queue_idx);
+ nfp_net_nfd3_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
}
@@ -718,12 +718,12 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* resizing in later calls to the queue setup function.
*/
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
- sizeof(struct nfp_net_tx_desc) *
+ sizeof(struct nfp_net_nfd3_tx_desc) *
NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
socket_id);
if (tz == NULL) {
PMD_DRV_LOG(ERR, "Error allocating tx dma");
- nfp_net_tx_queue_release(dev, queue_idx);
+ nfp_net_nfd3_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
@@ -743,21 +743,21 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
- txq->txds = (struct nfp_net_tx_desc *)tz->addr;
+ txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
/* mbuf pointers array for referencing mbufs linked to TX descriptors */
txq->txbufs = rte_zmalloc_socket("txq->txbufs",
sizeof(*txq->txbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->txbufs == NULL) {
- nfp_net_tx_queue_release(dev, queue_idx);
+ nfp_net_nfd3_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
txq->txbufs, txq->txds, (unsigned long)txq->dma);
- nfp_net_reset_tx_queue(txq);
+ nfp_net_nfd3_reset_tx_queue(txq);
txq->hw = hw;
@@ -773,7 +773,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Leaving always free descriptors for avoiding wrapping confusion */
static inline
-uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
+uint32_t nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
{
if (txq->wr_p >= txq->rd_p)
return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
@@ -790,14 +790,14 @@ uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
* This function uses the host copy* of read/write pointers
*/
static inline
-uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
+uint32_t nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
{
- return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
+ return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh);
}
/* nfp_net_tx_tso - Set TX descriptor for TSO */
static inline void
-nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
struct rte_mbuf *mb)
{
uint64_t ol_flags;
@@ -828,7 +828,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
-nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
struct rte_mbuf *mb)
{
uint64_t ol_flags;
@@ -857,11 +857,11 @@ nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
}
uint16_t
-nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct nfp_net_txq *txq;
struct nfp_net_hw *hw;
- struct nfp_net_tx_desc *txds, txd;
+ struct nfp_net_nfd3_tx_desc *txds, txd;
struct rte_mbuf *pkt;
uint64_t dma_addr;
int pkt_size, dma_size;
@@ -876,10 +876,10 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
txq->qidx, txq->wr_p, nb_pkts);
- if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+ if ((nfp_net_nfd3_free_tx_desc(txq) < nb_pkts) || (nfp_net_nfd3_txq_full(txq)))
nfp_net_tx_free_bufs(txq);
- free_descs = (uint16_t)nfp_free_tx_desc(txq);
+ free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
if (unlikely(free_descs == 0))
return 0;
@@ -913,8 +913,8 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* multisegment packet, but TSO info needs to be in all of them.
*/
txd.data_len = pkt->pkt_len;
- nfp_net_tx_tso(txq, &txd, pkt);
- nfp_net_tx_cksum(txq, &txd, pkt);
+ nfp_net_nfd3_tx_tso(txq, &txd, pkt);
+ nfp_net_nfd3_tx_cksum(txq, &txd, pkt);
if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index dec1857da3..87a9c59b3b 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -53,7 +53,7 @@
#define PCIE_DESC_TX_ENCAP_VXLAN (1 << 1)
#define PCIE_DESC_TX_ENCAP_GRE (1 << 0)
-struct nfp_net_tx_desc {
+struct nfp_net_nfd3_tx_desc {
union {
struct {
uint8_t dma_addr_hi; /* High bits of host buf address */
@@ -124,7 +124,7 @@ struct nfp_net_txq {
* of the queue and @size is the size in bytes for the queue
* (needed for free)
*/
- struct nfp_net_tx_desc *txds;
+ struct nfp_net_nfd3_tx_desc *txds;
/*
* At this point 48 bytes have been used for all the fields in the
@@ -284,12 +284,12 @@ int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
-void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
-void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
-int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+void nfp_net_nfd3_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void nfp_net_nfd3_reset_tx_queue(struct nfp_net_txq *txq);
+int nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
#endif /* _NFP_RXTX_H_ */
--
2.27.0
More information about the dev
mailing list