[dpdk-dev] [PATCH 01/13] net/thunderx: cleanup the driver before adding new features
Ferruh Yigit
ferruh.yigit at intel.com
Tue Sep 20 15:48:27 CEST 2016
On 8/26/2016 5:53 PM, Kamil Rytarowski wrote:
> From: Kamil Rytarowski <kamil.rytarowski at caviumnetworks.com>
>
> Refactored features:
> - enable nicvf_qset_rbdr_precharge to handle handle secondary queue sets
double "handle"
> - rte_free already handles NULL pointer
> - check mempool flags to predict being contiguous in memory
> - allow to use mempool with multiple memory chunks
I am not able to find the implementation for this.
> - simplify local construct of accessing nb_rx_queus
s/nb_rx_queus/nb_rx_queues
>
> Signed-off-by: Maciej Czekaj <maciej.czekaj at caviumnetworks.com>
> Signed-off-by: Kamil Rytarowski <kamil.rytarowski at caviumnetworks.com>
> Signed-off-by: Zyta Szpak <zyta.szpak at semihalf.com>
> Signed-off-by: Slawomir Rosek <slawomir.rosek at semihalf.com>
> Signed-off-by: Radoslaw Biernacki <rad at semihalf.com>
> Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
> ---
> drivers/net/thunderx/base/nicvf_hw.c | 10 +++++-----
> drivers/net/thunderx/base/nicvf_hw.h | 6 +++---
> drivers/net/thunderx/nicvf_ethdev.c | 32 ++++++++++++--------------------
> 3 files changed, 20 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
> index 4bdd183..1f08ef2 100644
> --- a/drivers/net/thunderx/base/nicvf_hw.c
> +++ b/drivers/net/thunderx/base/nicvf_hw.c
> @@ -141,7 +141,7 @@ nicvf_base_init(struct nicvf *nic)
> return NICVF_ERR_BASE_INIT;
>
> if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
> - nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
> + nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
is this new NICVF_CAP_CQE_RX2 flag described in the commit log?
>
> if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
> nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
> @@ -497,9 +497,9 @@ nicvf_qsize_rbdr_roundup(uint32_t val)
> }
>
> int
> -nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
> - rbdr_pool_get_handler handler,
> - void *opaque, uint32_t max_buffs)
> +nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
> + uint16_t ridx, rbdr_pool_get_handler handler,
> + uint32_t max_buffs)
> {
> struct rbdr_entry_t *desc, *desc0;
> struct nicvf_rbdr *rbdr = nic->rbdr;
> @@ -514,7 +514,7 @@ nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
> if (count >= max_buffs)
> break;
> desc0 = desc + count;
> - phy = handler(opaque);
> + phy = handler(dev, nic);
> if (phy) {
> desc0->full_addr = phy;
> count++;
> diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
> index a6cda82..2b8738b 100644
> --- a/drivers/net/thunderx/base/nicvf_hw.h
> +++ b/drivers/net/thunderx/base/nicvf_hw.h
> @@ -85,7 +85,7 @@ enum nicvf_err_e {
> NICVF_ERR_RSS_GET_SZ, /* -8171 */
> };
>
> -typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
> +typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
>
> struct nicvf_hw_rx_qstats {
> uint64_t q_rx_bytes;
> @@ -194,8 +194,8 @@ int nicvf_qset_reclaim(struct nicvf *nic);
>
> int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
> int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
> -int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
> - rbdr_pool_get_handler handler, void *opaque,
> +int nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
> + uint16_t ridx, rbdr_pool_get_handler handler,
> uint32_t max_buffs);
> int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
>
> diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
> index 4402f6a..48f2cd2 100644
> --- a/drivers/net/thunderx/nicvf_ethdev.c
> +++ b/drivers/net/thunderx/nicvf_ethdev.c
> @@ -691,7 +691,7 @@ nicvf_configure_cpi(struct rte_eth_dev *dev)
> int ret;
>
> /* Count started rx queues */
> - for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
> + for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
> if (dev->data->rx_queue_state[qidx] ==
> RTE_ETH_QUEUE_STATE_STARTED)
> qcnt++;
> @@ -1023,12 +1023,9 @@ nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
> static void
> nicvf_dev_rx_queue_release(void *rx_queue)
> {
> - struct nicvf_rxq *rxq = rx_queue;
> -
> PMD_INIT_FUNC_TRACE();
>
> - if (rxq)
> - rte_free(rxq);
> + rte_free(rx_queue);
> }
>
> static int
> @@ -1087,9 +1084,9 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
> PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
> socket_id, nic->node);
>
> - /* Mempool memory should be contiguous */
> - if (mp->nb_mem_chunks != 1) {
> - PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
> + /* Mempool memory must be contiguous */
> + if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
If you need contiguous memory, this check is not enough.
Not having this flag doesn't guarantee that memory is contiguous: a
mempool without this flag can still have multiple mem_chunks, and there
is no guarantee that the mem_chunks are contiguous with each other.
> + PMD_INIT_LOG(ERR, "Mempool memory must be contiguous");
> return -EINVAL;
> }
>
> @@ -1212,15 +1209,16 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> }
>
> static nicvf_phys_addr_t
> -rbdr_rte_mempool_get(void *opaque)
> +rbdr_rte_mempool_get(void *dev, void *opaque)
> {
> uint16_t qidx;
> uintptr_t mbuf;
> struct nicvf_rxq *rxq;
> - struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque);
> + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
> + struct nicvf *nic __rte_unused = (struct nicvf *)opaque;
>
> - for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
> - rxq = nic->eth_dev->data->rx_queues[qidx];
> + for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
> + rxq = eth_dev->data->rx_queues[qidx];
> /* Maintain equal buffer count across all pools */
> if (rxq->precharge_cnt >= rxq->qlen_mask)
> continue;
> @@ -1354,8 +1352,8 @@ nicvf_dev_start(struct rte_eth_dev *dev)
> }
>
> /* Fill rte_mempool buffers in RBDR pool and precharge it */
> - ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get,
> - dev, total_rxq_desc);
> + ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
> + total_rxq_desc);
> if (ret) {
> PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
> goto qset_rbdr_reclaim;
> @@ -1721,12 +1719,6 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
> goto malloc_fail;
> }
>
> - ret = nicvf_mbox_get_rss_size(nic);
> - if (ret) {
> - PMD_INIT_LOG(ERR, "Failed to get rss table size");
> - goto malloc_fail;
> - }
> -
Is removing mbox_get_rss_size() mentioned in the commit log?
> PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
> eth_dev->data->port_id, nic->vendor_id, nic->device_id,
> nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
>
More information about the dev
mailing list