[PATCH 06/10] net/cpfl: support hairpin queue configuration
Xing, Beilei
beilei.xing at intel.com
Fri May 19 07:43:39 CEST 2023
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu at intel.com>
> Sent: Monday, April 24, 2023 5:48 PM
> To: Xing, Beilei <beilei.xing at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>
> Cc: dev at dpdk.org; Wang, Xiao W <xiao.w.wang at intel.com>
> Subject: RE: [PATCH 06/10] net/cpfl: support hairpin queue configuration
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing at intel.com>
> > Sent: Friday, April 21, 2023 2:51 PM
> > To: Wu, Jingjing <jingjing.wu at intel.com>
> > Cc: dev at dpdk.org; Liu, Mingxia <mingxia.liu at intel.com>; Xing, Beilei
> > <beilei.xing at intel.com>; Wang, Xiao W <xiao.w.wang at intel.com>
> > Subject: [PATCH 06/10] net/cpfl: support hairpin queue configuration
> >
> > From: Beilei Xing <beilei.xing at intel.com>
> >
> > This patch supports Rx/Tx hairpin queue configuration.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang at intel.com>
> > Signed-off-by: Mingxia Liu <mingxia.liu at intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing at intel.com>
> > ---
> > drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
> > drivers/common/idpf/idpf_common_virtchnl.h | 6 +
> > drivers/common/idpf/version.map | 2 +
> > drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
> > drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
> > drivers/net/cpfl/cpfl_rxtx.h | 7 ++
> > 6 files changed, 297 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
> > index 76a658bb26..50cd43a8dd 100644
> > --- a/drivers/common/idpf/idpf_common_virtchnl.c
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.c
<...>
> > static int
> > cpfl_start_queues(struct rte_eth_dev *dev)
> > {
> > +	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
> > +	struct idpf_vport *vport = &cpfl_vport->base;
> > 	struct cpfl_rx_queue *cpfl_rxq;
> > 	struct cpfl_tx_queue *cpfl_txq;
> > +	int tx_cmplq_flag = 0;
> > +	int rx_bufq_flag = 0;
> > +	int flag = 0;
> > 	int err = 0;
> > 	int i;
> >
> > +	/* For normal data queues, configure, init and enable Txq.
> > +	 * For non-cross vport hairpin queues, configure Txq.
> > +	 */
> > 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > 		cpfl_txq = dev->data->tx_queues[i];
> > 		if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
> > 			continue;
> > -		err = cpfl_tx_queue_start(dev, i);
> > +		if (!cpfl_txq->hairpin_info.hairpin_q) {
> > +			err = cpfl_tx_queue_start(dev, i);
> > +			if (err != 0) {
> > +				PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
> > +				return err;
> > +			}
> > +		} else if (!cpfl_txq->hairpin_info.manual_bind) {
> > +			if (flag == 0) {
> > +				err = cpfl_txq_hairpin_info_update(dev,
> > +								   cpfl_txq->hairpin_info.peer_rxp);
> > +				if (err != 0) {
> > +					PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
> > +					return err;
> > +				}
> > +				flag = 1;
> [Liu, Mingxia] The variable flag is not used, can it be removed?
It's used in the code above; txq_hairpin_info should only be updated once.
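To make the intent concrete, here is a minimal, self-contained sketch of that
one-shot update pattern (the struct and the update_hairpin_info_once() helper
are placeholders for illustration, not the actual cpfl driver API):

#include <stdio.h>

struct fake_txq {
	int hairpin_q;    /* stand-in for hairpin_info.hairpin_q */
	int manual_bind;  /* stand-in for hairpin_info.manual_bind */
};

/* Placeholder for cpfl_txq_hairpin_info_update(). */
static int update_hairpin_info_once(void)
{
	printf("hairpin info updated\n");
	return 0;
}

int main(void)
{
	struct fake_txq txqs[4] = { {0, 0}, {1, 0}, {1, 0}, {1, 1} };
	int flag = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (!txqs[i].hairpin_q || txqs[i].manual_bind)
			continue;
		/* The flag guards the call so the peer-port info is
		 * refreshed only on the first hairpin Tx queue.
		 */
		if (flag == 0) {
			if (update_hairpin_info_once() != 0)
				return -1;
			flag = 1;
		}
		/* Per-queue hairpin Tx configuration would follow here. */
	}
	return 0;
}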
> > +			}
> > +			err = cpfl_hairpin_txq_config(vport, cpfl_txq);
> > +			if (err != 0) {
> > +				PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
> > +				return err;
> > +			}
> > +			tx_cmplq_flag = 1;
> > +		}
> > +	}
> > +
>
> > +	/* For non-cross vport hairpin queues, configure Tx completion queue first. */
> > +	if (tx_cmplq_flag == 1 && cpfl_vport->p2p_tx_complq != NULL) {
> > +		err = cpfl_hairpin_tx_complq_config(cpfl_vport);
> > 		if (err != 0) {
> > -			PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
> > +			PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
> > 			return err;
> > 		}
> > 	}
> >
> [Liu, Mingxia] Better to move this code next to
> +			err = cpfl_hairpin_txq_config(vport, cpfl_txq);
> +			if (err != 0) {
> +				PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
> +				return err;
> +			}
> When cpfl_rxq->hairpin_info.hairpin_q is true, then cpfl_vport->p2p_tx_complq is not null, right?
> And remove tx_cmplq_flag?
The hairpin Tx completion queue should only be configured once, so it should not be inside the for loop.
However, the code is refined in v2.
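To illustrate the point, a rough, self-contained sketch of that split: the
per-queue configuration stays inside the loop while the shared completion
queue is configured exactly once after it (the helpers below are placeholders,
not the actual cpfl/idpf API):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for cpfl_hairpin_txq_config(). */
static int config_hairpin_txq(int qid)
{
	printf("config hairpin Tx queue %d\n", qid);
	return 0;
}

/* Placeholder for cpfl_hairpin_tx_complq_config(); the completion queue is
 * shared by all hairpin Tx queues of the vport, so it is configured once.
 */
static int config_tx_complq(void)
{
	printf("config Tx completion queue\n");
	return 0;
}

int main(void)
{
	bool need_complq = false;
	int i;

	for (i = 0; i < 4; i++) {
		if (config_hairpin_txq(i) != 0)
			return -1;
		need_complq = true;  /* plays the role of tx_cmplq_flag */
	}

	if (need_complq && config_tx_complq() != 0)
		return -1;
	return 0;
}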
>
> > +	/* For normal data queues, configure, init and enable Rxq.
> > +	 * For non-cross vport hairpin queues, configure Rxq, and then init Rxq.
> > +	 */
> > +	cpfl_rxq_hairpin_mz_bind(dev);
> > 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > 		cpfl_rxq = dev->data->rx_queues[i];
> > 		if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
> > 			continue;
> > -		err = cpfl_rx_queue_start(dev, i);
> > +		if (!cpfl_rxq->hairpin_info.hairpin_q) {
> > +			err = cpfl_rx_queue_start(dev, i);
> > +			if (err != 0) {
> > +				PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
> > +				return err;
> > +			}
> > +		} else if (!cpfl_rxq->hairpin_info.manual_bind) {
> > +			err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
> > +			if (err != 0) {
> > +				PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
> > +				return err;
> > +			}
> > +			err = cpfl_rx_queue_init(dev, i);
> > +			if (err != 0) {
> > +				PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
> > +				return err;
> > +			}
> > +			rx_bufq_flag = 1;
> > +		}
> > +	}
> > +
>
> > +	/* For non-cross vport hairpin queues, configure Rx buffer queue. */
> > +	if (rx_bufq_flag == 1 && cpfl_vport->p2p_rx_bufq != NULL) {
> > +		err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
> > 		if (err != 0) {
> > -			PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
> > +			PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
> > 			return err;
> > 		}
> > 	}
> [Liu, Mingxia] Similar to above.
>
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
> > index 64ed331a6d..040beb5bac 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -930,6 +930,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > 	return 0;
> > }
> >
> > +int
> > +cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
> > +{
> > +	struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
> > +	struct virtchnl2_rxq_info rxq_info[1] = {0};
> > +
> > +	rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > +	rxq_info[0].queue_id = rx_bufq->queue_id;
> > +	rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
> > +	rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
> > +	rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > +	rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
> > +	rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +	rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
> > +	rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
> > +
> > +	return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
> > +}
> > +
> > +int
> > +cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
> > +{
> > +	struct virtchnl2_rxq_info rxq_info[1] = {0};
> > +	struct idpf_rx_queue *rxq = &cpfl_rxq->base;
> > +
> > +	rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +	rxq_info[0].queue_id = rxq->queue_id;
> > +	rxq_info[0].ring_len = rxq->nb_rx_desc;
> > +	rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
> > +	rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
> > +	rxq_info[0].max_pkt_size = vport->max_pkt_len;
> > +	rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > +	rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
> > +
> > +	rxq_info[0].data_buffer_size = rxq->rx_buf_len;
> > +	rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +	rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
> > +
> > +	PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
> > +		    vport->vport_id, rxq_info[0].queue_id);
> > +
> > +	return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
> > +}
> > +
> > +int
> > +cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
> > +{
> > +	struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
> > +	struct virtchnl2_txq_info txq_info[1] = {0};
> > +
> > +	txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
> > +	txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > +	txq_info[0].queue_id = tx_complq->queue_id;
> > +	txq_info[0].ring_len = tx_complq->nb_tx_desc;
> > +	txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> > +	txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +	txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> > +
> > +	return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
> > +}
> > +
> > +int
> > +cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
> > +{
> > +	struct idpf_tx_queue *txq = &cpfl_txq->base;
> > +	struct virtchnl2_txq_info txq_info[1] = {0};
> > +
> > +	txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
> > +	txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
> > +	txq_info[0].queue_id = txq->queue_id;
> > +	txq_info[0].ring_len = txq->nb_tx_desc;
> > +	txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
> > +	txq_info[0].relative_queue_id = txq->queue_id;
> > +	txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
> > +	txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +	txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> > +
> > +	return idpf_vc_txq_config_by_info(vport, txq_info, 1);
> > +}
> > +
> > int
> > cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> > {
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
> > index d844c9f057..b01ce5edf9 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -30,12 +30,15 @@
> > #define CPFL_RING_BASE_ALIGN 128
> >
> > #define CPFL_DEFAULT_RX_FREE_THRESH 32
> > +#define CPFL_RXBUF_LOW_WATERMARK 64
> >
> > #define CPFL_DEFAULT_TX_RS_THRESH 32
> > #define CPFL_DEFAULT_TX_FREE_THRESH 32
> >
> > #define CPFL_SUPPORT_CHAIN_NUM 5
> >
> > +#define CPFL_RX_BUF_STRIDE 64
> > +
> > struct cpfl_rxq_hairpin_info {
> > bool hairpin_q; /* if rx queue is a hairpin queue */
> > bool manual_bind; /* for cross vport */
> > @@ -85,4 +88,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > 				uint16_t nb_desc,
> > 				const struct rte_eth_hairpin_conf *conf);
> > +int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
> > +int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
> > +int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
> > +int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
> > #endif /* _CPFL_RXTX_H_ */
> > --
> > 2.26.2