[PATCH v14 06/18] net/idpf: add support for queue start

Xing, Beilei beilei.xing at intel.com
Fri Oct 28 19:34:02 CEST 2022



> -----Original Message-----
> From: Andrew Rybchenko <andrew.rybchenko at oktetlabs.ru>
> Sent: Friday, October 28, 2022 11:51 PM
> To: Guo, Junfeng <junfeng.guo at intel.com>; Zhang, Qi Z
> <qi.z.zhang at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>; Xing, Beilei
> <beilei.xing at intel.com>
> Cc: dev at dpdk.org; Li, Xiaoyun <xiaoyun.li at intel.com>
> Subject: Re: [PATCH v14 06/18] net/idpf: add support for queue start
> 
> On 10/27/22 10:47, Junfeng Guo wrote:
> > Add support for these device ops:
> >   - rx_queue_start
> >   - tx_queue_start
> >
> > Signed-off-by: Beilei Xing <beilei.xing at intel.com>
> > Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
> > Signed-off-by: Junfeng Guo <junfeng.guo at intel.com>
> 
> [snip]
> 
> > +#define IDPF_RX_BUF_STRIDE		64
> > +int
> > +idpf_vc_config_rxqs(struct idpf_vport *vport)
> > +{
> > +	struct idpf_adapter *adapter = vport->adapter;
> > +	struct idpf_rx_queue **rxq =
> > +		(struct idpf_rx_queue **)vport->dev_data->rx_queues;
> > +	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
> > +	struct virtchnl2_rxq_info *rxq_info;
> > +	struct idpf_cmd_info args;
> > +	uint16_t total_qs, num_qs;
> > +	int size, i, j;
> > +	int err = 0;
> > +	int k = 0;
> > +
> > +	total_qs = vport->num_rx_q + vport->num_rx_bufq;
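> > +	/* One VIRTCHNL2_OP_CONFIG_RX_QUEUES message can carry at most
> > +	 * max_rxq_per_msg queue descriptions, so send the configuration
> > +	 * in chunks.
> > +	 */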
> > +	while (total_qs) {
> > +		if (total_qs > adapter->max_rxq_per_msg) {
> > +			num_qs = adapter->max_rxq_per_msg;
> > +			total_qs -= adapter->max_rxq_per_msg;
> > +		} else {
> > +			num_qs = total_qs;
> > +			total_qs = 0;
> > +		}
> > +
> > +		size = sizeof(*vc_rxqs) + (num_qs - 1) *
> > +			sizeof(struct virtchnl2_rxq_info);
> > +		vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
> > +		if (vc_rxqs == NULL) {
> > +			PMD_DRV_LOG(ERR, "Failed to allocate
> virtchnl2_config_rx_queues");
> > +			err = -ENOMEM;
> > +			break;
> > +		}
> > +		vc_rxqs->vport_id = vport->vport_id;
> > +		vc_rxqs->num_qinfo = num_qs;
> > +		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > +			for (i = 0; i < num_qs; i++, k++) {
> > +				rxq_info = &vc_rxqs->qinfo[i];
> > +				rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
> > +				rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +				rxq_info->queue_id = rxq[k]->queue_id;
> > +				rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> > +				rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
> > +				rxq_info->max_pkt_size = vport->max_pkt_len;
> > +
> > +				rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
> > +				rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> > +
> > +				rxq_info->ring_len = rxq[k]->nb_rx_desc;
> > +			}
> > +		} else {
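> > +			/* Split queue model: each Rx queue group occupies
> > +			 * three consecutive qinfo slots, the Rx queue itself
> > +			 * followed by its two buffer queues, hence the
> > +			 * num_qs / 3 iteration count.
> > +			 */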
> > +			for (i = 0; i < num_qs / 3; i++, k++) {
> > +				/* Rx queue */
> > +				rxq_info = &vc_rxqs->qinfo[i * 3];
> > +				rxq_info->dma_ring_addr =
> > +					rxq[k]->rx_ring_phys_addr;
> > +				rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +				rxq_info->queue_id = rxq[k]->queue_id;
> > +				rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +				rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
> > +				rxq_info->max_pkt_size = vport->max_pkt_len;
> > +
> > +				rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > +				rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> > +
> > +				rxq_info->ring_len = rxq[k]->nb_rx_desc;
> > +				rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
> > +				rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
> > +				rxq_info->rx_buffer_low_watermark = 64;
> > +
> > +				/* Buffer queue */
> > +				for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
> > +					struct idpf_rx_queue *bufq = j == 1 ?
> > +						rxq[k]->bufq1 : rxq[k]->bufq2;
> > +					rxq_info = &vc_rxqs->qinfo[i * 3 + j];
> > +					rxq_info->dma_ring_addr =
> > +						bufq->rx_ring_phys_addr;
> > +					rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > +					rxq_info->queue_id = bufq->queue_id;
> > +					rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +					rxq_info->data_buffer_size = bufq->rx_buf_len;
> > +					rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > +					rxq_info->ring_len = bufq->nb_rx_desc;
> > +
> > +					rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
> > +					rxq_info->rx_buffer_low_watermark = 64;
> > +				}
> > +			}
> > +		}
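> > +		/* Hand the filled chunk to the control plane over the
> > +		 * mailbox.
> > +		 */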
> > +		memset(&args, 0, sizeof(args));
> > +		args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
> > +		args.in_args = (uint8_t *)vc_rxqs;
> > +		args.in_args_size = size;
> > +		args.out_buffer = adapter->mbx_resp;
> > +		args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > +		err = idpf_execute_vc_cmd(adapter, &args);
> 
> The function must return a negative errno, since its return value is used
> as the return value of functions that themselves return negative errno.

Thanks for all your comments; please expect v15 tomorrow.
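
The plan for v15 is to have idpf_execute_vc_cmd() itself return 0 on
success and a negative errno on failure, so callers like
idpf_vc_config_rxqs() can propagate its return value unchanged. A
minimal sketch of that convention (the helper names below are
illustrative only, not the actual v15 code):

	static int
	idpf_execute_vc_cmd(struct idpf_adapter *adapter,
			    struct idpf_cmd_info *args)
	{
		int err;

		/* Hypothetical send/poll helpers; any internal status
		 * code is normalized to a negative errno before
		 * returning to the caller.
		 */
		err = idpf_vc_send(adapter, args);
		if (err != 0)
			return err < 0 ? err : -EIO;

		err = idpf_vc_poll_reply(adapter, args);
		if (err != 0)
			return err < 0 ? err : -EIO;

		return 0;
	}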

> 
> > +		rte_free(vc_rxqs);
> > +		if (err != 0) {
> > +			PMD_DRV_LOG(ERR, "Failed to execute command of
> VIRTCHNL2_OP_CONFIG_RX_QUEUES");
> > +			break;
> > +		}
> > +	}
> > +
> > +	return err;
> > +}
> 
> [snip]
> 


