[PATCH v3 4/9] net/cpfl: setup ctrl path

Liu, Mingxia mingxia.liu at intel.com
Mon Sep 11 08:30:18 CEST 2023



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao at intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang at intel.com>; dev at dpdk.org; Zhang, Qi Z
> <qi.z.zhang at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>; Xing, Beilei
> <beilei.xing at intel.com>
> Cc: Liu, Mingxia <mingxia.liu at intel.com>; Qiao, Wenjing
> <wenjing.qiao at intel.com>
> Subject: [PATCH v3 4/9] net/cpfl: setup ctrl path
> 
> Setup the control vport and control queue for flow offloading.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang at intel.com>
> Signed-off-by: Beilei Xing <beilei.xing at intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang at intel.com>
> Signed-off-by: Wenjing Qiao <wenjing.qiao at intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c | 267 +++++++++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_ethdev.h |  14 ++
>  drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
>  3 files changed, 425 insertions(+)
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 3c4a6a4724..22f3e72894 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
>  		return;
>  	}
> 
> +	/* ignore if it is ctrl vport */
> +	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
> +		return;
> +
>  	vport = cpfl_find_vport(adapter, vc_event->vport_id);
>  	if (!vport) {
>  		PMD_DRV_LOG(ERR, "Can't find vport.");
> @@ -1852,6 +1856,260 @@ cpfl_dev_alarm_handler(void *param)
>  	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
>  }
> 
> +static int
> +cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
> +{
> +	int i, ret;
> +
> +	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
> +			return ret;
> +		}
> +	}
> +
> +	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
> +			return ret;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
> +{
> +	int i, ret;
> +
> +	ret = cpfl_config_ctlq_tx(adapter);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
> +		return ret;
> +	}
> +
> +	ret = cpfl_config_ctlq_rx(adapter);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
> +		return ret;
> +	}
> +
> +	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
> +			return ret;
> +		}
> +	}
> +
> +	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
> +			return ret;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
> +{
> +	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
> +	struct cpfl_ctlq_create_info *create_cfgq_info;
> +	int i;
> +
> +	create_cfgq_info = adapter->cfgq_info;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
[Liu, Mingxia] adapter->ctlqp[i] may sometimes be NULL, for example when this function is called from the error path of cpfl_add_cfgqs(), and then cpfl_vport_ctlq_remove() will report an error, right?
So it would be better to check whether adapter->ctlqp[i] is NULL before removing it.
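Something like this minimal sketch (untested; it assumes adapter->ctlqp[] starts out zeroed when the adapter is allocated):

	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
		/* Skip slots that were never added or were already removed. */
		if (adapter->ctlqp[i]) {
			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
			adapter->ctlqp[i] = NULL;
		}
		if (create_cfgq_info[i].ring_mem.va)
			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
		if (create_cfgq_info[i].buf_mem.va)
			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
	}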

> +		if (create_cfgq_info[i].ring_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
> +		if (create_cfgq_info[i].buf_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
> +	}
> +}
> +
> +static int
> +cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
> +{
> +	struct idpf_ctlq_info *cfg_cq;
> +	int ret = 0;
> +	int i = 0;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
> +					  &adapter->cfgq_info[i],
> +					  &cfg_cq);
> +		if (ret || !cfg_cq) {
[Liu, Mingxia] Would it be better to reset cfg_cq to NULL at the start of each loop iteration? Otherwise, if cpfl_vport_ctlq_add() ever returns 0 without setting cfg_cq, a stale pointer from the previous iteration would defeat the !cfg_cq check. E.g.:
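(Untested sketch, same loop with only the reset added:)

	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
		/* Reset so a stale pointer from a previous iteration cannot
		 * slip past the (ret || !cfg_cq) check below.
		 */
		cfg_cq = NULL;
		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
					  &adapter->cfgq_info[i],
					  &cfg_cq);
		if (ret || !cfg_cq) {
			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
				    adapter->cfgq_info[i].id);
			cpfl_remove_cfgqs(adapter);
			return ret;
		}
		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
			    adapter->cfgq_info[i].id);
		adapter->ctlqp[i] = cfg_cq;
	}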

> +			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
> +				    adapter->cfgq_info[i].id);
> +			cpfl_remove_cfgqs(adapter);
> +			return ret;
> +		}
> +		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
> +			    adapter->cfgq_info[i].id);
> +		adapter->ctlqp[i] = cfg_cq;
> +	}
> +
> +	return ret;
> +}
> +
> +#define CPFL_CFGQ_RING_LEN		512
> +#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
> +#define CPFL_CFGQ_BUFFER_SIZE		256
> +#define CPFL_CFGQ_RING_SIZE		512
> +
> +static int
> +cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
> +{
> +	struct cpfl_ctlq_create_info *create_cfgq_info;
> +	struct cpfl_vport *vport;
> +	int i, err;
> +	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
> +	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
> +
> +	vport = &adapter->ctrl_vport;
> +	create_cfgq_info = adapter->cfgq_info;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		if (i % 2 == 0) {
> +			/* Setup Tx config queue */
> +			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
> +			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
> +			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
> +			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
> +			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
> +			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
> +				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
> +		} else {
> +			/* Setup Rx config queue */
> +			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
> +			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
> +			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
> +			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
> +			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
> +			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
> +				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
> +			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
> +						buf_size)) {
> +				err = -ENOMEM;
> +				goto free_mem;
> +			}
> +		}
> +		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
> +					ring_size)) {
> +			err = -ENOMEM;
> +			goto free_mem;
> +		}
> +	}
> +	return 0;
> +free_mem:
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		if (create_cfgq_info[i].ring_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
> +		if (create_cfgq_info[i].buf_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
> +	}
> +	return err;
> +}
> +
> +static int
> +cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
> +{
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_create_vport *vport_info =
> +		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
> +	int i;
> +
> +	vport->itf.adapter = adapter;
> +	vport->base.adapter = &adapter->base;
> +	vport->base.vport_id = vport_info->vport_id;
> +
> +	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
> +		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
> +			vport->base.chunks_info.tx_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->base.chunks_info.tx_qtail_start =
> +			vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->base.chunks_info.tx_qtail_spacing =
> +			vport_info->chunks.chunks[i].qtail_reg_spacing;
> +		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
> +			vport->base.chunks_info.rx_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->base.chunks_info.rx_qtail_start =
> +			vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->base.chunks_info.rx_qtail_spacing =
> +			vport_info->chunks.chunks[i].qtail_reg_spacing;
> +		} else {
> +			PMD_INIT_LOG(ERR, "Unsupported chunk type");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
> +{
> +	cpfl_remove_cfgqs(adapter);
> +	cpfl_stop_cfgqs(adapter);
> +	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
[Liu, Mingxia] Shouldn't the teardown be in the reverse order of cpfl_ctrl_path_open(), i.e. cpfl_stop_cfgqs() -> cpfl_remove_cfgqs() -> idpf_vc_vport_destroy()?
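Something like this (untested, just the same three calls reordered):

static void
cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
{
	/* Reverse of cpfl_ctrl_path_open(): stop the config queues before
	 * removing them, and destroy the control vport last.
	 */
	cpfl_stop_cfgqs(adapter);
	cpfl_remove_cfgqs(adapter);
	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
}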
> +}
> +
> +static int
> +cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
> +{
> +	int ret;
> +
> +	ret = cpfl_vc_create_ctrl_vport(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to create control vport");
> +		return ret;
> +	}
> +
> +	ret = cpfl_init_ctrl_vport(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to init control vport");
> +		goto err_init_ctrl_vport;
> +	}
> +
> +	ret = cpfl_cfgq_setup(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to setup control queues");
> +		goto err_cfgq_setup;
> +	}
> +
> +	ret = cpfl_add_cfgqs(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to add control queues");
> +		goto err_add_cfgq;
> +	}
> +
> +	ret = cpfl_start_cfgqs(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to start control queues");
> +		goto err_start_cfgqs;
> +	}
> +
> +	return 0;
> +
> +err_start_cfgqs:
> +	cpfl_stop_cfgqs(adapter);
> +err_add_cfgq:
> +	cpfl_remove_cfgqs(adapter);
> +err_cfgq_setup:
> +err_init_ctrl_vport:
> +	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
> +
> +	return ret;
> +}
> +
>  static struct virtchnl2_get_capabilities req_caps = {
>  	.csum_caps =
>  	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
> @@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
>  		goto err_vports_alloc;
>  	}
> 
> +	ret = cpfl_ctrl_path_open(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to setup control path");
> +		goto err_create_ctrl_vport;
> +	}
> +
>  	adapter->cur_vports = 0;
>  	adapter->cur_vport_nb = 0;
> 
> @@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
> 
>  	return ret;
> 
> +err_create_ctrl_vport:
> +	rte_free(adapter->vports);
>  err_vports_alloc:
>  	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
>  	cpfl_repr_whitelist_uninit(adapter);
> @@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
>  static void
>  cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
>  {
> +	cpfl_ctrl_path_close(adapter);
>  	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
>  	cpfl_vport_map_uninit(adapter);
>  	idpf_adapter_deinit(&adapter->base);
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 2151605987..40bba8da00 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -22,6 +22,7 @@
>  #include "cpfl_logs.h"
>  #include "cpfl_cpchnl.h"
>  #include "cpfl_representor.h"
> +#include "cpfl_controlq.h"
> 
>  /* Currently, backend supports up to 8 vports */
>  #define CPFL_MAX_VPORT_NUM	8
> @@ -89,6 +90,10 @@
> 
>  #define CPFL_FLOW_FILE_LEN 100
> 
> +#define CPFL_RX_CFGQ_NUM	4
> +#define CPFL_TX_CFGQ_NUM	4
> +#define CPFL_CFGQ_NUM		8
> +
>  #define CPFL_INVALID_HW_ID	UINT16_MAX
>  #define CPFL_META_CHUNK_LENGTH	1024
>  #define CPFL_META_LENGTH	32
> @@ -204,11 +209,20 @@ struct cpfl_adapter_ext {
>  	rte_spinlock_t repr_lock;
>  	struct rte_hash *repr_whitelist_hash;
> 
> +	/* ctrl vport and ctrl queues. */
> +	struct cpfl_vport ctrl_vport;
> +	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
> +	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
> +	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
> +
>  	struct cpfl_metadata meta;
>  };
> 
>  TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
> 
> +int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
> +int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
> +int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
>  int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
>  			   struct cpfl_vport_id *vport_identity,
>  			   struct cpchnl2_vport_info *vport_info);
> diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
> index a21a4a451f..932840a972 100644
> --- a/drivers/net/cpfl/cpfl_vchnl.c
> +++ b/drivers/net/cpfl/cpfl_vchnl.c
> @@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
> 
>  	return 0;
>  }
> +
> +int
> +cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
> +{
> +	struct virtchnl2_create_vport vport_msg;
> +	struct idpf_cmd_info args;
> +	int err = -1;
> +
> +	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
> +	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
> +	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
> +	vport_msg.num_tx_complq = 0;
> +	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
> +	vport_msg.num_rx_bufq = 0;
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
> +	args.in_args = (uint8_t *)&vport_msg;
> +	args.in_args_size = sizeof(vport_msg);
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
> +		return err;
> +	}
> +
> +	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
> +		   IDPF_DFLT_MBX_BUF_SIZE);
> +	return err;
> +}
> +
> +int
> +cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
> +{
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
> +	struct virtchnl2_rxq_info *rxq_info;
> +	struct idpf_cmd_info args;
> +	uint16_t num_qs;
> +	int size, err, i;
> +
> +	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> +		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
> +		err = -EINVAL;
> +		return err;
> +	}
> +
> +	num_qs = CPFL_RX_CFGQ_NUM;
> +	size = sizeof(*vc_rxqs) + (num_qs - 1) *
> +		sizeof(struct virtchnl2_rxq_info);
> +	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
> +	if (!vc_rxqs) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
> +		err = -ENOMEM;
> +		return err;
> +	}
> +	vc_rxqs->vport_id = vport->base.vport_id;
> +	vc_rxqs->num_qinfo = num_qs;
> +
> +	for (i = 0; i < num_qs; i++) {
> +		rxq_info = &vc_rxqs->qinfo[i];
> +		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
> +		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
> +		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
> +		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> +		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
> +		rxq_info->max_pkt_size = vport->base.max_pkt_len;
> +		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
> +		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> +		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
> +	}
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
> +	args.in_args = (uint8_t *)vc_rxqs;
> +	args.in_args_size = size;
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	rte_free(vc_rxqs);
> +	if (err)
> +		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
> +
> +	return err;
> +}
> +
> +int
> +cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
> +{
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
> +	struct virtchnl2_txq_info *txq_info;
> +	struct idpf_cmd_info args;
> +	uint16_t num_qs;
> +	int size, err, i;
> +
> +	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> +		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
> +		err = -EINVAL;
> +		return err;
> +	}
> +
> +	num_qs = CPFL_TX_CFGQ_NUM;
> +	size = sizeof(*vc_txqs) + (num_qs - 1) *
> +		sizeof(struct virtchnl2_txq_info);
> +	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
> +	if (!vc_txqs) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
> +		err = -ENOMEM;
> +		return err;
> +	}
> +	vc_txqs->vport_id = vport->base.vport_id;
> +	vc_txqs->num_qinfo = num_qs;
> +
> +	for (i = 0; i < num_qs; i++) {
> +		txq_info = &vc_txqs->qinfo[i];
> +		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
> +		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
> +		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
> +		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> +		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
> +		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
> +	}
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
> +	args.in_args = (uint8_t *)vc_txqs;
> +	args.in_args_size = size;
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	rte_free(vc_txqs);
> +	if (err)
> +		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
> +
> +	return err;
> +}
> --
> 2.34.1


