[PATCH v14 02/18] net/idpf: add support for device initialization

Andrew Rybchenko andrew.rybchenko at oktetlabs.ru
Fri Oct 28 17:35:13 CEST 2022


On 10/27/22 10:47, Junfeng Guo wrote:
> Support device init and add the following dev ops:
>   - dev_configure
>   - dev_close
>   - dev_infos_get
> 
> Signed-off-by: Beilei Xing <beilei.xing at intel.com>
> Signed-off-by: Xiaoyun Li <xiaoyun.li at intel.com>
> Signed-off-by: Xiao Wang <xiao.w.wang at intel.com>
> Signed-off-by: Wenjun Wu <wenjun1.wu at intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo at intel.com>

[snip]

> +static int idpf_dev_configure(struct rte_eth_dev *dev);
> +static int idpf_dev_close(struct rte_eth_dev *dev);
> +static int idpf_dev_info_get(struct rte_eth_dev *dev,
> +			     struct rte_eth_dev_info *dev_info);
> +static void idpf_adapter_rel(struct idpf_adapter *adapter);
> +
> +static const struct eth_dev_ops idpf_eth_dev_ops = {
> +	.dev_configure			= idpf_dev_configure,
> +	.dev_close			= idpf_dev_close,
> +	.dev_infos_get			= idpf_dev_info_get,
> +};

Typically it is better to avoid forward static declarations and
simply define the ops structure after the callbacks.
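
For example (just a sketch of the ordering, not the full driver):

    static int
    idpf_dev_configure(struct rte_eth_dev *dev __rte_unused)
    {
            return 0;
    }

    /* No forward declarations needed: the table comes last. */
    static const struct eth_dev_ops idpf_eth_dev_ops = {
            .dev_configure          = idpf_dev_configure,
    };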

> +
> +static int
> +idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> +{
> +	struct idpf_vport *vport = dev->data->dev_private;
> +	struct idpf_adapter *adapter = vport->adapter;
> +
> +	dev_info->max_rx_queues = adapter->caps->max_rx_q;
> +	dev_info->max_tx_queues = adapter->caps->max_tx_q;
> +	dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
> +	dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
> +
> +	dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
> +	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
> +
> +	dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;

I guess it makes sense if and only if you support the API
to add/remove unicast MAC addresses.
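
If you do want to keep it, the usual counterpart would be something
like the below (hypothetical callback names, stubs only to illustrate
the point), plus the corresponding .mac_addr_add/.mac_addr_remove
entries in idpf_eth_dev_ops:

    static int
    idpf_add_mac_addr(struct rte_eth_dev *dev __rte_unused,
                      struct rte_ether_addr *addr __rte_unused,
                      uint32_t index __rte_unused,
                      uint32_t pool __rte_unused)
    {
            return 0; /* would program the unicast filter via virtchnl */
    }

    static void
    idpf_del_mac_addr(struct rte_eth_dev *dev __rte_unused,
                      uint32_t index __rte_unused)
    {
    }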

> +
> +	return 0;
> +

[snip]

> +static int
> +idpf_init_vport(struct rte_eth_dev *dev)
> +{
> +	struct idpf_vport *vport = dev->data->dev_private;
> +	struct idpf_adapter *adapter = vport->adapter;
> +	uint16_t idx = adapter->cur_vport_idx;
> +	struct virtchnl2_create_vport *vport_info =
> +		(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
> +	int i, type, ret;
> +
> +	vport->vport_id = vport_info->vport_id;
> +	vport->txq_model = vport_info->txq_model;
> +	vport->rxq_model = vport_info->rxq_model;
> +	vport->num_tx_q = vport_info->num_tx_q;
> +	vport->num_tx_complq = vport_info->num_tx_complq;
> +	vport->num_rx_q = vport_info->num_rx_q;
> +	vport->num_rx_bufq = vport_info->num_rx_bufq;
> +	vport->max_mtu = vport_info->max_mtu;
> +	rte_memcpy(vport->default_mac_addr,
> +		   vport_info->default_mac_addr, ETH_ALEN);
> +	vport->sw_idx = idx;
> +
> +	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
> +		type = vport_info->chunks.chunks[i].type;
> +		switch (type) {
> +		case VIRTCHNL2_QUEUE_TYPE_TX:
> +			vport->chunks_info.tx_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->chunks_info.tx_qtail_start =
> +				vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->chunks_info.tx_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> +			break;
> +		case VIRTCHNL2_QUEUE_TYPE_RX:
> +			vport->chunks_info.rx_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->chunks_info.rx_qtail_start =
> +				vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->chunks_info.rx_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> +			break;
> +		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> +			vport->chunks_info.tx_compl_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->chunks_info.tx_compl_qtail_start =
> +				vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->chunks_info.tx_compl_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> +			break;
> +		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> +			vport->chunks_info.rx_buf_start_qid =
> +				vport_info->chunks.chunks[i].start_queue_id;
> +			vport->chunks_info.rx_buf_qtail_start =
> +				vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->chunks_info.rx_buf_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> +			break;
> +		default:
> +			PMD_INIT_LOG(ERR, "Unsupported queue type");
> +			break;
> +		}
> +	}
> +
> +	ret = idpf_parse_devarg_id(dev->data->name);
> +	if (ret < 0) {
> +		PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
> +		return -1;

A negative errno must be returned, since it is ultimately used as the
rte_eth_dev_create() return value, which is a negative errno.
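
E.g. (assuming idpf_parse_devarg_id() itself fails with a negative
errno; otherwise pick a suitable one such as -EINVAL):

    ret = idpf_parse_devarg_id(dev->data->name);
    if (ret < 0) {
            PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
            return ret;
    }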

> +	}
> +	vport->devarg_id = ret;
> +
> +	vport->dev_data = dev->data;
> +
> +	adapter->vports[idx] = vport;
> +
> +	return 0;
> +}
> +
> +static int
> +idpf_dev_configure(struct rte_eth_dev *dev)
> +{
> +	struct rte_eth_conf *conf = &dev->data->dev_conf;
> +
> +	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
> +		PMD_INIT_LOG(ERR, "Setting link speed is not supported");
> +		return -1;

The return value is used as the rte_eth_dev_configure() return value,
which should be a negative errno, not -1.

Please double-check all other similar cases.
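
E.g.:

    if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
            PMD_INIT_LOG(ERR, "Setting link speed is not supported");
            return -ENOTSUP;
    }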

> +	}
> +
> +	if ((dev->data->nb_rx_queues == 1 && conf->rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) ||
> +	    (dev->data->nb_rx_queues > 1 && conf->rxmode.mq_mode != RTE_ETH_MQ_RX_RSS)) {

Right now (just after this patch) you don't support RSS, since you
don't handle the corresponding configuration items. So nothing except
RX_NONE is supported. RX_RSS should be added when you really support
it (later).
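
I.e. for now the check should simply be (again with a negative errno):

    if (conf->rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
            PMD_INIT_LOG(ERR, "Multi-queue packet distribution mode %d is not supported",
                         conf->rxmode.mq_mode);
            return -ENOTSUP;
    }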

> +		PMD_INIT_LOG(ERR, "Multi-queue packet distribution mode %d is not supported",
> +			     conf->rxmode.mq_mode);
> +		return -1;
> +	}

[snip]

> +struct idpf_adapter *
> +idpf_find_adapter(struct rte_pci_device *pci_dev)
> +{
> +	struct idpf_adapter *adapter;
> +
> +	rte_spinlock_lock(&idpf_adapter_lock);
> +	TAILQ_FOREACH(adapter, &idpf_adapter_list, next) {
> +		if (strncmp(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE) == 0) {
> +			rte_spinlock_unlock(&idpf_adapter_lock);
> +			return adapter;

A pointer to an element of the list protected by the spinlock is
returned here.

> +		}
> +	}
> +	rte_spinlock_unlock(&idpf_adapter_lock);
> +
> +	return NULL;
> +}

[snip]

> +static int
> +idpf_pci_remove(struct rte_pci_device *pci_dev)
> +{
> +	struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);

The question about locking still stands. I'm not sure that I
understand the purpose of the locking here, or why it can be omitted.
Anyway, returning a pointer to a list element while the list is
protected by a lock looks suspicious.
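
One possible way out (a sketch only, with a hypothetical *_locked
helper) is to keep lookup and removal in a single critical section so
the pointer never escapes the lock unprotected:

    /* Caller must hold idpf_adapter_lock. */
    static struct idpf_adapter *
    idpf_find_adapter_locked(struct rte_pci_device *pci_dev)
    {
            struct idpf_adapter *adapter;

            TAILQ_FOREACH(adapter, &idpf_adapter_list, next) {
                    if (strncmp(adapter->name, pci_dev->device.name,
                                PCI_PRI_STR_SIZE) == 0)
                            return adapter;
            }
            return NULL;
    }

and then in idpf_pci_remove():

    rte_spinlock_lock(&idpf_adapter_lock);
    adapter = idpf_find_adapter_locked(pci_dev);
    if (adapter != NULL)
            TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
    rte_spinlock_unlock(&idpf_adapter_lock);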

> +	uint16_t port_id;
> +
> +	/* Ethdev created can be found RTE_ETH_FOREACH_DEV_OF through rte_device */
> +	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
> +			rte_eth_dev_close(port_id);
> +	}
> +
> +	rte_spinlock_lock(&idpf_adapter_lock);
> +	TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
> +	rte_spinlock_unlock(&idpf_adapter_lock);
> +	idpf_adapter_rel(adapter);
> +	rte_free(adapter);
> +
> +	return 0;
> +}

[snip]

> +int
> +idpf_vc_get_caps(struct idpf_adapter *adapter)
> +{
> +	struct virtchnl2_get_capabilities caps_msg;
> +	struct idpf_cmd_info args;
> +	int err;
> +
> +	 memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
> +	 caps_msg.csum_caps =
> +		 VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|
> +		 VIRTCHNL2_CAP_TX_CSUM_GENERIC		|
> +		 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	|
> +		 VIRTCHNL2_CAP_RX_CSUM_GENERIC;
> +
> +	 caps_msg.seg_caps =
> +		 VIRTCHNL2_CAP_SEG_IPV4_TCP		|
> +		 VIRTCHNL2_CAP_SEG_IPV4_UDP		|
> +		 VIRTCHNL2_CAP_SEG_IPV4_SCTP		|
> +		 VIRTCHNL2_CAP_SEG_IPV6_TCP		|
> +		 VIRTCHNL2_CAP_SEG_IPV6_UDP		|
> +		 VIRTCHNL2_CAP_SEG_IPV6_SCTP		|
> +		 VIRTCHNL2_CAP_SEG_GENERIC;
> +
> +	 caps_msg.rss_caps =
> +		 VIRTCHNL2_CAP_RSS_IPV4_TCP		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_UDP		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_SCTP		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_OTHER		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_TCP		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_UDP		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_SCTP		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_OTHER		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_AH		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_ESP		|
> +		 VIRTCHNL2_CAP_RSS_IPV4_AH_ESP		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_AH		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_ESP		|
> +		 VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
> +
> +	 caps_msg.other_caps =
> +		 VIRTCHNL2_CAP_SPLITQ_QSCHED		|
> +		 VIRTCHNL2_CAP_CRC			|
> +		 VIRTCHNL2_CAP_WB_ON_ITR		|
> +		 VIRTCHNL2_CAP_PROMISC			|
> +		 VIRTCHNL2_CAP_LINK_SPEED		|
> +		 VIRTCHNL2_CAP_VLAN;
> +

My question asked in v11 still stands, since I have not received an
answer yet. Basically it looks like the corresponding caps should be
added when the corresponding offload support is added later. If not,
I'd like to understand why.
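
I.e. something like the below in this patch (which cap bit is really
required at init is, of course, for you to say), with each later
offload patch ORing in its own csum/seg/rss bits:

    memset(&caps_msg, 0, sizeof(caps_msg));
    caps_msg.other_caps = VIRTCHNL2_CAP_SPLITQ_QSCHED;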

> +	args.ops = VIRTCHNL2_OP_GET_CAPS;
> +	args.in_args = (uint8_t *)&caps_msg;
> +	args.in_args_size = sizeof(caps_msg);
> +	args.out_buffer = adapter->mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_execute_vc_cmd(adapter, &args);
> +	if (err != 0) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
> +		return err;
> +	}
> +
> +	rte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));
> +
> +	return 0;
> +}

[snip]

> +int
> +idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
> +{
> +	struct idpf_adapter *adapter = vport->adapter;
> +	struct virtchnl2_vport vc_vport;
> +	struct idpf_cmd_info args;
> +	int err;
> +
> +	vc_vport.vport_id = vport->vport_id;
> +	args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
> +			    VIRTCHNL2_OP_DISABLE_VPORT;
> +	args.in_args = (u8 *)&vc_vport;

uint8_t should be used, as you do above in idpf_vc_destroy_vport()
and in many other cases.
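
I.e.:

    args.in_args = (uint8_t *)&vc_vport;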

[snip]

