[PATCH v7 6/8] net/cpfl: add fxp rule module

Zhang, Qi Z qi.z.zhang at intel.com
Thu Sep 28 05:29:07 CEST 2023



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang at intel.com>
> Sent: Wednesday, September 27, 2023 2:17 AM
> To: Zhang, Yuying <yuying.zhang at intel.com>; dev at dpdk.org; Zhang, Qi Z
> <qi.z.zhang at intel.com>; Wu, Jingjing <jingjing.wu at intel.com>; Xing, Beilei
> <beilei.xing at intel.com>
> Subject: [PATCH v7 6/8] net/cpfl: add fxp rule module
> 
> From: Yuying Zhang <yuying.zhang at intel.com>
> 
> Implement FXP rule creation / destroying.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang at intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
>  drivers/net/cpfl/cpfl_ethdev.h   |   6 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
>  drivers/net/cpfl/meson.build     |   1 +
>  5 files changed, 402 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index a2bc6784d0..da78e79652 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -16,6 +16,7 @@
>  #include <ethdev_private.h>
>  #include "cpfl_rxtx.h"
>  #include "cpfl_flow.h"
> +#include "cpfl_rules.h"
> 
>  #define CPFL_REPRESENTOR	"representor"
>  #define CPFL_TX_SINGLE_Q	"tx_single"
> @@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
>  	adapter->cur_vport_nb--;
>  	dev->data->dev_private = NULL;
>  	adapter->vports[vport->sw_idx] = NULL;
> +	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
>  	rte_free(cpfl_vport);
> 
>  	return 0;
> @@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
>  	return 0;
>  }
> 
> +int
> +cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
> +			 int batch_size)
> +{
> +	int i;
> +
> +	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
> +		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < batch_size; i++) {
> +		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
> +		dma[i].pa = orig_dma->pa + size * (i + 1);
> +		dma[i].size = size;
> +		dma[i].zone = NULL;
> +	}
> +	return 0;
> +}
> +
>  static int
>  cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>  {
> @@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>  	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
>  			    &dev->data->mac_addrs[0]);
> 
> +	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
> +	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
> +	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
> +				       cpfl_vport->itf.dma,
> +				       sizeof(union cpfl_rule_cfg_pkt_record),
> +				       CPFL_FLOW_BATCH_SIZE);
> +	if (ret < 0)
> +		goto err_mac_addrs;
> +
>  	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
>  		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
>  		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 7f83d170d7..8eeeac9910 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -147,10 +147,14 @@ enum cpfl_itf_type {
> 
>  TAILQ_HEAD(cpfl_flow_list, rte_flow);
> 
> +#define CPFL_FLOW_BATCH_SIZE  490
>  struct cpfl_itf {
>  	enum cpfl_itf_type type;
>  	struct cpfl_adapter_ext *adapter;
>  	struct cpfl_flow_list flow_list;
> +	struct idpf_dma_mem flow_dma;
> +	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> +	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
>  	void *data;
>  };
> 
> @@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
>  int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
>  int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
>  int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
> +int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
> +			     uint32_t size, int batch_size);
> 
>  #define CPFL_DEV_TO_PCI(eth_dev)		\
>  	RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..50fac55432
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,296 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +#include "cpfl_ethdev.h"
> +
> +#include "cpfl_fxp_rule.h"
> +#include "cpfl_logs.h"
> +
> +#define CTLQ_SEND_RETRIES 100
> +#define CTLQ_RECEIVE_RETRIES 100
> +
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
> +		   struct idpf_ctlq_msg q_msg[])
> +{
> +	struct idpf_ctlq_msg **msg_ptr_list;
> +	u16 clean_count = 0;
> +	int num_cleaned = 0;
> +	int retries = 0;
> +	int ret = 0;
> +
> +	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
> +	if (!msg_ptr_list) {
> +		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with
> error: 0x%4x", ret);
> +		goto send_err;
> +	}
> +
> +	while (retries <= CTLQ_SEND_RETRIES) {
> +		clean_count = num_q_msg - num_cleaned;
> +		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
> +					       &msg_ptr_list[num_cleaned]);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
> +			goto send_err;
> +		}
> +
> +		num_cleaned += clean_count;
> +		retries++;
> +		if (num_cleaned >= num_q_msg)
> +			break;
> +		rte_delay_us_sleep(10);
> +	}
> +
> +	if (retries > CTLQ_SEND_RETRIES) {
> +		PMD_INIT_LOG(ERR, "timed out while polling for
> completions");
> +		ret = -1;
> +		goto send_err;
> +	}
> +
> +send_err:
> +	if (msg_ptr_list)
> +		free(msg_ptr_list);
> +err:
> +	return ret;
> +}
> +
> +static int
> +cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
> +{
> +	u16 i;
> +
> +	if (!num_q_msg || !q_msg)
> +		return -EINVAL;
> +
> +	for (i = 0; i < num_q_msg; i++) {
> +		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
> +			continue;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
> +			PMD_INIT_LOG(ERR, "The rule has confliction with
> already existed one");
> +			return -EINVAL;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
> +			PMD_INIT_LOG(ERR, "The rule has already deleted");
> +			return -EINVAL;
> +		} else {
> +			PMD_INIT_LOG(ERR, "Invalid rule");
> +			return -EINVAL;
> +		}

Please fix the checkpatch warning caused by the unnecessary "else" here.
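
Since every branch ends with "continue" or a return, the "else" keywords can simply be dropped without changing behavior. A rough sketch of how the loop body could look after the cleanup (illustrative only, not compile-tested against this series):

	for (i = 0; i < num_q_msg; i++) {
		/* successful completions need no further handling */
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK)
			continue;

		/* add-rule completion reporting a duplicate entry */
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
		    q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
			PMD_INIT_LOG(ERR, "The rule has confliction with already existed one");
			return -EINVAL;
		}

		/* del-rule completion reporting a missing entry */
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
		    q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
			PMD_INIT_LOG(ERR, "The rule has already deleted");
			return -EINVAL;
		}

		/* any other status is treated as an invalid rule */
		PMD_INIT_LOG(ERR, "Invalid rule");
		return -EINVAL;
	}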



