[PATCH v3 3/4] net/intel: add config queue support to vCPF
Bruce Richardson
bruce.richardson at intel.com
Mon Sep 29 15:40:26 CEST 2025
On Tue, Sep 23, 2025 at 02:54:54PM +0200, Shetty, Praveen wrote:
> From: Praveen Shetty <praveen.shetty at intel.com>
>
> A "configuration queue" is a software term to denote
> a hardware mailbox queue dedicated to NSS programming.
> While the hardware does not have a construct of a
> "configuration queue", software does to state clearly
> the distinction between a queue software dedicates to
> regular mailbox processing (e.g. CPChnl or Virtchnl)
> and a queue software dedicates to NSS programming
> (e.g. SEM/LEM rule programming).
>
Please provide expansions or clarifications for the acronyms used in the
commit message, so that the commit log is understandable for those unaware
of what the NSS is, or what SEM/LEM refers to. As far as I know, these are
not generally known terms in the industry.
Also, you say that the hardware doesn't have a config queue, but software
does - I think that needs a bit of explanation as to what exactly the
patch is doing/implementing. How is software providing a special config
queue if the facility is not provided by HW?
> Signed-off-by: Praveen Shetty <praveen.shetty at intel.com>
> Tested-by: Dhananjay Shukla <dhananjay.shukla at intel.com>
> Tested-by: Atul Patel <atul.patel at intel.com>
> ---
Couple of small comments inline below.
/Bruce
> drivers/net/intel/cpfl/cpfl_ethdev.c | 274 +++++++++++++++---
> drivers/net/intel/cpfl/cpfl_ethdev.h | 38 ++-
> drivers/net/intel/cpfl/cpfl_vchnl.c | 143 ++++++++-
> drivers/net/intel/idpf/base/idpf_osdep.h | 3 +
> drivers/net/intel/idpf/base/virtchnl2.h | 3 +-
> drivers/net/intel/idpf/idpf_common_device.h | 2 +
> drivers/net/intel/idpf/idpf_common_virtchnl.c | 38 +++
> drivers/net/intel/idpf/idpf_common_virtchnl.h | 3 +
> 8 files changed, 449 insertions(+), 55 deletions(-)
>
> diff --git a/drivers/net/intel/cpfl/cpfl_ethdev.c b/drivers/net/intel/cpfl/cpfl_ethdev.c
> index d6227c99b5..c411a2a024 100644
> --- a/drivers/net/intel/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/intel/cpfl/cpfl_ethdev.c
> @@ -29,6 +29,9 @@
> #define CPFL_FLOW_PARSER "flow_parser"
> #endif
>
> +#define VCPF_FID 0
> +#define CPFL_FID 6
> +
> rte_spinlock_t cpfl_adapter_lock;
> /* A list for all adapters, one adapter matches one PCI device */
> struct cpfl_adapter_list cpfl_adapter_list;
> @@ -1699,7 +1702,8 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
> }
>
> /* ignore if it is ctrl vport */
> - if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
> + if (adapter->base.hw.device_id == IDPF_DEV_ID_CPF &&
> + adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
> return;
>
> vport = cpfl_find_vport(adapter, vc_event->vport_id);
> @@ -1903,18 +1907,30 @@ cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
> {
> int i, ret;
>
> - for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> - ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false,
> + for (i = 0; i < adapter->num_tx_cfgq; i++) {
> + if (adapter->base.hw.device_id == IXD_DEV_ID_VCPF)
> + ret = idpf_vc_ena_dis_one_queue_vcpf(&adapter->base,
> + adapter->cfgq_info[0].id,
> + VIRTCHNL2_QUEUE_TYPE_CONFIG_TX, false);
> + else
> + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false,
> VIRTCHNL2_QUEUE_TYPE_CONFIG_TX);
> +
> if (ret) {
> PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
> return ret;
> }
> }
>
> - for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> - ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false,
> - VIRTCHNL2_QUEUE_TYPE_CONFIG_RX);
> + for (i = 0; i < adapter->num_rx_cfgq; i++) {
> + if (adapter->base.hw.device_id == IXD_DEV_ID_VCPF)
> + ret = idpf_vc_ena_dis_one_queue_vcpf(&adapter->base,
> + adapter->cfgq_info[1].id,
> + VIRTCHNL2_QUEUE_TYPE_CONFIG_RX, false);
> + else
> + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false,
> + VIRTCHNL2_QUEUE_TYPE_CONFIG_RX);
> +
> if (ret) {
> PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
> return ret;
> @@ -1922,6 +1938,7 @@ cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
> }
>
> return 0;
> +
> }
>
> static int
> @@ -1941,8 +1958,13 @@ cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
> return ret;
> }
>
> - for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> - ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true,
> + for (i = 0; i < adapter->num_tx_cfgq; i++) {
> + if (adapter->base.hw.device_id == IXD_DEV_ID_VCPF)
> + ret = idpf_vc_ena_dis_one_queue_vcpf(&adapter->base,
> + adapter->cfgq_info[0].id,
> + VIRTCHNL2_QUEUE_TYPE_CONFIG_TX, true);
> + else
> + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true,
> VIRTCHNL2_QUEUE_TYPE_CONFIG_TX);
> if (ret) {
> PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
> @@ -1950,8 +1972,13 @@ cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
> }
> }
>
> - for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> - ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true,
> + for (i = 0; i < adapter->num_rx_cfgq; i++) {
> + if (adapter->base.hw.device_id == IXD_DEV_ID_VCPF)
> + ret = idpf_vc_ena_dis_one_queue_vcpf(&adapter->base,
> + adapter->cfgq_info[1].id,
> + VIRTCHNL2_QUEUE_TYPE_CONFIG_RX, true);
> + else
> + ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true,
> VIRTCHNL2_QUEUE_TYPE_CONFIG_RX);
> if (ret) {
> PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
> @@ -1971,14 +1998,20 @@ cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
>
> create_cfgq_info = adapter->cfgq_info;
>
> - for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> - if (adapter->ctlqp[i])
> + for (i = 0; i < adapter->num_cfgq; i++) {
> + if (adapter->ctlqp[i]) {
> cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
> + adapter->ctlqp[i] = NULL;
> + }
> if (create_cfgq_info[i].ring_mem.va)
> idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
> if (create_cfgq_info[i].buf_mem.va)
> idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
> }
> + if (adapter->ctlqp) {
> + rte_free(adapter->ctlqp);
> + adapter->ctlqp = NULL;
> + }
> }
>
> static int
> @@ -1988,7 +2021,16 @@ cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
> int ret = 0;
> int i = 0;
>
> - for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> + adapter->ctlqp = rte_zmalloc("ctlqp", adapter->num_cfgq *
> + sizeof(struct idpf_ctlq_info *),
> + RTE_CACHE_LINE_SIZE);
> +
> + if (!adapter->ctlqp) {
> + PMD_DRV_LOG(ERR, "Failed to allocate memory for control queues");
> + return -ENOMEM;
> + }
> +
> + for (i = 0; i < adapter->num_cfgq; i++) {
> cfg_cq = NULL;
> ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
> &adapter->cfgq_info[i],
> @@ -2007,6 +2049,62 @@ cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
> return ret;
> }
>
> +static
> +int vcpf_save_chunk_in_cfgq(struct cpfl_adapter_ext *adapter)
> +{
> + struct virtchnl2_add_queues *add_q =
> + (struct virtchnl2_add_queues *)adapter->addq_recv_info;
> + struct vcpf_cfg_queue *cfgq;
> + struct virtchnl2_queue_reg_chunk *q_chnk;
> + u16 rx, tx, num_chunks, num_q, struct_size;
> + u32 q_id, q_type;
> +
> + rx = 0; tx = 0;
> +
> + cfgq = rte_zmalloc("cfgq", adapter->num_cfgq *
> + sizeof(struct vcpf_cfg_queue),
> + RTE_CACHE_LINE_SIZE);
> +
I suspect you can probably fit both sides of the multiply on a single line
here, and still be within 100 chars. That will make the code slightly
easier to read.
> + if (!cfgq) {
> + PMD_DRV_LOG(ERR, "Failed to allocate memory for cfgq");
> + return -ENOMEM;
> + }
> +
> + struct_size = idpf_struct_size(add_q, chunks.chunks, (add_q->chunks.num_chunks - 1));
> + adapter->cfgq_in.cfgq_add = rte_zmalloc("config_queues", struct_size, 0);
Missing check for a failed zmalloc call.
> + rte_memcpy(adapter->cfgq_in.cfgq_add, add_q, struct_size);
> +
> + num_chunks = add_q->chunks.num_chunks;
> + for (u16 i = 0; i < num_chunks; i++) {
> + num_q = add_q->chunks.chunks[i].num_queues;
> + q_chnk = &add_q->chunks.chunks[i];
> + for (u16 j = 0; j < num_q; j++) {
> + if (rx > adapter->num_cfgq || tx > adapter->num_cfgq)
> + break;
> + q_id = q_chnk->start_queue_id + j;
> + q_type = q_chnk->type;
> + if (q_type == VIRTCHNL2_QUEUE_TYPE_MBX_TX) {
> + cfgq[0].qid = q_id;
> + cfgq[0].qtail_reg_start = q_chnk->qtail_reg_start;
> + cfgq[0].qtail_reg_spacing = q_chnk->qtail_reg_spacing;
> + q_chnk->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
> + tx++;
> + } else if (q_type == VIRTCHNL2_QUEUE_TYPE_MBX_RX) {
> + cfgq[1].qid = q_id;
> + cfgq[1].qtail_reg_start = q_chnk->qtail_reg_start;
> + cfgq[1].qtail_reg_spacing = q_chnk->qtail_reg_spacing;
> + q_chnk->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
> + rx++;
> + }
> + }
> + }
> +
<snip>
More information about the dev
mailing list