[PATCH v2 11/15] common/idpf: allocate static buffer at initialization

Zhang, Qi Z qi.z.zhang at intel.com
Mon Apr 24 14:15:16 CEST 2023



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao at intel.com>
> Sent: Friday, April 21, 2023 4:41 PM
> To: Wu, Jingjing <jingjing.wu at intel.com>; Xing, Beilei
> <beilei.xing at intel.com>; Zhang, Qi Z <qi.z.zhang at intel.com>
> Cc: dev at dpdk.org; Qiao, Wenjing <wenjing.qiao at intel.com>; Pau,
> Christopher <christopher.pau at intel.com>
> Subject: [PATCH v2 11/15] common/idpf: allocate static buffer at
> initialization
> 
> Some OSs don't allow allocating DMA memory at runtime. So create an initial
> static buffer at initialization to hold this data.

This doesn't seem to apply to DPDK, which does support allocating DMA memory at runtime. Do we really need this patch?

Btw, a global variable in a common module can create issues in a multi-device environment, since every device sharing the module would also share this one buffer.
We could consider embedding this struct in idpf_hw instead, e.g. something like the sketch below.
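
Untested sketch of what I have in mind; it reuses the patch's send_dma_mem name as a struct member, and the exact hook points are illustrative only, I haven't checked them against the current idpf_hw layout:

	/* in the idpf_hw definition (idpf base header) */
	struct idpf_hw {
		...
		/* per-device buffer for indirect mailbox payloads */
		struct idpf_dma_mem send_dma_mem;
		...
	};

	/* idpf_init_hw(): allocate once per device */
	hw->send_dma_mem.va = idpf_alloc_dma_mem(hw, &hw->send_dma_mem, 4096);
	if (!hw->send_dma_mem.va)
		return -ENOMEM;

	/* idpf_send_msg_to_cp(): reuse the per-device buffer */
	if (msglen > 0) {
		idpf_memcpy(hw->send_dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
		ctlq_msg.ctx.indirect.payload = &hw->send_dma_mem;
	}

	/* idpf_deinit_hw(): release it with the device */
	if (hw->send_dma_mem.va)
		idpf_free_dma_mem(hw, &hw->send_dma_mem);

That way each idpf_hw instance owns its own buffer and the global variable goes away.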

> 
> Signed-off-by: Christopher Pau <christopher.pau at intel.com>
> Signed-off-by: Wenjing Qiao <wenjing.qiao at intel.com>
> ---
>  drivers/common/idpf/base/idpf_common.c | 26 +++++++++++++++-----------
>  1 file changed, 15 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/common/idpf/base/idpf_common.c b/drivers/common/idpf/base/idpf_common.c
> index de82c3458f..f4a5707272 100644
> --- a/drivers/common/idpf/base/idpf_common.c
> +++ b/drivers/common/idpf/base/idpf_common.c
> @@ -6,6 +6,7 @@
>  #include "idpf_prototype.h"
>  #include "virtchnl.h"
> 
> +struct idpf_dma_mem send_dma_mem = { 0 };
> 
>  /**
>   * idpf_set_mac_type - Sets MAC type
> @@ -132,6 +133,15 @@ int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size)
> 
>  	idpf_free(hw, q_info);
> 
> +	/*
> +	 * Need an initial static buffer to copy DMA memory to send
> +	 * for drivers that do not allow this allocation at runtime
> +	 */
> +	send_dma_mem.va = (struct idpf_dma_mem *)
> +		idpf_alloc_dma_mem(hw, &send_dma_mem, 4096);
> +	if (!send_dma_mem.va)
> +		return -ENOMEM;
> +
>  	return 0;
>  }
> 
> @@ -152,7 +162,6 @@ int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode,
>  			int v_retval, u8 *msg, u16 msglen)
>  {
>  	struct idpf_ctlq_msg ctlq_msg = { 0 };
> -	struct idpf_dma_mem dma_mem = { 0 };
>  	int status;
> 
> 	ctlq_msg.opcode = idpf_mbq_opc_send_msg_to_pf;
> @@ -162,19 +171,11 @@ int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode,
>  	ctlq_msg.cookie.mbx.chnl_opcode = v_opcode;
> 
>  	if (msglen > 0) {
> -		dma_mem.va = (struct idpf_dma_mem *)
> -			  idpf_alloc_dma_mem(hw, &dma_mem, msglen);
> -		if (!dma_mem.va)
> -			return -ENOMEM;
> -
> -		idpf_memcpy(dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
> -		ctlq_msg.ctx.indirect.payload = &dma_mem;
> +		idpf_memcpy(send_dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
> +		ctlq_msg.ctx.indirect.payload = &send_dma_mem;
>  	}
>  	status = idpf_ctlq_send(hw, hw->asq, 1, &ctlq_msg);
> 
> -	if (dma_mem.va)
> -		idpf_free_dma_mem(hw, &dma_mem);
> -
>  	return status;
>  }
> 
> @@ -262,6 +263,9 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
>   */
>  int idpf_deinit_hw(struct idpf_hw *hw)
>  {
> +	if (send_dma_mem.va)
> +		idpf_free_dma_mem(hw, &send_dma_mem);
> +
>  	hw->asq = NULL;
>  	hw->arq = NULL;
> 
> --
> 2.25.1


