[dpdk-dev] [PATCH v2] net/mlx5: fix concurrent use of Tx offloads

Yongseok Koh yskoh at mellanox.com
Wed Jan 30 05:06:24 CET 2019


On Tue, Jan 29, 2019 at 01:27:56PM +0200, Dekel Peled wrote:
> Original patch implemented the use of match_metadata offload in the
> different burst functions.
> The concurrent use of match_metadata and multi_segs offloads was
> not handled.
> 
> This patch updates function txq_scatter_v(), to pass metadata value
> from mbuf to wqe, when indicated by offload flags.
> 
> Fixes: 6bd7fbd03c62 ("net/mlx5: support metadata as flow rule criteria")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Dekel Peled <dekelp at mellanox.com>
> 
> ---

Not sure if the title represents this patch well.
	net/mlx5: fix concurrent use of Tx offloads

How about,
	net/mlx5: fix Tx metadata for multi-segment packet

> v2: Apply code review comments.
> ---
> ---
>  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 11 ++++++++---
>  drivers/net/mlx5/mlx5_rxtx_vec_sse.h  | 11 +++++++----
>  2 files changed, 15 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> index 883fe1b..7353457 100644
> --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
> @@ -104,6 +104,8 @@
>  		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
>  	unsigned int n;
>  	volatile struct mlx5_wqe *wqe = NULL;
> +	bool metadata_ol =
> +		txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
>  
>  	assert(elts_n > pkts_n);
>  	mlx5_tx_complete(txq);
> @@ -127,6 +129,9 @@
>  		uint8x16_t *t_wqe;
>  		uint8_t *dseg;
>  		uint8x16_t ctrl;
> +		rte_be32_t metadata = (metadata_ol &&
> +				       (pkts[n]->ol_flags & PKT_TX_METADATA)) ?
> +					pkts[n]->tx_metadata : 0;

Why do you use pkts[n] here instead of buf? Also, the indentation should be fixed, e.g.:

		rte_be32_t metadata =
			metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
			buf->tx_metadata : 0;

>  
>  		assert(segs_n);
>  		max_elts = elts_n - (elts_head - txq->elts_tail);
> @@ -164,9 +169,9 @@
>  		ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
>  		vst1q_u8((void *)t_wqe, ctrl);
>  		/* Fill ESEG in the header. */
> -		vst1q_u16((void *)(t_wqe + 1),
> -			  ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
> -					  0, 0, 0, 0 }));
> +		vst1q_u32((void *)(t_wqe + 1), ((uint32x4_t){ 0,
> +			  cs_flags << 16 | rte_cpu_to_be_16(len),
> +			  metadata, 0 }));

The indentation should be fixed here as well, e.g.:

		vst1q_u32((void *)(t_wqe + 1),
			  ((uint32x4_t){ 0, cs_flags << 16 |
					    rte_cpu_to_be_16(len),
					 metadata, 0 }));

>  		txq->wqe_ci = wqe_ci;
>  	}
>  	if (!n)
> diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> index 14117c4..7b580d3 100644
> --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
> @@ -104,6 +104,8 @@
>  		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
>  	unsigned int n;
>  	volatile struct mlx5_wqe *wqe = NULL;
> +	bool metadata_ol =
> +		txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ? true : false;
>  
>  	assert(elts_n > pkts_n);
>  	mlx5_tx_complete(txq);
> @@ -125,6 +127,9 @@
>  		uint16_t max_wqe;
>  		__m128i *t_wqe, *dseg;
>  		__m128i ctrl;
> +		rte_be32_t metadata = (metadata_ol &&
> +				       (pkts[n]->ol_flags & PKT_TX_METADATA)) ?
> +					pkts[n]->tx_metadata : 0;

		rte_be32_t metadata =
			metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
			buf->tx_metadata : 0;

>  
>  		assert(segs_n);
>  		max_elts = elts_n - (elts_head - txq->elts_tail);
> @@ -164,10 +169,8 @@
>  		ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
>  		_mm_store_si128(t_wqe, ctrl);
>  		/* Fill ESEG in the header. */
> -		_mm_store_si128(t_wqe + 1,
> -				_mm_set_epi16(0, 0, 0, 0,
> -					      rte_cpu_to_be_16(len), cs_flags,
> -					      0, 0));
> +		_mm_store_si128(t_wqe + 1, _mm_set_epi32(0, metadata,
> +				(rte_cpu_to_be_16(len) << 16) | cs_flags, 0));

		_mm_store_si128(t_wqe + 1,
				_mm_set_epi32(0, metadata,
					      (rte_cpu_to_be_16(len) << 16) |
					      cs_flags, 0));

Thanks,
Yongseok

>  		txq->wqe_ci = wqe_ci;
>  	}
>  	if (!n)
> -- 
> 1.8.3.1
> 


More information about the dev mailing list