[dpdk-dev] [PATCH v3] net/mlx5: relaxed ordering for multi-packet RQ buffer refcnt

Phil Yang <Phil.Yang at arm.com>
Mon Jul 13 05:02:27 CEST 2020


Hi,

We are also converting other components to C11 atomics.
Your insight would be much appreciated. A minimal sketch of the pattern
this patch applies is below, and an annotated restatement of the buffer
free callback is appended after the quoted patch.
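
For context, this is the pattern the patch applies. It is a minimal,
hypothetical sketch (the struct and function names are made up; it is not
the driver code): a plain uint16_t counter manipulated only through the
GCC __atomic builtins with __ATOMIC_RELAXED ordering, since a pure
reference count does not publish any other data and therefore needs no
barriers.

#include <stdint.h>

struct refcnt_obj {
	uint16_t refcnt; /* plain integer, accessed only via __atomic builtins */
};

/* Set the initial reference; no concurrency yet, a relaxed store is enough. */
static inline void
refcnt_init(struct refcnt_obj *o)
{
	__atomic_store_n(&o->refcnt, 1, __ATOMIC_RELAXED);
}

/* Take an extra reference; the counter does not guard any other data. */
static inline void
refcnt_get(struct refcnt_obj *o)
{
	__atomic_add_fetch(&o->refcnt, 1, __ATOMIC_RELAXED);
}

/* Drop a reference; returns non-zero when the last one was released. */
static inline int
refcnt_put(struct refcnt_obj *o)
{
	return __atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELAXED) == 0;
}

Unlike the rte_atomic16_* ops, which are built on the __sync builtins and
imply full barriers, these builtins allow barrier-free atomics on aarch64.
Note the patch keeps __ATOMIC_ACQUIRE on the increment in
mlx5_rx_burst_mprq(); the remaining accesses are relaxed.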

Thanks,
Phil Yang

> -----Original Message-----
> From: dev <dev-bounces at dpdk.org> On Behalf Of Phil Yang
> Sent: Tuesday, June 23, 2020 4:27 PM
> To: dev at dpdk.org
> Cc: matan at mellanox.com; shahafs at mellanox.com;
> viacheslavo at mellanox.com; Honnappa Nagarahalli
> <Honnappa.Nagarahalli at arm.com>; drc at linux.vnet.ibm.com; nd
> <nd at arm.com>
> Subject: [dpdk-dev] [PATCH v3] net/mlx5: relaxed ordering for multi-packet
> RQ buffer refcnt
> 
> Use C11 atomics with explicit ordering instead of the rte_atomic ops,
> which enforce unnecessary barriers on aarch64.
> 
> Signed-off-by: Phil Yang <phil.yang at arm.com>
> ---
> v3:
> Split from the patchset:
> http://patchwork.dpdk.org/cover/68159/
> 
>  drivers/net/mlx5/mlx5_rxq.c  |  2 +-
>  drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++-------
>  drivers/net/mlx5/mlx5_rxtx.h |  2 +-
>  3 files changed, 11 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index dda0073..7f487f1 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1545,7 +1545,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
> 
>  	memset(_m, 0, sizeof(*buf));
>  	buf->mp = mp;
> -	rte_atomic16_set(&buf->refcnt, 1);
> +	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
>  	for (j = 0; j != strd_n; ++j) {
>  		shinfo = &buf->shinfos[j];
>  		shinfo->free_cb = mlx5_mprq_buf_free_cb;
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> index e4106bf..f0eda88 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -1595,10 +1595,11 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
>  {
>  	struct mlx5_mprq_buf *buf = opaque;
> 
> -	if (rte_atomic16_read(&buf->refcnt) == 1) {
> +	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
>  		rte_mempool_put(buf->mp, buf);
> -	} else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
> -		rte_atomic16_set(&buf->refcnt, 1);
> +	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
> +					       __ATOMIC_RELAXED) == 0)) {
> +		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
>  		rte_mempool_put(buf->mp, buf);
>  	}
>  }
> @@ -1678,7 +1679,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> 
>  		if (consumed_strd == strd_n) {
>  			/* Replace WQE only if the buffer is still in use. */
> -			if (rte_atomic16_read(&buf->refcnt) > 1) {
> +			if (__atomic_load_n(&buf->refcnt,
> +					    __ATOMIC_RELAXED) > 1) {
>  				mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
>  				/* Release the old buffer. */
>  				mlx5_mprq_buf_free(buf);
> @@ -1790,9 +1792,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
>  			void *buf_addr;
> 
>  			/* Increment the refcnt of the whole chunk. */
> -			rte_atomic16_add_return(&buf->refcnt, 1);
> -			MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
> -				    strd_n + 1);
> +			__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_ACQUIRE);
> +			MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
> +				    __ATOMIC_RELAXED) <= strd_n + 1);
>  			buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
>  			/*
>  			 * MLX5 device doesn't use iova but it is necessary in a
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index 26621ff..0fc15f3 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -78,7 +78,7 @@ struct rxq_zip {
>  /* Multi-Packet RQ buffer header. */
>  struct mlx5_mprq_buf {
>  	struct rte_mempool *mp;
> -	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
> +	uint16_t refcnt; /* Atomically accessed refcnt. */
>  	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
>  	struct rte_mbuf_ext_shared_info shinfos[];
>  	/*
> --
> 2.7.4
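
For reviewers, here is a simplified, annotated restatement of the
free-callback logic above. The helper name is made up and this is not the
exact driver code, only the same flow with comments on the refcnt
invariant:

/* Drop one reference to a multi-packet RQ buffer. Buffers resting in the
 * mempool always keep refcnt == 1, as set by mlx5_mprq_buf_init().
 */
static void
mprq_buf_release(struct mlx5_mprq_buf *buf)
{
	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
		/* The caller holds the only reference; the count already
		 * matches the pool invariant, return the buffer directly.
		 */
		rte_mempool_put(buf->mp, buf);
	} else if (__atomic_sub_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		/* Last reference just dropped: restore refcnt to 1 before
		 * handing the buffer back for reuse.
		 */
		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
		rte_mempool_put(buf->mp, buf);
	}
}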


