[dpdk-dev] [PATCH v4] net/mlx5: relaxed ordering for multi-packet RQ buffer refcnt

Slava Ovsiienko viacheslavo at nvidia.com
Wed Sep 30 14:44:42 CEST 2020


Looks good to me, and we've verified that there is no performance impact.
Thank you.

Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>

> -----Original Message-----
> From: dev <dev-bounces at dpdk.org> On Behalf Of Phil Yang
> Sent: Tuesday, September 29, 2020 18:23
> To: Raslan Darawsheh <rasland at nvidia.com>; Matan Azrad
> <matan at nvidia.com>; Shahaf Shuler <shahafs at nvidia.com>
> Cc: nd <nd at arm.com>; Alexander Kozyrev <akozyrev at nvidia.com>;
> Honnappa Nagarahalli <Honnappa.Nagarahalli at arm.com>; dev at dpdk.org;
> nd <nd at arm.com>
> Subject: Re: [dpdk-dev] [PATCH v4] net/mlx5: relaxed ordering for multi-
> packet RQ buffer refcnt
> 
> Hi Raslan,
> 
> It seems that there are no more comments for this patch.
> So shall we proceed further?
> 
> Thanks,
> Phil Yang
> 
> > -----Original Message-----
> > From: Alexander Kozyrev <akozyrev at nvidia.com>
> > Sent: Thursday, September 10, 2020 9:37 AM
> > To: Honnappa Nagarahalli <Honnappa.Nagarahalli at arm.com>; Phil Yang
> > <Phil.Yang at arm.com>; akozyrev at mellanox.com; rasland at mellanox.com;
> > dev at dpdk.org
> > Cc: Phil Yang <Phil.Yang at arm.com>; matan at mellanox.com; Shahaf Shuler
> > <shahafs at mellanox.com>; viacheslavo at mellanox.com; nd
> <nd at arm.com>; nd
> > <nd at arm.com>
> > Subject: RE: [PATCH v4] net/mlx5: relaxed ordering for multi-packet RQ
> > buffer refcnt
> >
> > > <snip>
> > >
> > > >
> > > > Use C11 atomics with RELAXED ordering instead of the rte_atomic
> > > > ops, which enforce unnecessary barriers on aarch64.
> > > >
> > > > Signed-off-by: Phil Yang <phil.yang at arm.com>
> > > Looks good.
> > >
> > > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
> >
> > Acked-by: Alexander Kozyrev <akozyrev at nvidia.com>
> >
> > >
> > > > ---
> > > > v4:
> > > > Remove the unnecessary ACQUIRE barrier in the rx burst path.
> > > > (Honnappa)
> > > >
> > > > v3:
> > > > Split from the patchset:
> > > > http://patchwork.dpdk.org/cover/68159/
> > > >
> > > >  drivers/net/mlx5/mlx5_rxq.c  |  2 +-
> > > >  drivers/net/mlx5/mlx5_rxtx.c | 16 +++++++++-------
> > > >  drivers/net/mlx5/mlx5_rxtx.h |  2 +-
> > > >  3 files changed, 11 insertions(+), 9 deletions(-)
> > > >
> > > > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> > > > index 79eb8f8..40e0239 100644
> > > > --- a/drivers/net/mlx5/mlx5_rxq.c
> > > > +++ b/drivers/net/mlx5/mlx5_rxq.c
> > > > @@ -2012,7 +2012,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
> > > >
> > > >  	memset(_m, 0, sizeof(*buf));
> > > >  	buf->mp = mp;
> > > > -	rte_atomic16_set(&buf->refcnt, 1);
> > > > +	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> > > >  	for (j = 0; j != strd_n; ++j) {
> > > >  		shinfo = &buf->shinfos[j];
> > > >  		shinfo->free_cb = mlx5_mprq_buf_free_cb;
> > > > diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> > > > index 1b71e94..549477b 100644
> > > > --- a/drivers/net/mlx5/mlx5_rxtx.c
> > > > +++ b/drivers/net/mlx5/mlx5_rxtx.c
> > > > @@ -1626,10 +1626,11 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
> > > >  {
> > > >  	struct mlx5_mprq_buf *buf = opaque;
> > > >
> > > > -	if (rte_atomic16_read(&buf->refcnt) == 1) {
> > > > +	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
> > > >  		rte_mempool_put(buf->mp, buf);
> > > > -	} else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
> > > > -		rte_atomic16_set(&buf->refcnt, 1);
> > > > +	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
> > > > +					       __ATOMIC_RELAXED) == 0)) {
> > > > +		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
> > > >  		rte_mempool_put(buf->mp, buf);
> > > >  	}
> > > >  }
> > > > @@ -1709,7 +1710,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> > > >
> > > >  		if (consumed_strd == strd_n) {
> > > >  			/* Replace WQE only if the buffer is still in use. */
> > > > -			if (rte_atomic16_read(&buf->refcnt) > 1) {
> > > > +			if (__atomic_load_n(&buf->refcnt,
> > > > +					    __ATOMIC_RELAXED) > 1) {
> > > >  				mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
> > > >  				/* Release the old buffer. */
> > > >  				mlx5_mprq_buf_free(buf);
> > > > @@ -1821,9 +1823,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> > > >  			void *buf_addr;
> > > >
> > > >  			/* Increment the refcnt of the whole chunk. */
> > > > -			rte_atomic16_add_return(&buf->refcnt, 1);
> > > > -			MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
> > > > -				    strd_n + 1);
> > > > +			__atomic_add_fetch(&buf->refcnt, 1, __ATOMIC_RELAXED);
> > > > +			MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
> > > > +				    __ATOMIC_RELAXED) <= strd_n + 1);
> > > >  			buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
> > > >  			/*
> > > >  			 * MLX5 device doesn't use iova but it is necessary in a
> > > > diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> > > > index c02a007..467f31d 100644
> > > > --- a/drivers/net/mlx5/mlx5_rxtx.h
> > > > +++ b/drivers/net/mlx5/mlx5_rxtx.h
> > > > @@ -68,7 +68,7 @@ struct rxq_zip {
> > > >  /* Multi-Packet RQ buffer header. */
> > > >  struct mlx5_mprq_buf {
> > > >  	struct rte_mempool *mp;
> > > > -	rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
> > > > +	uint16_t refcnt; /* Atomically accessed refcnt. */
> > > >  	uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
> > > >  	struct rte_mbuf_ext_shared_info shinfos[];
> > > >  	/*
> > > > --
> > > > 2.7.4
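
For context, the pattern the patch adopts is a refcount that needs
atomicity but no inter-thread ordering: the counter only tracks ownership,
while the hand-off of the buffer itself is synchronized elsewhere (in mlx5,
by the mempool and the rx queue state), which is why the ACQUIRE barrier
could be dropped in v4. Below is a minimal, self-contained sketch of that
pattern; the struct and helper names are illustrative only and not part of
the patch, though the __atomic_* usage mirrors the diff above.

/*
 * Sketch of a relaxed-ordering refcount (illustrative names, not mlx5
 * code). Build with: gcc -O2 refcnt_sketch.c -o refcnt_sketch
 */
#include <stdint.h>
#include <stdio.h>

struct buf {
	uint16_t refcnt; /* Atomically accessed refcnt. */
};

static void
buf_init(struct buf *b)
{
	/* No concurrent readers yet, so a relaxed store is enough. */
	__atomic_store_n(&b->refcnt, 1, __ATOMIC_RELAXED);
}

static void
buf_get(struct buf *b)
{
	/* Pure counter update: atomicity is needed, ordering is not. */
	__atomic_add_fetch(&b->refcnt, 1, __ATOMIC_RELAXED);
}

/* Returns 1 when the caller should return the buffer to its pool. */
static int
buf_put(struct buf *b)
{
	/* Fast path: sole owner, skip the atomic RMW entirely. */
	if (__atomic_load_n(&b->refcnt, __ATOMIC_RELAXED) == 1)
		return 1;
	if (__atomic_sub_fetch(&b->refcnt, 1, __ATOMIC_RELAXED) == 0) {
		/* Last reference: reset for reuse before releasing. */
		__atomic_store_n(&b->refcnt, 1, __ATOMIC_RELAXED);
		return 1;
	}
	return 0;
}

int
main(void)
{
	struct buf b;

	buf_init(&b);
	buf_get(&b);                               /* refcnt: 1 -> 2 */
	printf("put #1 frees? %d\n", buf_put(&b)); /* 0: a ref remains */
	printf("put #2 frees? %d\n", buf_put(&b)); /* 1: last reference */
	return 0;
}

The aarch64 benefit mentioned in the commit message comes from the fact
that the legacy rte_atomic16_* helpers are built on __sync primitives with
full-barrier semantics, whereas the relaxed built-ins compile down to bare
atomic instructions.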


