[PATCH v2] mbuf: optimize segment prefree

Bruce Richardson bruce.richardson at intel.com
Wed Oct 22 11:08:29 CEST 2025


On Mon, Oct 20, 2025 at 12:02:01PM +0000, Morten Brørup wrote:
> Refactored rte_pktmbuf_prefree_seg() for both performance and readability.
> 
> With the optimized RTE_MBUF_DIRECT() macro, the common likely code path
> now fits within one instruction cache line on x86-64 when built with GCC.
> 
> Signed-off-by: Morten Brørup <mb at smartsharesystems.com>

Reviewed-by: Bruce Richardson <bruce.richardson at intel.com>

Comments inline below.

> ---
> v2:
> * Fixed typo in commit description.
> * Fixed indentation.
> * Added detailed description to the optimized RTE_MBUF_DIRECT() macro.
>   (Stephen Hemminger)
> * Added static_assert() to verify that the optimized RTE_MBUF_DIRECT()
>   macro is valid, specifically that the tested bits are in the MSB of the
>   64-bit field.
> ---
>  lib/mbuf/rte_mbuf.h      | 51 +++++++++++++++-------------------------
>  lib/mbuf/rte_mbuf_core.h | 27 +++++++++++++++++++++
>  2 files changed, 46 insertions(+), 32 deletions(-)
> 
> diff --git a/lib/mbuf/rte_mbuf.h b/lib/mbuf/rte_mbuf.h
> index 3df22125de..2004391f57 100644
> --- a/lib/mbuf/rte_mbuf.h
> +++ b/lib/mbuf/rte_mbuf.h
> @@ -31,6 +31,7 @@
>   * http://www.kohala.com/start/tcpipiv2.html
>   */
>  
> +#include <stdbool.h>
>  #include <stdint.h>
>  
>  #include <rte_common.h>
> @@ -1458,44 +1459,30 @@ static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
>  static __rte_always_inline struct rte_mbuf *
>  rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
>  {
> -	__rte_mbuf_sanity_check(m, 0);
> -
> -	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
> -
> -		if (!RTE_MBUF_DIRECT(m)) {
> -			rte_pktmbuf_detach(m);
> -			if (RTE_MBUF_HAS_EXTBUF(m) &&
> -			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
> -			    __rte_pktmbuf_pinned_extbuf_decref(m))
> -				return NULL;
> -		}
> -
> -		if (m->next != NULL)
> -			m->next = NULL;
> -		if (m->nb_segs != 1)
> -			m->nb_segs = 1;
> +	bool refcnt_not_one;
>  
> -		return m;
> +	__rte_mbuf_sanity_check(m, 0);
>  
> -	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
> +	refcnt_not_one = unlikely(rte_mbuf_refcnt_read(m) != 1);
> +	if (refcnt_not_one && __rte_mbuf_refcnt_update(m, -1) != 0)
> +		return NULL;
>  
> -		if (!RTE_MBUF_DIRECT(m)) {
> -			rte_pktmbuf_detach(m);
> -			if (RTE_MBUF_HAS_EXTBUF(m) &&
> -			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
> -			    __rte_pktmbuf_pinned_extbuf_decref(m))
> -				return NULL;
> -		}
> +	if (unlikely(!RTE_MBUF_DIRECT(m))) {
> +		rte_pktmbuf_detach(m);
> +		if (RTE_MBUF_HAS_EXTBUF(m) &&
> +				RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
> +				__rte_pktmbuf_pinned_extbuf_decref(m))
> +			return NULL;
> +	}
>  
> -		if (m->next != NULL)
> -			m->next = NULL;
> -		if (m->nb_segs != 1)
> -			m->nb_segs = 1;
> +	if (refcnt_not_one)
>  		rte_mbuf_refcnt_set(m, 1);
> +	if (m->nb_segs != 1)
> +		m->nb_segs = 1;
> +	if (m->next != NULL)
> +		m->next = NULL;
>  
> -		return m;
> -	}
> -	return NULL;
> +	return m;
>  }
>  

Nice refactor, much more readable, thanks.
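
For context, the caller pattern this feeds into looks roughly like the
following (an illustrative sketch of a driver Tx completion path, not part
of the patch):

	/* Sketch: free one segment after transmission. */
	struct rte_mbuf *m_seg = rte_pktmbuf_prefree_seg(m);
	if (m_seg != NULL)
		/* Last reference dropped; return the mbuf to its pool. */
		rte_mempool_put(m_seg->pool, m_seg);

i.e. the function returns the mbuf only when it can go straight back into
its pool, and NULL while other references still hold it.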

>  /**
> diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
> index a0df265b5d..41f40e1967 100644
> --- a/lib/mbuf/rte_mbuf_core.h
> +++ b/lib/mbuf/rte_mbuf_core.h
> @@ -715,6 +715,33 @@ struct rte_mbuf_ext_shared_info {
>  #define RTE_MBUF_DIRECT(mb) \
>  	(!((mb)->ol_flags & (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL)))
>  
> +#if defined(RTE_TOOLCHAIN_GCC) && defined(RTE_ARCH_X86)
> +/* Optimization for code size.
> + * GCC only applies this optimization to single-bit MSB tests, so for this multi-bit test we do it by hand.
> + *
> + * The flags RTE_MBUF_F_INDIRECT and RTE_MBUF_F_EXTERNAL are both in the MSB of the
> + * 64-bit ol_flags field, so we only compare this one byte instead of all 64 bits.
> + * On little-endian architectures, the MSB of a 64-bit integer is at byte offset 7.
> + *
> + * Note: Tested using GCC version 16.0.0 20251019 (experimental).
> + *
> + * Without this optimization, GCC generates 17 bytes of instructions:
> + *      movabs rax,0x6000000000000000       // 10 bytes
> + *      and    rax,QWORD PTR [rdi+0x18]     // 4 bytes
> + *      sete   al                           // 3 bytes
> + * With this optimization, GCC generates only 7 bytes of instructions:
> + *      test   BYTE PTR [rdi+0x1f],0x60     // 4 bytes
> + *      sete   al                           // 3 bytes
> + */
> +#undef RTE_MBUF_DIRECT
> +#define RTE_MBUF_DIRECT(mb) \
> +	(!(((const uint8_t *)(mb))[offsetof(struct rte_mbuf, ol_flags) + 7] & \
> +	(uint8_t)((RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL) >> (7 * 8))))
> +static_assert(((RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL) >> (7 * 8)) << (7 * 8) ==
> +	(RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL),
> +	"RTE_MBUF_F_INDIRECT and/or RTE_MBUF_F_EXTERNAL are not in MSB.");
> +#endif
> +
Couple of comments/thoughts/questions here.

* This looks like a compiler limitation that should be fixed in GCC. If we
  put this optimization in, how will we know when/if we can remove it again
  in the future? I'm not sure we want this hanging around forever.
* Can the static_assert - which just checks that the flags are in the MSB -
  be simplified, e.g. to
  "((RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL) << CHAR_BIT) == 0"
  or "__builtin_ctzll(...) >= (7 * CHAR_BIT)"? (See the sketch after this
  list.)
* As in the previous bullet, I tend to prefer use of CHAR_BIT over a
  hard-coded 8.
* Is it necessary to limit this to just GCC and x86? If it leads to the
  best code on x86, why not enable it for all compilers? And what about
  non-x86 little-endian platforms?
* Does the actual macro need to be that long and complex? If we simplify it
  a bit, does the compiler go back to generating bad code? For example,
  using "(mb->ol_flags >> 56) & ((RTE_MBUF_F_INDIRECT | ..) >> 56)" - see
  the sketch after this list.
* If the above is true, do we actually need to write this in assembler to
  guarantee the compiler generates good code in all situations?
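
For the static_assert, a shorter equivalent might be (untested sketch,
assuming <limits.h> is included for CHAR_BIT):

	/* Holds iff both flags occupy only the top byte of the 64-bit
	 * field: left-shifting an unsigned 64-bit value by one byte
	 * discards exactly that byte.
	 */
	static_assert(((RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL) << CHAR_BIT) == 0,
		"RTE_MBUF_F_INDIRECT and/or RTE_MBUF_F_EXTERNAL are not in MSB.");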
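
Similarly, a shift-based variant of the macro itself would have the nice
property of being byte-order independent, which would also address the
portability question above. Again, only a sketch; whether GCC still emits
the single byte test for it would need checking:

	/* Compare only the top byte of ol_flags, via shifts rather than
	 * byte addressing, so it works regardless of endianness.
	 */
	#define RTE_MBUF_DIRECT(mb) \
		(!(((mb)->ol_flags >> (7 * CHAR_BIT)) & \
		((RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL) >> (7 * CHAR_BIT))))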

