[PATCH v2] mempool cache: add zero-copy get and put functions
Kamalakshitha Aligeri
Kamalakshitha.Aligeri at arm.com
Tue Nov 29 21:54:22 CET 2022
> -----Original Message-----
> From: Morten Brørup <mb at smartsharesystems.com>
> Sent: Wednesday, November 16, 2022 12:04 PM
> To: olivier.matz at 6wind.com; andrew.rybchenko at oktetlabs.ru; Honnappa
> Nagarahalli <Honnappa.Nagarahalli at arm.com>; Kamalakshitha Aligeri
> <Kamalakshitha.Aligeri at arm.com>; bruce.richardson at intel.com;
> dev at dpdk.org
> Cc: nd <nd at arm.com>; Morten Brørup <mb at smartsharesystems.com>
> Subject: [PATCH v2] mempool cache: add zero-copy get and put functions
>
> Zero-copy access to mempool caches is beneficial for PMD performance, and
> must be provided by the mempool library to fix [Bug 1052] without a
> performance regression.
>
> [Bug 1052]: https://bugs.dpdk.org/show_bug.cgi?id=1052
>
> v2:
> * Fix checkpatch warnings.
> * Fix missing registration of trace points.
> * The functions are inline, so they don't go into the map file.
> v1 changes from the RFC:
> * Removed run-time parameter checks. (Honnappa)
> This is a hot fast-path function; it requires correct application
> behaviour, i.e. function parameters must be valid.
> * Added RTE_ASSERT for parameters instead.
> Code for this is only generated if built with RTE_ENABLE_ASSERT.
> * Removed fallback when 'cache' parameter is not set. (Honnappa)
> * Chose the simple get function; i.e. do not move the existing objects in
> the cache to the top of the new stack, just leave them at the bottom.
> * Renamed the functions. Other suggestions are welcome, of course. ;-)
> * Updated the function descriptions.
> * Added the functions to trace_fp and version.map.
>
> Signed-off-by: Morten Brørup <mb at smartsharesystems.com>
> ---
> lib/mempool/mempool_trace_points.c | 6 ++
> lib/mempool/rte_mempool.h | 124 +++++++++++++++++++++++++++++
> lib/mempool/rte_mempool_trace_fp.h | 16 ++++
> lib/mempool/version.map | 4 +
> 4 files changed, 150 insertions(+)
>
> diff --git a/lib/mempool/mempool_trace_points.c b/lib/mempool/mempool_trace_points.c
> index 4ad76deb34..a6070799af 100644
> --- a/lib/mempool/mempool_trace_points.c
> +++ b/lib/mempool/mempool_trace_points.c
> @@ -77,3 +77,9 @@ RTE_TRACE_POINT_REGISTER(rte_mempool_trace_ops_free,
>
> RTE_TRACE_POINT_REGISTER(rte_mempool_trace_set_ops_byname,
> lib.mempool.set.ops.byname)
> +
> +RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_zc_put_bulk,
> + lib.mempool.cache.zc.put.bulk)
> +
> +RTE_TRACE_POINT_REGISTER(rte_mempool_trace_cache_zc_get_bulk,
> + lib.mempool.cache.zc.get.bulk)
> diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
> index 9f530db24b..5e6da06bc7 100644
> --- a/lib/mempool/rte_mempool.h
> +++ b/lib/mempool/rte_mempool.h
> @@ -47,6 +47,7 @@
> #include <rte_ring.h>
> #include <rte_memcpy.h>
> #include <rte_common.h>
> +#include <rte_errno.h>
>
> #include "rte_mempool_trace_fp.h"
>
> @@ -1346,6 +1347,129 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
> cache->len = 0;
> }
>
> +/**
> + * @warning
> + * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
> + *
> + * Zero-copy put objects in a user-owned mempool cache backed by the specified mempool.
> + *
> + * @param cache
> + * A pointer to the mempool cache.
> + * @param mp
> + * A pointer to the mempool.
> + * @param n
> + * The number of objects to be put in the mempool cache.
> + * Must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
> + * @return
> + * The pointer to where to put the objects in the mempool cache.
> + */
The rte_mempool_cache_zc_put_bulk function takes *cache as an input parameter, which means rte_mempool_default_cache must be called in the PMD code, because i40e_tx_queue does not store a pointer to the mempool (i40e_rx_queue does, though).
So, should we change the APIs?
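To make the question concrete, here is a rough sketch of what a TX free path would look like with the current signature. The helper name tx_free_bulk_zc and its parameters are placeholders, not actual driver code; it assumes a non-zero per-lcore default cache is configured, that all mbufs in the burst come from the same mempool, and that n <= RTE_MEMPOOL_CACHE_MAX_SIZE:

#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Hypothetical TX completion path: return n transmitted mbufs to their
 * mempool via the zero-copy cache API.
 */
static inline void
tx_free_bulk_zc(struct rte_mbuf **pkts, unsigned int n)
{
	/* No mempool pointer in i40e_tx_queue, so derive it from the mbufs. */
	struct rte_mempool *mp = pkts[0]->pool;
	/* Per-burst lookup of the lcore's default cache. */
	struct rte_mempool_cache *cache =
			rte_mempool_default_cache(mp, rte_lcore_id());
	void **cache_objs;
	unsigned int i;

	/* Reserve n slots in the cache and write the pointers directly. */
	cache_objs = rte_mempool_cache_zc_put_bulk(cache, mp, n);
	for (i = 0; i < n; i++)
		cache_objs[i] = pkts[i];
}

If the queue structure instead cached the rte_mempool_cache pointer once at queue setup time, the per-burst rte_mempool_default_cache() lookup could be avoided.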
> +__rte_experimental
> +static __rte_always_inline void *
> +rte_mempool_cache_zc_put_bulk(struct rte_mempool_cache *cache,
> + struct rte_mempool *mp,
> + unsigned int n)
> +{
> + void **cache_objs;
> +
> + RTE_ASSERT(cache != NULL);
> + RTE_ASSERT(mp != NULL);
> + RTE_ASSERT(n <= RTE_MEMPOOL_CACHE_MAX_SIZE);
> +
> + rte_mempool_trace_cache_zc_put_bulk(cache, mp, n);
> +
> + /* Increment stats now, adding in mempool always succeeds. */
> + RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
> + RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
> +
> + /*
> + * The cache follows the following algorithm:
> + * 1. If the objects cannot be added to the cache without crossing
> + * the flush threshold, flush the cache to the backend.
> + * 2. Add the objects to the cache.
> + */
> +
> + if (cache->len + n <= cache->flushthresh) {
> + cache_objs = &cache->objs[cache->len];
> + cache->len += n;
> + } else {
> + cache_objs = &cache->objs[0];
> + rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
> + cache->len = n;
> + }
> +
> + return cache_objs;
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: This API may change, or be removed, without prior notice.
> + *
> + * Zero-copy get objects from a user-owned mempool cache backed by the specified mempool.
> + *
> + * @param cache
> + * A pointer to the mempool cache.
> + * @param mp
> + * A pointer to the mempool.
> + * @param n
> + * The number of objects to prefetch into the mempool cache.
> + * Must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE.
> + * @return
> + * The pointer to the objects in the mempool cache.
> + * NULL on error; i.e. the cache + the pool does not contain n objects.
> + * With rte_errno set to the error code of the mempool dequeue function.
> + */
> +__rte_experimental
> +static __rte_always_inline void *
> +rte_mempool_cache_zc_get_bulk(struct rte_mempool_cache *cache,
> + struct rte_mempool *mp,
> + unsigned int n)
> +{
> + unsigned int len;
> +
> + RTE_ASSERT(cache != NULL);
> + RTE_ASSERT(mp != NULL);
> + RTE_ASSERT(n <= RTE_MEMPOOL_CACHE_MAX_SIZE);
> +
> + rte_mempool_trace_cache_zc_get_bulk(cache, mp, n);
> +
> + len = cache->len;
> +
> + if (unlikely(n > len)) {
> + /* Fill the cache from the backend; fetch size + requested - len objects. */
> + int ret;
> + const unsigned int size = cache->size;
> +
> + ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], size + n - len);
> + if (unlikely(ret < 0)) {
> + /*
> + * We are buffer constrained.
> + * Do not fill the cache, just satisfy the request.
> + */
> + ret = rte_mempool_ops_dequeue_bulk(mp, &cache->objs[len], n - len);
> + if (unlikely(ret < 0)) {
> + /* Unable to satisfy the request. */
> +
> + RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
> + RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
> +
> + rte_errno = -ret;
> + return NULL;
> + }
> +
> + len = 0;
> + } else
> + len = size;
> + } else
> + len -= n;
> +
> + cache->len = len;
> +
> + RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
> + RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
> +
> + return &cache->objs[len];
> +}
> +
> /**
> * @internal Put several objects back in the mempool; used internally.
> * @param mp
> diff --git a/lib/mempool/rte_mempool_trace_fp.h b/lib/mempool/rte_mempool_trace_fp.h
> index ed060e887c..00567fb1cf 100644
> --- a/lib/mempool/rte_mempool_trace_fp.h
> +++ b/lib/mempool/rte_mempool_trace_fp.h
> @@ -109,6 +109,22 @@ RTE_TRACE_POINT_FP(
> rte_trace_point_emit_ptr(mempool);
> )
>
> +RTE_TRACE_POINT_FP(
> + rte_mempool_trace_cache_zc_put_bulk,
> + RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
> + rte_trace_point_emit_ptr(cache);
> + rte_trace_point_emit_ptr(mempool);
> + rte_trace_point_emit_u32(nb_objs);
> +)
> +
> +RTE_TRACE_POINT_FP(
> + rte_mempool_trace_cache_zc_get_bulk,
> + RTE_TRACE_POINT_ARGS(void *cache, void *mempool, uint32_t nb_objs),
> + rte_trace_point_emit_ptr(cache);
> + rte_trace_point_emit_ptr(mempool);
> + rte_trace_point_emit_u32(nb_objs);
> +)
> +
> #ifdef __cplusplus
> }
> #endif
> diff --git a/lib/mempool/version.map b/lib/mempool/version.map
> index b67d7aace7..927477b977 100644
> --- a/lib/mempool/version.map
> +++ b/lib/mempool/version.map
> @@ -63,6 +63,10 @@ EXPERIMENTAL {
> __rte_mempool_trace_ops_alloc;
> __rte_mempool_trace_ops_free;
> __rte_mempool_trace_set_ops_byname;
> +
> + # added in 23.03
> + __rte_mempool_trace_cache_zc_put_bulk;
> + __rte_mempool_trace_cache_zc_get_bulk;
> };
>
> INTERNAL {
> --
> 2.17.1
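
For completeness, a similar sketch for the get side (e.g. RX descriptor rearm), under the same assumptions and again with placeholder names (rx_rearm_bulk_zc, rearm). A real driver would consume the pointers directly from the returned cache slots rather than copying them into a separate array, which is where the zero-copy benefit comes from:

#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/*
 * Hypothetical RX rearm path: fetch n mbuf pointers straight out of the
 * mempool cache.
 */
static inline int
rx_rearm_bulk_zc(struct rte_mempool *mp, struct rte_mbuf **rearm, unsigned int n)
{
	struct rte_mempool_cache *cache =
			rte_mempool_default_cache(mp, rte_lcore_id());
	void **cache_objs;
	unsigned int i;

	cache_objs = rte_mempool_cache_zc_get_bulk(cache, mp, n);
	if (unlikely(cache_objs == NULL))
		return -rte_errno;	/* cache + pool could not supply n objects */

	for (i = 0; i < n; i++)
		rearm[i] = (struct rte_mbuf *)cache_objs[i];

	return 0;
}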