[PATCH v5 03/10] memarea: support alloc/free/update-refcnt API
fengchengwen
fengchengwen at huawei.com
Sat Oct 8 10:02:28 CEST 2022
Hi Mattias,
On 2022/10/7 3:43, Mattias Rönnblom wrote:
> On 2022-10-05 06:09, datshan wrote:
>> From: Chengwen Feng <fengchengwen at huawei.com>
>>
>> This patch supports rte_memarea_alloc()/rte_memarea_free()/
>> rte_memarea_update_refcnt() API.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen at huawei.com>
>> ---
>> doc/guides/prog_guide/memarea_lib.rst | 10 ++
>> lib/memarea/memarea_private.h | 3 +
>> lib/memarea/rte_memarea.c | 143 ++++++++++++++++++++++++++
>> lib/memarea/rte_memarea.h | 56 ++++++++++
>> lib/memarea/version.map | 3 +
>> 5 files changed, 215 insertions(+)
>>
>> diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
>> index b96dad15f6..41bc0a90cd 100644
>> --- a/doc/guides/prog_guide/memarea_lib.rst
>> +++ b/doc/guides/prog_guide/memarea_lib.rst
>> @@ -33,6 +33,16 @@ returns the pointer to the created memarea or ``NULL`` if the creation failed.
>> The ``rte_memarea_destroy()`` function is used to destroy a memarea.
>> +The ``rte_memarea_alloc()`` function is used to allocate one memory object
>> +from the memarea.
>> +
>> +The ``rte_memarea_free()`` function is used to free one memory object which
>> +was allocated by ``rte_memarea_alloc()``.
>> +
>> +The ``rte_memarea_update_refcnt()`` function is used to update the memory
>> +object's reference count; if the count reaches zero, the memory object is
>> +freed back to the memarea.
>> +
>> Reference
>> ---------
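
As a side note for thread readers, a minimal usage sketch of the three new
APIs (the rte_memarea_param field names follow what is visible in this
series, so the exact initializer may differ):

    #include <rte_memarea.h>

    struct rte_memarea_param init = {
        .name = "example-ma",
        .source = RTE_MEMAREA_SOURCE_SYSTEM_API,
        .total_sz = 1 << 20,          /* 1 MiB backing area */
        .mt_safe = true,              /* guard alloc/free with the spinlock */
    };
    struct rte_memarea *ma = rte_memarea_create(&init);
    if (ma == NULL)
        return -1;

    /* the cookie (0xC001 here) is an arbitrary caller-chosen debug mark */
    void *obj = rte_memarea_alloc(ma, 128, 0xC001);
    if (obj != NULL)
        rte_memarea_free(ma, obj);    /* same as update_refcnt(ma, obj, -1) */
    rte_memarea_destroy(ma);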
>> diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
>> index c76392d3e6..98406879b9 100644
>> --- a/lib/memarea/memarea_private.h
>> +++ b/lib/memarea/memarea_private.h
>> @@ -25,6 +25,9 @@ struct rte_memarea {
>> void *area_addr;
>> struct memarea_elem_list elem_list;
>> struct memarea_elem_list free_list;
>> +
>> + uint64_t alloc_fails;
>> + uint64_t refcnt_check_fails;
>> } __rte_cache_aligned;
>> #endif /* MEMAREA_PRIVATE_H */
>> diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
>> index 868da7661d..a072f07f20 100644
>> --- a/lib/memarea/rte_memarea.c
>> +++ b/lib/memarea/rte_memarea.c
>> @@ -4,6 +4,7 @@
>> #include <stdio.h>
>> #include <stdlib.h>
>> +#include <sys/queue.h>
>> #include <rte_common.h>
>> #include <rte_log.h>
>> @@ -94,6 +95,8 @@ memarea_alloc_area(const struct rte_memarea_param *init)
>> ptr = memarea_alloc_from_system_api(init->total_sz);
>> else if (init->source == RTE_MEMAREA_SOURCE_USER_ADDR)
>> ptr = init->user_addr;
>> + else if (init->source == RTE_MEMAREA_SOURCE_USER_MEMAREA)
>> + ptr = rte_memarea_alloc(init->user_memarea, init->total_sz, 0);
>> if (ptr == NULL)
>> RTE_LOG(ERR, MEMAREA, "memarea: %s alloc memory area fail!\n", init->name);
>> @@ -145,6 +148,8 @@ memarea_free_area(struct rte_memarea *ma)
>> rte_free(ma->area_addr);
>> else if (ma->init.source == RTE_MEMAREA_SOURCE_SYSTEM_API)
>> free(ma->area_addr);
>> + else if (ma->init.source == RTE_MEMAREA_SOURCE_USER_MEMAREA)
>> + rte_memarea_free(ma->init.user_memarea, ma->area_addr);
>> }
>> void
>> @@ -155,3 +160,141 @@ rte_memarea_destroy(struct rte_memarea *ma)
>> memarea_free_area(ma);
>> rte_free(ma);
>> }
>> +
>> +static inline void
>> +memarea_lock(struct rte_memarea *ma)
>> +{
>> + if (ma->init.mt_safe)
>> + rte_spinlock_lock(&ma->lock);
>> +}
>> +
>> +static inline void
>> +memarea_unlock(struct rte_memarea *ma)
>> +{
>> + if (ma->init.mt_safe)
>> + rte_spinlock_unlock(&ma->lock);
>> +}
>> +
>> +#define memarea_roundup(val, align) ((((val) + ((align) - 1)) / (align)) * (align))
>> +
>
> You should be able to use RTE_ALIGN_CEIL() instead of your own roundup macro, assuming this function isn't used in a patch I'm yet to review, with a potentially non-power-of-2 align parameter value.
>
> If not, change the macro to a function.
Currently, the default align size is RTE_CACHE_LINE_SIZE, which is a power of 2, so I will use RTE_ALIGN_CEIL in v6.
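
A minimal sketch of the v6 change (RTE_ALIGN_CEIL() comes from
<rte_common.h> and requires a power-of-2 align):

    /* replaces memarea_roundup(need_size, RTE_CACHE_LINE_SIZE) */
    size_t align_size = RTE_ALIGN_CEIL(need_size, RTE_CACHE_LINE_SIZE);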
>
>> +static inline bool
>> +memarea_whether_add_node(size_t free_size, size_t need_size)
>> +{
>> + size_t align_size = memarea_roundup(need_size, RTE_CACHE_LINE_SIZE);
>> + return free_size > align_size && (free_size - align_size) > sizeof(struct memarea_elem);
>> +}
>> +
>> +static inline void
>> +memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_size)
>> +{
>> + size_t align_size = memarea_roundup(need_size, RTE_CACHE_LINE_SIZE);
>> + struct memarea_elem *new_elem;
>> + new_elem = (struct memarea_elem *)((uintptr_t)elem + sizeof(struct memarea_elem) +
>> + align_size);
>
> Use RTE_PTR_ADD().
>
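A minimal sketch of the RTE_PTR_ADD() form (RTE_PTR_ADD() is the pointer
arithmetic helper from <rte_common.h>; it returns void *, which converts
implicitly in C):

    new_elem = RTE_PTR_ADD(elem, sizeof(struct memarea_elem) + align_size);
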
>> + new_elem->size = elem->size - align_size - sizeof(struct memarea_elem);
>> + new_elem->cookie = MEMAREA_FREE_ELEM_COOKIE;
>> + new_elem->refcnt = 0;
>> + TAILQ_INSERT_AFTER(&ma->elem_list, elem, new_elem, elem_node);
>> + TAILQ_INSERT_AFTER(&ma->free_list, elem, new_elem, free_node);
>> + elem->size = align_size;
>> +}
>> +
>> +void *
>> +rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
>> +{
>> + struct memarea_elem *elem;
>> + void *ptr = NULL;
>> +
>> + if (unlikely(ma == NULL || size == 0))
>> + return NULL;
>> +
>> + memarea_lock(ma);
>> + TAILQ_FOREACH(elem, &ma->free_list, free_node) {
>> + if (elem->size < size)
>> + continue;
>> + if (memarea_whether_add_node(elem->size, size))
>> + memarea_add_node(ma, elem, size);
>> + elem->cookie = cookie;
>> + elem->refcnt = 1;
>> + TAILQ_REMOVE(&ma->free_list, elem, free_node);
>> + ptr = (void *)((uintptr_t)elem + sizeof(struct memarea_elem));
>
> RTE_PTR_ADD() again.
>
>> + break;
>> + }
>> + if (unlikely(ptr == NULL))
>> + ma->alloc_fails++;
>> + memarea_unlock(ma);
>> +
>> + return ptr;
>> +}
>> +
>> +void
>> +rte_memarea_free(struct rte_memarea *ma, void *ptr)
>> +{
>> + rte_memarea_update_refcnt(ma, ptr, -1);
>> +}
>> +
>> +static inline void
>> +memarea_merge_node(struct rte_memarea *ma, struct memarea_elem *curr,
>> + struct memarea_elem *next, bool del_next_from_free,
>> + bool add_curr_to_free)
>> +{
>> + curr->size += next->size + sizeof(struct memarea_elem);
>> + next->size = 0;
>> + next->cookie = 0;
>> + TAILQ_REMOVE(&ma->elem_list, next, elem_node);
>> + if (del_next_from_free)
>> + TAILQ_REMOVE(&ma->free_list, next, free_node);
>> + if (add_curr_to_free) {
>> + curr->cookie = MEMAREA_FREE_ELEM_COOKIE;
>> + TAILQ_INSERT_TAIL(&ma->free_list, curr, free_node);
>> + }
>> +}
>> +
>> +static inline void
>> +memarea_free_elem(struct rte_memarea *ma, struct memarea_elem *elem)
>> +{
>> + struct memarea_elem *prev, *next;
>> + bool merged = false;
>> + prev = TAILQ_PREV(elem, memarea_elem_list, elem_node);
>> + next = TAILQ_NEXT(elem, elem_node);
>> + if (prev != NULL && prev->refcnt == 0) {
>> + memarea_merge_node(ma, prev, elem, false, false);
>> + elem = prev;
>> + merged = true;
>> + }
>> + if (next != NULL && next->refcnt == 0) {
>> + memarea_merge_node(ma, elem, next, true, !merged);
>> + merged = true;
>> + }
>> + if (!merged) {
>> + elem->cookie = MEMAREA_FREE_ELEM_COOKIE;
>> + TAILQ_INSERT_TAIL(&ma->free_list, elem, free_node);
>> + }
>> +}
>> +
>> +void
>> +rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
>> +{
>> + struct memarea_elem *elem = (struct memarea_elem *)((uintptr_t)ptr -
>> + sizeof(struct memarea_elem));
>> +
>> + if (unlikely(ma == NULL || ptr == NULL))
>> + return;
>> +
>> + memarea_lock(ma);
>> + if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
>> + RTE_LOG(ERR, MEMAREA,
>> + "memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
>> + ma->init.name, elem->cookie, elem->refcnt, value);
>> + ma->refcnt_check_fails++;
>> + if (elem->refcnt > 0)
>> + elem->refcnt += value;
>> + memarea_unlock(ma);
>> + return;
>> + }
>> +
>> + elem->refcnt += value;
>> + if (elem->refcnt == 0)
>> + memarea_free_elem(ma, elem);
>> + memarea_unlock(ma);
>> +}
>> diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
>> index 543cda4cac..10e0d6ad5a 100644
>> --- a/lib/memarea/rte_memarea.h
>> +++ b/lib/memarea/rte_memarea.h
>> @@ -138,6 +138,62 @@ struct rte_memarea *rte_memarea_create(const struct rte_memarea_param *init);
>> __rte_experimental
>> void rte_memarea_destroy(struct rte_memarea *ma);
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Allocate memory from the memarea.
>> + *
>> + * Allocate one memory object from the memarea.
>> + *
>> + * @param ma
>> + * The pointer to the memarea.
>> + * @param size
>> + * The memory size to be allocated.
>> + * @param cookie
>> + * User-provided footprint which can be used to debug memory leaks.
>> + *
>> + * @return
>> + * Non-NULL on success. Otherwise NULL is returned.
>> + */
>> +__rte_experimental
>> +void *rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie);
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Free memory back to the memarea.
>> + *
>> + * Free one memory object back to the memarea.
>> + *
>> + * @param ma
>> + * The pointer to the memarea.
>> + * @param ptr
>> + * The pointer to the memory object to be freed.
>> + */
>> +__rte_experimental
>> +void rte_memarea_free(struct rte_memarea *ma, void *ptr);
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Update the refcnt of a memory object.
>> + *
>> + * Update one memory object's refcnt.
>> + * When the refcnt reaches zero, the memory object is freed.
>> + *
>> + * @param ma
>> + * The pointer to the memarea.
>> + * @param ptr
>> + * The pointer to the memory object whose refcnt is to be updated.
>> + * @param value
>> + * The delta to apply to the refcnt (may be negative).
>> + */
>> +__rte_experimental
>> +void rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value);
>> +
>> #ifdef __cplusplus
>> }
>> #endif
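
As a usage note, a minimal refcnt-sharing sketch (rte_memarea_alloc() sets
the refcnt to 1, and rte_memarea_free() is equivalent to an update of -1):

    void *obj = rte_memarea_alloc(ma, 256, 0xC002);
    rte_memarea_update_refcnt(ma, obj, 1); /* second owner takes a ref: 1 -> 2 */
    rte_memarea_free(ma, obj);             /* first owner drops: 2 -> 1 */
    rte_memarea_free(ma, obj);             /* last owner drops: 1 -> 0, freed */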
>> diff --git a/lib/memarea/version.map b/lib/memarea/version.map
>> index f36a04d7cf..a0026fc5f9 100644
>> --- a/lib/memarea/version.map
>> +++ b/lib/memarea/version.map
>> @@ -1,8 +1,11 @@
>> EXPERIMENTAL {
>> global:
>> + rte_memarea_alloc;
>> rte_memarea_create;
>> rte_memarea_destroy;
>> + rte_memarea_free;
>> + rte_memarea_update_refcnt;
>> local: *;
>> };
>