[dpdk-dev] [PATCH v4 1/4] hash: add kv hash library

Medvedkin, Vladimir vladimir.medvedkin at intel.com
Thu Jun 25 21:56:10 CEST 2020


On 24/06/2020 00:06, Ananyev, Konstantin wrote:
>> Hi Vladimir,
>>
>>> --- /dev/null
>>> +++ b/lib/librte_hash/k32v64_hash.c
>>> @@ -0,0 +1,277 @@
>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>> + * Copyright(c) 2020 Intel Corporation
>>> + */
>>> +
>>> +#include <string.h>
>>> +
>>> +#include <rte_errno.h>
>>> +#include <rte_malloc.h>
>>> +#include <rte_memory.h>
>>> +
>>> +#include "k32v64_hash.h"
>>> +
>>> +static inline int
>>> +k32v64_hash_lookup(struct k32v64_hash_table *table, uint32_t key,
>>> +    uint32_t hash, uint64_t *value)
>>> +{
>>> +    return __k32v64_hash_lookup(table, key, hash, value, __kv_cmp_keys);
>>> +}
>>> +
>>> +static int
>>> +k32v64_hash_bulk_lookup(struct rte_kv_hash_table *ht, void *keys_p,
>>> +    uint32_t *hashes, void *values_p, unsigned int n)
>>> +{
>>> +    struct k32v64_hash_table *table = (struct k32v64_hash_table *)ht;
>>> +    uint32_t *keys = keys_p;
>>> +    uint64_t *values = values_p;
>>> +    int ret, cnt = 0;
>>> +    unsigned int i;
>>> +
>>> +    if (unlikely((table == NULL) || (keys == NULL) || (hashes == NULL) ||
>>> +            (values == NULL)))
>>> +        return -EINVAL;
>
> As a nit - this formal parameter checking is better done in the public function
> (rte_kv_hash_bulk_lookup) before dereferencing table and calling the actual lookup().
> Same story for modify() - the formal parameter checking can be done at the top of the public function.

Agree, I will move the checking into the public function.
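
For example, a minimal sketch of the public wrapper (the table->lookup
function pointer name here is illustrative, not necessarily what the next
revision will use):

int
rte_kv_hash_bulk_lookup(struct rte_kv_hash_table *table, void *keys,
    uint32_t *hashes, void *values, unsigned int n)
{
    /* validate formal parameters once, before dereferencing table */
    if ((table == NULL) || (keys == NULL) || (hashes == NULL) ||
            (values == NULL))
        return -EINVAL;

    /* the implementation-specific bulk lookup can then skip these checks */
    return table->lookup(table, keys, hashes, values, n);
}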


> BTW, why to unite add/delete into modify(), if internally you have 2 different functions
> (for add/delete) anyway?


This was done for future extensibility, in case we want to add additional
control plane features beyond add and delete in the future. If you think
it's unnecessary, I'll replace it with an add()/del() pair.
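
For reference, the replacement would be a pair along these lines (a sketch
only; the exact signatures would mirror the rest of this series):

int
rte_kv_hash_add(struct rte_kv_hash_table *table, void *key,
    uint32_t hash, void *value, int *found);

int
rte_kv_hash_delete(struct rte_kv_hash_table *table, void *key,
    uint32_t hash, void *old_value);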


>
>>> +
>>> +    for (i = 0; i < n; i++) {
>>> +        ret = k32v64_hash_lookup(table, keys[i], hashes[i],
>>> +            &values[i]);
>>> +        if (ret == 0)
>>> +            cnt++;
>>> +    }
>>> +    return cnt;
>>> +}
>>> +
>>> +#ifdef CC_AVX512VL_SUPPORT
>>> +int
>>> +k32v64_hash_bulk_lookup_avx512vl(struct rte_kv_hash_table *ht,
>>> +    void *keys_p, uint32_t *hashes, void *values_p, unsigned int n);
>>> +#endif
>>> +
>>> +static rte_kv_hash_bulk_lookup_t
>>> +get_lookup_bulk_fn(void)
>>> +{
>>> +#ifdef CC_AVX512VL_SUPPORT
>>> +    if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL))
>>> +        return k32v64_hash_bulk_lookup_avx512vl;
>>> +#endif
>>> +    return k32v64_hash_bulk_lookup;
>>> +}
>>> +
>>> +static int
>>> +k32v64_hash_add(struct k32v64_hash_table *table, uint32_t key,
>>> +    uint32_t hash, uint64_t value, uint64_t *old_value, int *found)
>>> +{
>>> +    uint32_t bucket;
>>> +    int i, idx, ret;
>>> +    uint8_t msk;
>>> +    struct k32v64_ext_ent *tmp, *ent, *prev = NULL;
>>> +
>>> +    if (table == NULL)
>>> +        return -EINVAL;
>>> +
>>> +    bucket = hash & table->bucket_msk;
>>> +    /* Search key in table. Update value if exists */
>>> +    for (i = 0; i < K32V64_KEYS_PER_BUCKET; i++) {
>>> +        if ((key == table->t[bucket].key[i]) &&
>>> +                (table->t[bucket].key_mask & (1 << i))) {
>>> +            *old_value = table->t[bucket].val[i];
>>> +            *found = 1;
>>> +            __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                __ATOMIC_ACQUIRE);
> On second thought - an atomic add is probably overkill here.
> Something like:
> void update_start(struct k32v64_hash_bucket *bkt)
> {
>     bkt->cnt++;
>     __atomic_thread_fence(__ATOMIC_ACQ_REL);
> }
>
> void update_finish(struct k32v64_hash_bucket *bkt)
> {
>     __atomic_thread_fence(__ATOMIC_ACQ_REL);
>     bkt->cnt++;
> }
>
> I think should be sufficient here.


Good point
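
To make the intended usage concrete, the in-place value update above would
then become something like this (a sketch; concurrent readers retry while
cnt is odd or changes across the lookup):

    update_start(&table->t[bucket]);    /* cnt becomes odd: update in progress */
    table->t[bucket].val[i] = value;
    update_finish(&table->t[bucket]);   /* cnt becomes even: update complete */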


>
>>> +            table->t[bucket].val[i] = value;
>>> +            __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                __ATOMIC_RELEASE);
>>> +            return 0;
>>> +        }
>>> +    }
>>> +
>>> +    if (!SLIST_EMPTY(&table->t[bucket].head)) {
>>> +        SLIST_FOREACH(ent, &table->t[bucket].head, next) {
>>> +            if (ent->key == key) {
>>> +                *old_value = ent->val;
>>> +                *found = 1;
>>> +                __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                    __ATOMIC_ACQUIRE);
>>> +                ent->val = value;
>>> +                __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                    __ATOMIC_RELEASE);
>>> +                return 0;
>>> +            }
>>> +        }
>>> +    }
>>> +
>>> +    msk = ~table->t[bucket].key_mask & VALID_KEY_MSK;
>>> +    if (msk) {
>>> +        idx = __builtin_ctz(msk);
>>> +        table->t[bucket].key[idx] = key;
>>> +        table->t[bucket].val[idx] = value;
>>> +        __atomic_or_fetch(&table->t[bucket].key_mask, 1 << idx,
>>> +            __ATOMIC_RELEASE);
>> I think this case also has to be guarded with table->t[bucket].cnt updates.
>>
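
One way to guard this branch would be the same cnt bracketing used for the
in-place value update above (a sketch only):

    __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_ACQUIRE);
    table->t[bucket].key[idx] = key;
    table->t[bucket].val[idx] = value;
    table->t[bucket].key_mask |= 1 << idx;
    __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_RELEASE);
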
>>> +        table->nb_ent++;
>>> +        *found = 0;
>>> +        return 0;
>>> +    }
>>> +
>>> +    ret = rte_mempool_get(table->ext_ent_pool, (void **)&ent);
>>> +    if (ret < 0)
>>> +        return ret;
>>> +
>>> +    SLIST_NEXT(ent, next) = NULL;
>>> +    ent->key = key;
>>> +    ent->val = value;
>>> +    rte_smp_wmb();
>> __atomic_thread_fence(__ATOMIC_RELEASE);
>> ?
>>
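
I.e. the entry would be fully initialized before it is linked into the
list; with the C11 builtin the sequence would look like (sketch):

    SLIST_NEXT(ent, next) = NULL;
    ent->key = key;
    ent->val = value;
    /* order the entry initialization before the list insertion below */
    __atomic_thread_fence(__ATOMIC_RELEASE);
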
>>> +    SLIST_FOREACH(tmp, &table->t[bucket].head, next)
>>> +        prev = tmp;
>>> +
>>> +    if (prev == NULL)
>>> +        SLIST_INSERT_HEAD(&table->t[bucket].head, ent, next);
>>> +    else
>>> +        SLIST_INSERT_AFTER(prev, ent, next);
>>> +
>>> +    table->nb_ent++;
>>> +    table->nb_ext_ent++;
>>> +    *found = 0;
>>> +    return 0;
>>> +}
>>> +
>>> +static int
>>> +k32v64_hash_delete(struct k32v64_hash_table *table, uint32_t key,
>>> +    uint32_t hash, uint64_t *old_value)
>>> +{
>>> +    uint32_t bucket;
>>> +    int i;
>>> +    struct k32v64_ext_ent *ent;
>>> +
>>> +    if (table == NULL)
>>> +        return -EINVAL;
>>> +
>>> +    bucket = hash & table->bucket_msk;
>>> +
>>> +    for (i = 0; i < K32V64_KEYS_PER_BUCKET; i++) {
>>> +        if ((key == table->t[bucket].key[i]) &&
>>> +                (table->t[bucket].key_mask & (1 << i))) {
>>> +            *old_value = table->t[bucket].val[i];
>>> +            ent = SLIST_FIRST(&table->t[bucket].head);
>>> +            if (ent) {
>>> +                __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                    __ATOMIC_ACQUIRE);
>>> +                table->t[bucket].key[i] = ent->key;
>>> +                table->t[bucket].val[i] = ent->val;
>>> +                SLIST_REMOVE_HEAD(&table->t[bucket].head, next);
>>> +                __atomic_fetch_add(&table->t[bucket].cnt, 1,
>>> +                    __ATOMIC_RELEASE);
>>> +                table->nb_ext_ent--;
>>> +            } else
>>> +                __atomic_and_fetch(&table->t[bucket].key_mask,
>>> +                    ~(1 << i), __ATOMIC_RELEASE);
>> Same thought as above - it is safer to guard this mask update with a cnt update.
>>
>>> +            if (ent)
>>> +                rte_mempool_put(table->ext_ent_pool, ent);
>> Can't this 'if(ent)' be merged with previous 'if (ent) {...}' above?
>>
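
Merged, the branch could read (a sketch; the mempool put moves after the
releasing cnt update):

    if (ent) {
        __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_ACQUIRE);
        table->t[bucket].key[i] = ent->key;
        table->t[bucket].val[i] = ent->val;
        SLIST_REMOVE_HEAD(&table->t[bucket].head, next);
        __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_RELEASE);
        table->nb_ext_ent--;
        rte_mempool_put(table->ext_ent_pool, ent);
    } else
        __atomic_and_fetch(&table->t[bucket].key_mask,
            ~(1 << i), __ATOMIC_RELEASE);
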
>>> +            table->nb_ent--;
>>> +            return 0;
>>> +        }
>>> +    }
>>> +
>>> +    SLIST_FOREACH(ent, &table->t[bucket].head, next)
>>> +        if (ent->key == key)
>>> +            break;
>>> +
>>> +    if (ent == NULL)
>>> +        return -ENOENT;
>>> +
>>> +    *old_value = ent->val;
>>> +
>>> +    __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_ACQUIRE);
>>> +    SLIST_REMOVE(&table->t[bucket].head, ent, k32v64_ext_ent, next);
>>> +    __atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_RELEASE);
>>> +    rte_mempool_put(table->ext_ent_pool, ent);
>>> +
>>> +    table->nb_ext_ent--;
>>> +    table->nb_ent--;
>>> +
>>> +    return 0;
>>> +}
>>> +

-- 
Regards,
Vladimir


