[dpdk-dev] [PATCH v2] hash table: add an iterator over conflicting entries

Fu, Qiaobin qiaobinf at bu.edu
Thu Aug 16 09:30:06 CEST 2018


Function rte_hash_iterate_conflict_entries() iterates over
the entries that conflict with an incoming entry.

Iterating over conflicting entries lets the caller decide
whether the incoming entry is more valuable than the entries
already in the hash table. This is particularly useful after
an insertion failure, as in the sketch below.
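
A minimal usage sketch (insert_or_evict() and is_more_valuable() are
hypothetical application code, not part of this patch):

	#include <errno.h>
	#include <stdbool.h>
	#include <rte_hash.h>

	/*
	 * Hypothetical policy deciding whether new_key should replace
	 * old_key; a real application would implement its own criterion.
	 */
	static bool
	is_more_valuable(const void *new_key, const void *old_key)
	{
		(void)new_key;
		(void)old_key;
		return false;	/* Placeholder: never evict. */
	}

	/* Try to insert new_key; on failure, consider evicting a victim. */
	int
	insert_or_evict(const struct rte_hash *h, const void *new_key)
	{
		hash_sig_t sig = rte_hash_hash(h, new_key);
		int pos = rte_hash_add_key_with_hash(h, new_key, sig);
		struct rte_conflict_iterator_state state;
		const void *key, *data;

		if (pos != -ENOSPC)
			return pos;

		/* Both candidate buckets are full; look for a victim. */
		rte_hash_iterator_conflict_entries_init(h, sig, &state);
		while (rte_hash_iterate_conflict_entries(&state, &key,
				&data) >= 0) {
			if (!is_more_valuable(new_key, key))
				continue;
			/* Evict the victim and retry the insertion. */
			rte_hash_del_key(h, key);
			return rte_hash_add_key_with_hash(h, new_key, sig);
		}

		return pos;
	}

Any entry returned by the iterator shares its buckets with new_key, so
deleting it frees a slot that the retried insertion can use.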

v2:
* Fix the style issue

* Make the API more universal

Signed-off-by: Qiaobin Fu <qiaobinf at bu.edu>
Reviewed-by: Cody Doucette <doucette at bu.edu>
Reviewed-by: Michel Machado <michel at digirati.com.br>
Reviewed-by: Keith Wiles <keith.wiles at intel.com>
Reviewed-by: Yipeng Wang <yipeng1.wang at intel.com>
---
 lib/librte_hash/rte_cuckoo_hash.c    | 81 ++++++++++++++++++++++++++++
 lib/librte_hash/rte_hash.h           | 43 ++++++++++++++
 lib/librte_hash/rte_hash_version.map |  7 +++
 3 files changed, 131 insertions(+)

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a07543a29..de69f9966 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -42,6 +42,13 @@ static struct rte_tailq_elem rte_hash_tailq = {
 };
 EAL_REGISTER_TAILQ(rte_hash_tailq)
 
+struct rte_hash_iterator_conflict_entries_state {
+	const struct rte_hash *h;
+	uint32_t              vnext;
+	uint32_t              primary_bidx;
+	uint32_t              secondary_bidx;
+};
+
 struct rte_hash *
 rte_hash_find_existing(const char *name)
 {
@@ -1160,3 +1167,77 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 
 	return position - 1;
 }
+
+/* Get the primary bucket index given the precomputed hash value. */
+static inline uint32_t
+rte_hash_get_primary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+	return sig & h->bucket_bitmask;
+}
+
+/* Get the secondary bucket index given the precomputed hash value. */
+static inline uint32_t
+rte_hash_get_secondary_bucket(const struct rte_hash *h, hash_sig_t sig)
+{
+	return rte_hash_secondary_hash(sig) & h->bucket_bitmask;
+}
+
+int32_t __rte_experimental
+rte_hash_iterator_conflict_entries_init(const struct rte_hash *h,
+	hash_sig_t sig, struct rte_conflict_iterator_state *state)
+{
+	struct rte_hash_iterator_conflict_entries_state *__state;
+
+	RETURN_IF_TRUE(((h == NULL) || (state == NULL)), -EINVAL);
+
+	__state = (struct rte_hash_iterator_conflict_entries_state *)state;
+	__state->h = h;
+	__state->vnext = 0;
+	__state->primary_bidx = rte_hash_get_primary_bucket(h, sig);
+	__state->secondary_bidx = rte_hash_get_secondary_bucket(h, sig);
+
+	return 0;
+}
+
+int32_t __rte_experimental
+rte_hash_iterate_conflict_entries(struct rte_conflict_iterator_state *state,
+	const void **key, const void **data)
+{
+	struct rte_hash_iterator_conflict_entries_state *__state;
+
+	RETURN_IF_TRUE(((state == NULL) || (key == NULL) ||
+		(data == NULL)), -EINVAL);
+
+	__state = (struct rte_hash_iterator_conflict_entries_state *)state;
+
+	while (__state->vnext < RTE_HASH_BUCKET_ENTRIES * 2) {
+		uint32_t bidx = (__state->vnext < RTE_HASH_BUCKET_ENTRIES) ?
+			__state->primary_bidx : __state->secondary_bidx;
+		uint32_t next = __state->vnext & (RTE_HASH_BUCKET_ENTRIES - 1);
+		uint32_t position = __state->h->buckets[bidx].key_idx[next];
+		struct rte_hash_key *next_key;
+		/*
+		 * The test below is unlikely because this iterator is meant
+		 * to be used after a failed insert.
+		 */
+		if (unlikely(position == EMPTY_SLOT))
+			goto next;
+
+		/* Get the entry in key table. */
+		next_key = (struct rte_hash_key *) (
+			(char *)__state->h->key_store +
+			position * __state->h->key_entry_size);
+		/* Return key and data. */
+		*key = next_key->key;
+		*data = next_key->pdata;
+
+next:
+		/* Increment iterator. */
+		__state->vnext++;
+
+		if (likely(position != EMPTY_SLOT))
+			return position - 1;
+	}
+
+	return -ENOENT;
+}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index f71ca9fbf..7ecb6a7eb 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -61,6 +61,11 @@ struct rte_hash_parameters {
 /** @internal A hash table structure. */
 struct rte_hash;
 
+/** @internal A hash table conflict iterator state structure. */
+struct rte_conflict_iterator_state {
+	uint8_t space[64];
+};
+
 /**
  * Create a new hash table.
  *
@@ -419,6 +424,44 @@ rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
  */
 int32_t
 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next);
+
+/**
+ * Initialize the iterator over entries that conflict with a new entry.
+ *
+ * @param h
+ *   Hash table to iterate.
+ * @param sig
+ *   Precomputed hash value for the new entry.
+ * @param state
+ *   Pointer to the iterator state to be initialized.
+ * @return
+ *   - 0 if successful.
+ *   - -EINVAL if the parameters are invalid.
+ */
+int32_t __rte_experimental
+rte_hash_iterator_conflict_entries_init(const struct rte_hash *h,
+	hash_sig_t sig, struct rte_conflict_iterator_state *state);
+
+/**
+ * Iterate over entries that conflict with a new entry.
+ *
+ * @param state
+ *   Pointer to the iterator state.
+ * @param key
+ *   Output containing the key that the iterator is
+ *   currently pointing at.
+ * @param data
+ *   Output containing the data associated with key.
+ *   Returns NULL if data was not stored.
+ * @return
+ *   Position where key was stored, if successful.
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOENT if there are no more conflicting entries.
+ */
+int32_t __rte_experimental
+rte_hash_iterate_conflict_entries(struct rte_conflict_iterator_state *state,
+	const void **key, const void **data);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index 52a2576f9..c1c343e52 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -45,3 +45,10 @@ DPDK_16.07 {
 	rte_hash_get_key_with_position;
 
 } DPDK_2.2;
+
+EXPERIMENTAL {
+	global:
+
+	rte_hash_iterator_conflict_entries_init;
+	rte_hash_iterate_conflict_entries;
+};
-- 
2.17.1



