[dpdk-dev] [PATCH v2 6/7] hash: enable lock-free reader-writer concurrency

Honnappa Nagarahalli honnappa.nagarahalli at arm.com
Thu Oct 11 06:59:31 CEST 2018


Add a flag to enable lock-free reader-writer concurrency at
run time. When this flag is enabled, the rte_hash_del_xxx APIs
do not free the key store element. Hence, a new API,
rte_hash_free_key_with_position, is added to free the key store
element.
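
A rough writer-side usage sketch follows (illustrative only: 'key' and
wait_for_readers_to_quiesce() are placeholders, the latter standing in
for an application-provided RCU-style quiescence mechanism):

    #include <rte_hash.h>
    #include <rte_lcore.h>

    struct rte_hash_parameters params = {
            .name = "lf_hash",
            .entries = 1024,
            .key_len = sizeof(uint32_t),
            .socket_id = rte_socket_id(),
            .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
    };
    struct rte_hash *h = rte_hash_create(&params);
    uint32_t key = 42;

    /* Writer: delete the key; the key store element is NOT freed here. */
    int32_t pos = rte_hash_del_key(h, &key);
    if (pos >= 0) {
            /* Wait until no reader can still be referencing the deleted
             * entry, e.g. after an RCU grace period.
             */
            wait_for_readers_to_quiesce();
            /* Now return the key store element to the hash library. */
            rte_hash_free_key_with_position(h, pos);
    }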

Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
Reviewed-by: Gavin Hu <gavin.hu at arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl at arm.com>
Reviewed-by: Steve Capper <steve.capper at arm.com>
Reviewed-by: Yipeng Wang <yipeng1.wang at intel.com>
---
 lib/librte_hash/rte_cuckoo_hash.c    | 64 +++++++++++++++++++++---------------
 lib/librte_hash/rte_cuckoo_hash.h    |  2 ++
 lib/librte_hash/rte_hash.h           | 58 +++++++++++++++++++++++++++-----
 lib/librte_hash/rte_hash_version.map |  7 ++++
 4 files changed, 96 insertions(+), 35 deletions(-)
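
For the update-in-place case documented for rte_hash_add_key_data below,
a hedged sketch of the intended sequence (new_data/old_data and
wait_for_readers_to_quiesce() are again application-side placeholders,
and free() assumes the old data was heap-allocated):

    void *old_data = NULL;
    if (rte_hash_lookup_data(h, &key, &old_data) >= 0) {
            /* Publish the new data for the existing key first ... */
            rte_hash_add_key_data(h, &key, new_data);
            /* ... then free the old data only after all readers have
             * stopped referencing it, e.g. after an RCU grace period.
             */
            wait_for_readers_to_quiesce();
            free(old_data);
    }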

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index dfd5f2a..1b13dd0 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -97,6 +97,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 	unsigned int writer_takes_lock = 0;
 	unsigned int recycle_on_del = 1;
 	uint32_t *tbl_chng_cnt = NULL;
+	unsigned int readwrite_concur_lf_support = 0;
 
 	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
 
@@ -133,6 +134,12 @@ rte_hash_create(const struct rte_hash_parameters *params)
 	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL)
 		recycle_on_del = 0;
 
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
+		readwrite_concur_lf_support = 1;
+		/* Disable freeing internal memory/index on delete */
+		recycle_on_del = 0;
+	}
+
 	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
 	if (multi_writer_support)
 		/*
@@ -292,6 +299,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 	h->readwrite_concur_support = readwrite_concur_support;
 	h->writer_takes_lock = writer_takes_lock;
 	h->recycle_on_del = recycle_on_del;
+	h->readwrite_concur_lf_support = readwrite_concur_lf_support;
 
 #if defined(RTE_ARCH_X86)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
@@ -671,19 +679,21 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
 			return -1;
 		}
 
-		/* Inform the previous move. The current move need
-		 * not be informed now as the current bucket entry
-		 * is present in both primary and secondary.
-		 * Since there is one writer, load acquires on
-		 * tbl_chng_cnt are not required.
-		 */
-		__atomic_store_n(h->tbl_chng_cnt,
-				 *h->tbl_chng_cnt + 1,
-				 __ATOMIC_RELEASE);
-		/* The stores to sig_alt and sig_current should not
-		 * move above the store to tbl_chng_cnt.
-		 */
-		__atomic_thread_fence(__ATOMIC_RELEASE);
+		if (h->readwrite_concur_lf_support) {
+			/* Inform the previous move. The current move need
+			 * not be informed now as the current bucket entry
+			 * is present in both primary and secondary.
+			 * Since there is one writer, load acquires on
+			 * tbl_chng_cnt are not required.
+			 */
+			__atomic_store_n(h->tbl_chng_cnt,
+					 *h->tbl_chng_cnt + 1,
+					 __ATOMIC_RELEASE);
+			/* The stores to sig_alt and sig_current should not
+			 * move above the store to tbl_chng_cnt.
+			 */
+			__atomic_thread_fence(__ATOMIC_RELEASE);
+		}
 
 		/* Need to swap current/alt sig to allow later
 		 * Cuckoo insert to move elements back to its
@@ -703,19 +713,21 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
 		curr_bkt = curr_node->bkt;
 	}
 
-	/* Inform the previous move. The current move need
-	 * not be informed now as the current bucket entry
-	 * is present in both primary and secondary.
-	 * Since there is one writer, load acquires on
-	 * tbl_chng_cnt are not required.
-	 */
-	__atomic_store_n(h->tbl_chng_cnt,
-			 *h->tbl_chng_cnt + 1,
-			 __ATOMIC_RELEASE);
-	/* The stores to sig_alt and sig_current should not
-	 * move above the store to tbl_chng_cnt.
-	 */
-	__atomic_thread_fence(__ATOMIC_RELEASE);
+	if (h->readwrite_concur_lf_support) {
+		/* Inform the previous move. The current move need
+		 * not be informed now as the current bucket entry
+		 * is present in both primary and secondary.
+		 * Since there is one writer, load acquires on
+		 * tbl_chng_cnt are not required.
+		 */
+		__atomic_store_n(h->tbl_chng_cnt,
+				 *h->tbl_chng_cnt + 1,
+				 __ATOMIC_RELEASE);
+		/* The stores to sig_alt and sig_current should not
+		 * move above the store to tbl_chng_cnt.
+		 */
+		__atomic_thread_fence(__ATOMIC_RELEASE);
+	}
 
 	curr_bkt->sig_current[curr_slot] = sig;
 	curr_bkt->sig_alt[curr_slot] = alt_hash;
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index cf50ada..2e05d08 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -177,6 +177,8 @@ struct rte_hash {
 	 * the deleted entry.
 	 * This flag is enabled by default.
 	 */
+	uint8_t readwrite_concur_lf_support;
+	/**< If lock-free read-write concurrency support is enabled */
 	uint8_t writer_takes_lock;
 	/**< Indicates if the writer threads need to take lock */
 	rte_hash_function hash_func;    /**< Function used to calculate hash. */
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index dd59cb0..fb88510 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -41,9 +41,14 @@ extern "C" {
 
 /** Flag to disable freeing of internal memory/indices on hash delete.
  * Refer to rte_hash_del_xxx APIs for more details.
+ * This is enabled by default when RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF
+ * is enabled.
  */
 #define RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL 0x08
 
+/** Flag to support lock-free reader-writer concurrency */
+#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF 0x10
+
 /** Signature of key that is stored internally. */
 typedef uint32_t hash_sig_t;
 
@@ -126,7 +131,11 @@ void
 rte_hash_free(struct rte_hash *h);
 
 /**
- * Reset all hash structure, by zeroing all entries
+ * Reset the entire hash table, by zeroing all entries.
+ * When RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * it is the application's responsibility to make sure that
+ * none of the readers are referencing the hash table.
+ *
  * @param h
  *   Hash table to reset
  */
@@ -150,6 +159,12 @@ rte_hash_count(const struct rte_hash *h);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * When RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * the writer needs to be aware if this API is called to update
+ * an existing entry. The application should free any memory
+ * allocated for the existing 'data' only after all the readers
+ * have stopped referencing it. RCU mechanisms can be used to
+ * determine such a state.
  *
  * @param h
  *   Hash table to add the key to.
@@ -172,6 +187,12 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
+ * When RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * the writer needs to be aware if this API is called to update
+ * an existing entry. The application should free any memory
+ * allocated for the existing 'data' only after all the readers
+ * have stopped referencing it. RCU mechanisms can be used to
+ * determine such a state.
  *
  * @param h
  *   Hash table to add the key to.
@@ -237,10 +258,15 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
- * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL is enabled,
- * the hash library's internal memory/index will not be freed by this
+ * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL or
+ * RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * the hash library's internal memory will not be freed by this
  * API. rte_hash_free_key_with_position API must be called additionally
- * to free the internal memory/index associated with the key.
+ * to free any internal memory associated with the key.
+ * If RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * rte_hash_free_key_with_position API should be called after all
+ * the readers have stopped referencing the entry corresponding to
+ * this key. RCU mechanisms can be used to determine such a state.
  *
  * @param h
  *   Hash table to remove the key from.
@@ -252,6 +278,8 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
  *   - A positive value that can be used by the caller as an offset into an
  *     array of user data. This value is unique for this key, and is the same
  *     value that was returned when the key was added.
+ *     When lock-free concurrency is enabled, this value should be used
+ *     when calling the rte_hash_free_key_with_position API.
  */
 int32_t
 rte_hash_del_key(const struct rte_hash *h, const void *key);
@@ -262,10 +290,15 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
  * and should only be called from one thread by default.
  * Thread safety can be enabled by setting flag during
  * table creation.
- * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL is enabled,
- * the hash library's internal memory/index will not be freed by this
+ * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL or
+ * RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * the hash library's internal memory will not be freed by this
  * API. rte_hash_free_key_with_position API must be called additionally
- * to free the internal memory/index associated with the key.
+ * to free any internal memory associated with the key.
+ * If RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * rte_hash_free_key_with_position API should be called after all
+ * the readers have stopped referencing the entry corresponding to
+ * this key. RCU mechanisms can be used to determine such a state.
  *
  * @param h
  *   Hash table to remove the key from.
@@ -279,6 +312,8 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
  *   - A positive value that can be used by the caller as an offset into an
  *     array of user data. This value is unique for this key, and is the same
  *     value that was returned when the key was added.
+ *     When lock-free concurrency is enabled, this value should be used
+ *     when calling the rte_hash_free_key_with_position API.
  */
 int32_t
 rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig);
@@ -312,10 +347,15 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
  * of the key. This operation is not multi-thread safe and should
  * only be called from one thread by default. Thread safety
  * can be enabled by setting flag during table creation.
- * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL is enabled,
- * the hash library's internal memory/index must be freed using this API
+ * If RTE_HASH_EXTRA_FLAGS_RECYCLE_ON_DEL or
+ * RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * the hash library's internal memory must be freed using this API
  * after the key is deleted using rte_hash_del_key_xxx APIs.
  * This API does not validate if the key is already freed.
+ * If RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is enabled,
+ * this API should be called only after all the readers have stopped
+ * referencing the entry corresponding to this key. RCU mechanisms can
+ * be used to determine such a state.
  *
  * @param h
  *   Hash table to free the key from.
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index e216ac8..734ae28 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -53,3 +53,10 @@ DPDK_18.08 {
 	rte_hash_count;
 
 } DPDK_16.07;
+
+EXPERIMENTAL {
+	global:
+
+	rte_hash_free_key_with_position;
+
+};
-- 
2.7.4


