[dpdk-dev] [PATCH v5 1/5] hash: add predictable RSS API

Vladimir Medvedkin vladimir.medvedkin at intel.com
Mon Apr 19 17:59:51 CEST 2021


This patch adds a predictable RSS API.
It is based on the idea of searching for partial Toeplitz hash collisions.
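
The intended usage is sketched below (the 40-byte key length, reta_sz of 6,
the choice of a 16-bit source port subtuple at bit offset 64 and the desired
hash LSB value 0x2a are illustrative assumptions only; the function bodies
added by this patch are still stubs):

	#include <errno.h>
	#include <rte_common.h>
	#include <rte_thash.h>

	static int
	thash_example(uint8_t *tuple, unsigned int tuple_len)
	{
		struct rte_thash_ctx *ctx;
		struct rte_thash_subtuple_helper *h;
		const uint8_t *key;
		int ret;

		/*
		 * 40B Toeplitz key, 6 hash LSBs select the ReTa entry,
		 * internal key initialized randomly (key == NULL).
		 */
		ctx = rte_thash_init_ctx("example", 40, 6, NULL, 0);
		if (ctx == NULL)
			return -ENOMEM;

		/*
		 * Let the 16-bit source port at bit offset 64 of the
		 * tuple be the adjustable subtuple.
		 */
		ret = rte_thash_add_helper(ctx, "sport", 16, 64);
		if (ret != 0)
			goto out;

		h = rte_thash_get_helper(ctx, "sport");
		if (h == NULL) {
			ret = -ENOENT;
			goto out;
		}

		/* The key may have changed; program it into the NIC. */
		key = rte_thash_get_key(ctx);
		RTE_SET_USED(key);

		/*
		 * Modify the source port so that the 6 LSBs of the
		 * tuple's Toeplitz hash become 0x2a.
		 */
		ret = rte_thash_adjust_tuple(ctx, h, tuple, tuple_len,
			0x2a, 1, NULL, NULL);
	out:
		rte_thash_free_ctx(ctx);
		return ret;
	}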

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin at intel.com>
Acked-by: Yipeng Wang <yipeng1.wang at intel.com>
---
 lib/librte_hash/meson.build |   3 +-
 lib/librte_hash/rte_thash.c | 109 ++++++++++++++++++++++++
 lib/librte_hash/rte_thash.h | 199 ++++++++++++++++++++++++++++++++++++++++++++
 lib/librte_hash/version.map |   8 ++
 4 files changed, 318 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_hash/rte_thash.c

diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
index 242859f..3546014 100644
--- a/lib/librte_hash/meson.build
+++ b/lib/librte_hash/meson.build
@@ -8,6 +8,7 @@ headers = files('rte_fbk_hash.h',
 	'rte_thash.h')
 indirect_headers += files('rte_crc_arm64.h')
 
-sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c')
+sources = files('rte_cuckoo_hash.c', 'rte_fbk_hash.c', 'rte_thash.c')
+deps += ['net']
 deps += ['ring']
 deps += ['rcu']
diff --git a/lib/librte_hash/rte_thash.c b/lib/librte_hash/rte_thash.c
new file mode 100644
index 0000000..1325678
--- /dev/null
+++ b/lib/librte_hash/rte_thash.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_thash.h>
+#include <rte_tailq.h>
+#include <rte_random.h>
+#include <rte_memcpy.h>
+#include <rte_errno.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+
+#define THASH_NAME_LEN		64
+
+struct thash_lfsr {
+	uint32_t	ref_cnt;
+	uint32_t	poly;
+	/**< polynomial associated with the lfsr */
+	uint32_t	rev_poly;
+	/**< polynomial to generate the sequence in reverse direction */
+	uint32_t	state;
+	/**< current state of the lfsr */
+	uint32_t	rev_state;
+	/**< current state of the lfsr for reverse direction */
+	uint32_t	deg;	/**< polynomial degree */
+	uint32_t	bits_cnt;  /**< number of bits generated by the lfsr */
+};
+
+struct rte_thash_subtuple_helper {
+	char	name[THASH_NAME_LEN];	/**< Name of subtuple configuration */
+	LIST_ENTRY(rte_thash_subtuple_helper)	next;
+	struct thash_lfsr	*lfsr;
+	uint32_t	offset;		/**< Offset of the m-sequence */
+	uint32_t	len;		/**< Length of the m-sequence */
+	uint32_t	tuple_offset;	/**< Offset in bits of the subtuple */
+	uint32_t	tuple_len;	/**< Length in bits of the subtuple */
+	uint32_t	lsb_msk;	/**< (1 << reta_sz_log) - 1 */
+	__extension__ uint32_t	compl_table[0] __rte_cache_aligned;
+	/**< Complementary table */
+};
+
+struct rte_thash_ctx {
+	char		name[THASH_NAME_LEN];
+	LIST_HEAD(, rte_thash_subtuple_helper) head;
+	uint32_t	key_len;	/**< Length of the NIC RSS hash key */
+	uint32_t	reta_sz_log;	/**< Log2 of the RSS ReTa size */
+	uint32_t	subtuples_nb;	/**< Number of subtuples */
+	uint32_t	flags;
+	uint8_t		hash_key[0];
+};
+
+struct rte_thash_ctx *
+rte_thash_init_ctx(const char *name __rte_unused,
+	uint32_t key_len __rte_unused, uint32_t reta_sz __rte_unused,
+	uint8_t *key __rte_unused, uint32_t flags __rte_unused)
+{
+	return NULL;
+}
+
+struct rte_thash_ctx *
+rte_thash_find_existing(const char *name __rte_unused)
+{
+	return NULL;
+}
+
+void
+rte_thash_free_ctx(struct rte_thash_ctx *ctx __rte_unused)
+{
+}
+
+int
+rte_thash_add_helper(struct rte_thash_ctx *ctx __rte_unused,
+	const char *name __rte_unused, uint32_t len __rte_unused,
+	uint32_t offset __rte_unused)
+{
+	return 0;
+}
+
+struct rte_thash_subtuple_helper *
+rte_thash_get_helper(struct rte_thash_ctx *ctx __rte_unused,
+	const char *name __rte_unused)
+{
+	return NULL;
+}
+
+uint32_t
+rte_thash_get_complement(struct rte_thash_subtuple_helper *h __rte_unused,
+	uint32_t hash __rte_unused, uint32_t desired_hash __rte_unused)
+{
+	return 0;
+}
+
+const uint8_t *
+rte_thash_get_key(struct rte_thash_ctx *ctx __rte_unused)
+{
+	return NULL;
+}
+
+int
+rte_thash_adjust_tuple(struct rte_thash_ctx *ctx __rte_unused,
+	struct rte_thash_subtuple_helper *h __rte_unused,
+	uint8_t *tuple __rte_unused, unsigned int tuple_len __rte_unused,
+	uint32_t desired_value __rte_unused,
+	unsigned int attempts __rte_unused,
+	rte_thash_check_tuple_t fn __rte_unused, void *userdata __rte_unused)
+{
+	return 0;
+}
diff --git a/lib/librte_hash/rte_thash.h b/lib/librte_hash/rte_thash.h
index 061efa2..659a387 100644
--- a/lib/librte_hash/rte_thash.h
+++ b/lib/librte_hash/rte_thash.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2015-2019 Vladimir Medvedkin <medvedkinv at gmail.com>
+ * Copyright(c) 2021 Intel Corporation
  */
 
 #ifndef _RTE_THASH_H
@@ -222,6 +223,204 @@ rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
 	return ret;
 }
 
+/** @internal Logarithm of minimum size of the RSS ReTa */
+#define	RTE_THASH_RETA_SZ_MIN	2U
+/** @internal Logarithm of maximum size of the RSS ReTa */
+#define	RTE_THASH_RETA_SZ_MAX	16U
+
+/**
+ * Ignore the period overflow of the generated m-sequence, i.e. allow it to
+ * exceed 2^n - 1 bits, where n is the logarithm of the RSS ReTa size.
+ */
+#define RTE_THASH_IGNORE_PERIOD_OVERFLOW	0x1
+/**
+ * Generate only the minimal required bit sequence (equal to the ReTa LSBs)
+ * into the hash_key
+ */
+#define RTE_THASH_MINIMAL_SEQ			0x2
+
+/** @internal thash context structure. */
+struct rte_thash_ctx;
+/** @internal thash helper structure. */
+struct rte_thash_subtuple_helper;
+
+/**
+ * Create a new thash context.
+ *
+ * @param name
+ *  Context name
+ * @param key_len
+ *  Length of the Toeplitz hash key
+ * @param reta_sz
+ *  Logarithm of the NIC's Redirection Table (ReTa) size,
+ *  i.e. the number of LSBs of the hash used to determine
+ *  the ReTa entry.
+ * @param key
+ *  Pointer to the key used to initialize the internal key state.
+ *  May be NULL; then the internal key is initialized with random values.
+ * @param flags
+ *  Supported flags are:
+ *   RTE_THASH_IGNORE_PERIOD_OVERFLOW
+ *   RTE_THASH_MINIMAL_SEQ
+ * @return
+ *  A pointer to the created context on success,
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_thash_ctx *
+rte_thash_init_ctx(const char *name, uint32_t key_len, uint32_t reta_sz,
+	uint8_t *key, uint32_t flags);
+
+/**
+ * Find an existing thash context and return a pointer to it.
+ *
+ * @param name
+ *  Name of the thash context
+ * @return
+ *  Pointer to the thash context or NULL if it was not found with rte_errno
+ *  set appropriately. Possible rte_errno values include:
+ *   - ENOENT - required entry not available to return.
+ */
+__rte_experimental
+struct rte_thash_ctx *
+rte_thash_find_existing(const char *name);
+
+/**
+ * Free a thash context object
+ *
+ * @param ctx
+ *  Thash context
+ * @return
+ *  None
+ */
+__rte_experimental
+void
+rte_thash_free_ctx(struct rte_thash_ctx *ctx);
+
+/**
+ * Add special properties to the Toeplitz hash key inside a thash context.
+ * This creates an internal helper struct holding a complementary table
+ * used to calculate Toeplitz hash collisions.
+ * This function is not multi-thread safe.
+ *
+ * @param ctx
+ *  Thash context
+ * @param name
+ *  Name of the helper
+ * @param len
+ *  Length in bits of the target subtuple.
+ *  Must not be less than the reta_sz passed to rte_thash_init_ctx().
+ * @param offset
+ *  Offset in bits of the subtuple
+ * @return
+ *  0 on success
+ *  negative on error
+ */
+__rte_experimental
+int
+rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len,
+	uint32_t offset);
+
+/**
+ * Find a helper in the context by the given name
+ *
+ * @param ctx
+ *  Thash context
+ * @param name
+ *  Name of the helper
+ * @return
+ *  Pointer to the thash helper or NULL if it was not found.
+ */
+__rte_experimental
+struct rte_thash_subtuple_helper *
+rte_thash_get_helper(struct rte_thash_ctx *ctx, const char *name);
+
+/**
+ * Get a complementary value for the subtuple to produce a
+ * partial Toeplitz hash collision. It must be XOR'ed with the
+ * subtuple to produce the hash value with the desired hash LSBs.
+ * This function is multi-thread safe.
+ *
+ * @param h
+ *  Pointer to the helper struct
+ * @param hash
+ *  Toeplitz hash value calculated for the given tuple
+ * @param desired_hash
+ *  Desired hash value to find a collision for
+ * @return
+ *  A complementary value which must be XOR'ed with the corresponding subtuple
+ */
+__rte_experimental
+uint32_t
+rte_thash_get_complement(struct rte_thash_subtuple_helper *h,
+	uint32_t hash, uint32_t desired_hash);
+
+/**
+ * Get a pointer to the Toeplitz hash key contained in the context.
+ * The key changes after each helper addition and should be installed
+ * into the NIC.
+ *
+ * @param ctx
+ *  Thash context
+ * @return
+ *  A pointer to the toeplitz hash key
+ */
+__rte_experimental
+const uint8_t *
+rte_thash_get_key(struct rte_thash_ctx *ctx);
+
+/**
+ * Function prototype used by rte_thash_adjust_tuple()
+ * to check whether an adjusted tuple can be used.
+ * Generally it is some kind of lookup function checking
+ * whether the adjusted tuple is already in use.
+ *
+ * @param userdata
+ *  Pointer to the userdata. It could be a pointer to the
+ *  table with used tuples to search.
+ * @param tuple
+ *  Pointer to the tuple to check
+ *
+ * @return
+ *  1 on success
+ *  0 otherwise
+ */
+typedef int (*rte_thash_check_tuple_t)(void *userdata, uint8_t *tuple);
+
+/**
+ * Adjust the tuple so that its Toeplitz hash has the
+ * desired least significant bits.
+ * This function is multi-thread safe.
+ *
+ * @param ctx
+ *  Thash context
+ * @param h
+ *  Pointer to the helper struct
+ * @param tuple
+ *  Pointer to the tuple to be adjusted
+ * @param tuple_len
+ *  Length of the tuple. Must be a multiple of 4.
+ * @param desired_value
+ *  Desired value of least significant bits of the hash
+ * @param attempts
+ *  Number of attempts to adjust the tuple, calling fn() to check each one
+ * @param fn
+ *  Callback function to check the adjusted tuple. May be NULL
+ * @param userdata
+ *  Pointer to the userdata to be passed to fn(). May be NULL
+ *
+ * @return
+ *  0 on success
+ *  negative otherwise
+ */
+__rte_experimental
+int
+rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
+	struct rte_thash_subtuple_helper *h,
+	uint8_t *tuple, unsigned int tuple_len,
+	uint32_t desired_value, unsigned int attempts,
+	rte_thash_check_tuple_t fn, void *userdata);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_hash/version.map b/lib/librte_hash/version.map
index c6d7308..17cb8aa 100644
--- a/lib/librte_hash/version.map
+++ b/lib/librte_hash/version.map
@@ -32,9 +32,17 @@ DPDK_21 {
 EXPERIMENTAL {
 	global:
 
 	rte_hash_free_key_with_position;
 	rte_hash_lookup_with_hash_bulk;
 	rte_hash_lookup_with_hash_bulk_data;
 	rte_hash_max_key_id;
 	rte_hash_rcu_qsbr_add;
+	rte_thash_add_helper;
+	rte_thash_adjust_tuple;
+	rte_thash_find_existing;
+	rte_thash_free_ctx;
+	rte_thash_get_complement;
+	rte_thash_get_helper;
+	rte_thash_get_key;
+	rte_thash_init_ctx;
 };
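
A sketch of the rte_thash_check_tuple_t callback contract described in
rte_thash.h (the struct used_tuples bookkeeping and the 12-byte tuple size
are illustrative assumptions, not part of the library):

	#include <string.h>
	#include <rte_thash.h>

	#define TUPLE_SZ 12

	struct used_tuples {
		unsigned int nb;
		uint8_t tuple[8][TUPLE_SZ];
	};

	/*
	 * Return 1 if the adjusted tuple is not already present in the
	 * caller's list, 0 otherwise, as expected by rte_thash_adjust_tuple().
	 */
	static int
	tuple_not_used(void *userdata, uint8_t *tuple)
	{
		const struct used_tuples *ut = userdata;
		unsigned int i;

		for (i = 0; i < ut->nb; i++)
			if (memcmp(ut->tuple[i], tuple, TUPLE_SZ) == 0)
				return 0;
		return 1;
	}

	/*
	 * Usage, with ctx and h created as in the commit message example:
	 *
	 * ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ,
	 *		0x2a, 5, tuple_not_used, &ut);
	 */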
-- 
2.7.4


