[dpdk-dev] [PATCH v3 1/2] lpm: extend IPv4 next_hop field

Michal Kobylinski michalx.kobylinski at intel.com
Tue Mar 8 21:52:02 CET 2016
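
Extend the next_hop field in the IPv4 LPM library from 8 bits to
24 bits, raising the number of possible next hops to 2^24. A
tbl24/tbl8 entry consequently grows from 2 to 4 bytes and LIBABIVER
is bumped from 2 to 3; the old layout is kept as rte_lpm_v20, with
the previous functions preserved as versioned symbols (_v20 bound to
DPDK 2.0, _v1604 bound as the 16.04 default).

Below is a minimal, illustrative sketch (not part of this patch) of
the widened field in use; the table name, size and addresses are
placeholders:

  #include <stdio.h>

  #include <rte_ip.h>
  #include <rte_lpm.h>

  static int
  lpm_next_hop_demo(void)
  {
  	struct rte_lpm *lpm;
  	uint32_t next_hop = 0;

  	lpm = rte_lpm_create("demo", SOCKET_ID_ANY, 256, 0);
  	if (lpm == NULL)
  		return -1;

  	/* Next hops above 255 did not fit the old 8-bit field;
  	 * the maximum is now 2^24 - 1. */
  	if (rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 24, 0xABCDEF) < 0) {
  		rte_lpm_free(lpm);
  		return -1;
  	}

  	/* The next hop is now returned through a uint32_t. */
  	if (rte_lpm_lookup(lpm, IPv4(10, 0, 0, 1), &next_hop) == 0)
  		printf("next hop: 0x%x\n", next_hop);

  	rte_lpm_free(lpm);
  	return 0;
  }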


Signed-off-by: Michal Kobylinski <michalx.kobylinski at intel.com>
Acked-by: David Hunt <david.hunt at intel.com>
---
 app/test/test_lpm.c                    |  122 ++--
 doc/guides/rel_notes/release_16_04.rst |    5 +-
 lib/librte_lpm/Makefile                |    2 +-
 lib/librte_lpm/rte_lpm.c               | 1090 +++++++++++++++++++++++++++++---
 lib/librte_lpm/rte_lpm.h               |  202 ++++--
 lib/librte_lpm/rte_lpm_version.map     |   11 +
 lib/librte_table/rte_table_lpm.c       |   15 +-
 7 files changed, 1217 insertions(+), 230 deletions(-)

diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index 8b4ded9..f367553 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -57,7 +57,7 @@
 	}                                                                     \
 } while(0)
 
-typedef int32_t (* rte_lpm_test)(void);
+typedef int32_t (*rte_lpm_test)(void);
 
 static int32_t test0(void);
 static int32_t test1(void);
@@ -180,8 +180,8 @@ int32_t
 test3(void)
 {
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t depth = 24, next_hop = 100;
+	uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
+	uint8_t depth = 24;
 	int32_t status = 0;
 
 	/* rte_lpm_add: lpm == NULL */
@@ -247,8 +247,7 @@ test5(void)
 {
 #if defined(RTE_LIBRTE_LPM_DEBUG)
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t next_hop_return = 0;
+	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
 	int32_t status = 0;
 
 	/* rte_lpm_lookup: lpm == NULL */
@@ -277,8 +276,8 @@ int32_t
 test6(void)
 {
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+	uint8_t depth = 24;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -309,10 +308,10 @@ int32_t
 test7(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
+	uint32_t hop[4];
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+	uint8_t depth = 32;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -325,10 +324,10 @@ test7(void)
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
-	rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 	TEST_LPM_ASSERT(hop[0] == next_hop_add);
-	TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
-	TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
+	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 	TEST_LPM_ASSERT(hop[3] == next_hop_add);
 
 	status = rte_lpm_delete(lpm, ip, depth);
@@ -355,10 +354,11 @@ int32_t
 test8(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
+	uint32_t hop[4];
 	struct rte_lpm *lpm = NULL;
 	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint32_t next_hop_add, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -381,10 +381,10 @@ test8(void)
 			(next_hop_return == next_hop_add));
 
 		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
 	}
 
@@ -400,8 +400,7 @@ test8(void)
 		if (depth != 1) {
 			TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add));
-		}
-		else {
+		} else {
 			TEST_LPM_ASSERT(status == -ENOENT);
 		}
 
@@ -409,16 +408,16 @@ test8(void)
 		TEST_LPM_ASSERT(status == -ENOENT);
 
 		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
 		if (depth != 1) {
 			TEST_LPM_ASSERT(hop[0] == next_hop_add);
 			TEST_LPM_ASSERT(hop[1] == next_hop_add);
 		} else {
-			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
-			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
+			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
 		}
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
-		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
+		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
 	}
 
 	rte_lpm_free(lpm);
@@ -438,8 +437,8 @@ test9(void)
 {
 	struct rte_lpm *lpm = NULL;
 	uint32_t ip, ip_1, ip_2;
-	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
-		next_hop_add_2, next_hop_return;
+	uint8_t depth, depth_1, depth_2;
+	uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
 	int32_t status = 0;
 
 	/* Add & lookup to hit invalid TBL24 entry */
@@ -601,8 +600,8 @@ test10(void)
 {
 
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint32_t ip, next_hop_add, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	/* Add rule that covers a TBL24 range previously invalid & lookup
@@ -787,8 +786,8 @@ test11(void)
 {
 
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint32_t ip, next_hop_add, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -851,10 +850,10 @@ int32_t
 test12(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
+	uint32_t hop[4];
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip, i;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint32_t ip, i, next_hop_add, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -873,10 +872,10 @@ test12(void)
 				(next_hop_return == next_hop_add));
 
 		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
 
 		status = rte_lpm_delete(lpm, ip, depth);
@@ -903,8 +902,8 @@ int32_t
 test13(void)
 {
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip, i;
-	uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+	uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -965,8 +964,8 @@ test14(void)
 	 * that we have enough storage for all rules at that depth */
 
 	struct rte_lpm *lpm = NULL;
-	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint32_t ip, next_hop_add, next_hop_return;
+	uint8_t depth;
 	int32_t status = 0;
 
 	/* Add enough space for 256 rules for every depth */
@@ -1044,7 +1043,7 @@ test16(void)
 			256 * 32, 0);
 
 	/* ip loops through all possibilities for top 24 bits of address */
-	for (ip = 0; ip < 0xFFFFFF; ip++){
+	for (ip = 0; ip < 0xFFFFFF; ip++) {
 		/* add an entry within a different tbl8 each time, since
 		 * depth >24 and the top 24 bits are different */
 		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
@@ -1078,10 +1077,10 @@ test17(void)
 	const uint8_t d_ip_10_32 = 32,
 			d_ip_10_24 = 24,
 			d_ip_20_25 = 25;
-	const uint8_t next_hop_ip_10_32 = 100,
+	const uint32_t next_hop_ip_10_32 = 100,
 			next_hop_ip_10_24 = 105,
 			next_hop_ip_20_25 = 111;
-	uint8_t next_hop_return = 0;
+	uint32_t next_hop_return = 0;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -1092,7 +1091,7 @@ test17(void)
 		return -1;
 
 	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
-	uint8_t test_hop_10_32 = next_hop_return;
+	uint32_t test_hop_10_32 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
@@ -1101,7 +1100,7 @@ test17(void)
 			return -1;
 
 	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
-	uint8_t test_hop_10_24 = next_hop_return;
+	uint32_t test_hop_10_24 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
@@ -1110,7 +1109,7 @@ test17(void)
 		return -1;
 
 	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
-	uint8_t test_hop_20_25 = next_hop_return;
+	uint32_t test_hop_20_25 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
 
@@ -1119,7 +1118,7 @@ test17(void)
 		return -1;
 	}
 
-	if (test_hop_10_24 == test_hop_20_25){
+	if (test_hop_10_24 == test_hop_20_25) {
 		printf("Next hop return equal\n");
 		return -1;
 	}
@@ -1155,7 +1154,7 @@ print_route_distribution(const struct route_rule *table, uint32_t n)
 	printf("--------------------------- \n");
 
 	/* Count depths. */
-	for(i = 1; i <= 32; i++) {
+	for (i = 1; i <= 32; i++) {
 		unsigned depth_counter = 0;
 		double percent_hits;
 
@@ -1175,7 +1174,7 @@ perf_test(void)
 	struct rte_lpm *lpm = NULL;
 	uint64_t begin, total_time, lpm_used_entries = 0;
 	unsigned i, j;
-	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
 	int status = 0;
 	uint64_t cache_line_counter = 0;
 	int64_t count = 0;
@@ -1206,7 +1205,7 @@ perf_test(void)
 		if (lpm->tbl24[i].valid)
 			lpm_used_entries++;
 
-		if (i % 32 == 0){
+		if (i % 32 == 0) {
 			if ((uint64_t)count < lpm_used_entries) {
 				cache_line_counter++;
 				count = lpm_used_entries;
@@ -1220,22 +1219,23 @@ perf_test(void)
 	printf("64 byte Cache entries used = %u (%u bytes)\n",
 			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
 
-	printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
+	printf("Average LPM Add: %g cycles\n",
+			(double)total_time / NUM_ROUTE_ENTRIES);
 
 	/* Measure single Lookup */
 	total_time = 0;
 	count = 0;
 
-	for (i = 0; i < ITERATIONS; i ++) {
+	for (i = 0; i < ITERATIONS; i++) {
 		static uint32_t ip_batch[BATCH_SIZE];
 
-		for (j = 0; j < BATCH_SIZE; j ++)
+		for (j = 0; j < BATCH_SIZE; j++)
 			ip_batch[j] = rte_rand();
 
 		/* Lookup per batch */
 		begin = rte_rdtsc();
 
-		for (j = 0; j < BATCH_SIZE; j ++) {
+		for (j = 0; j < BATCH_SIZE; j++) {
 			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
 				count++;
 		}
@@ -1250,12 +1250,12 @@ perf_test(void)
 	/* Measure bulk Lookup */
 	total_time = 0;
 	count = 0;
-	for (i = 0; i < ITERATIONS; i ++) {
+	for (i = 0; i < ITERATIONS; i++) {
 		static uint32_t ip_batch[BATCH_SIZE];
-		uint16_t next_hops[BULK_SIZE];
+		uint32_t next_hops[BULK_SIZE];
 
 		/* Create array of random IP addresses */
-		for (j = 0; j < BATCH_SIZE; j ++)
+		for (j = 0; j < BATCH_SIZE; j++)
 			ip_batch[j] = rte_rand();
 
 		/* Lookup per batch */
@@ -1279,7 +1279,7 @@ perf_test(void)
 	count = 0;
 	for (i = 0; i < ITERATIONS; i++) {
 		static uint32_t ip_batch[BATCH_SIZE];
-		uint16_t next_hops[4];
+		uint32_t next_hops[4];
 
 		/* Create array of random IP addresses */
 		for (j = 0; j < BATCH_SIZE; j++)
@@ -1293,9 +1293,9 @@ perf_test(void)
 
 			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
 			ipx4 = *(__m128i *)(ip_batch + j);
-			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
 			for (k = 0; k < RTE_DIM(next_hops); k++)
-				if (unlikely(next_hops[k] == UINT16_MAX))
+				if (unlikely(next_hops[k] == UINT32_MAX))
 					count++;
 		}
 
diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index 24f15bf..686db70 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -98,6 +98,9 @@ Libraries
   Fix crc32c hash functions to return a valid crc32c value for data lengths
   not multiple of 4 bytes.
 
+* **librte_lpm: Increased number of next hops for IPv4 to 2^24.**
+
+  Extended the next_hop field for IPv4 from 8 bits to 24 bits.
 
 Examples
 ~~~~~~~~
@@ -164,7 +167,7 @@ The libraries prepended with a plus sign were incremented in this version.
      librte_jobstats.so.1
      librte_kni.so.2
      librte_kvargs.so.1
-     librte_lpm.so.2
+     +librte_lpm.so.3
      librte_mbuf.so.2
      librte_mempool.so.1
      librte_meter.so.1
diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile
index 688cfc9..7a342f8 100644
--- a/lib/librte_lpm/Makefile
+++ b/lib/librte_lpm/Makefile
@@ -39,7 +39,7 @@ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
 
 EXPORT_MAP := rte_lpm_version.map
 
-LIBABIVER := 2
+LIBABIVER := 3
 
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c rte_lpm6.c
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 22f2ae9..ea4d234 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -119,8 +119,34 @@ depth_to_range(uint8_t depth)
 /*
  * Find an existing lpm table and return a pointer to it.
  */
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name)
+{
+	struct rte_lpm_v20 *l = NULL;
+	struct rte_tailq_entry *te;
+	struct rte_lpm_list *lpm_list;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_FOREACH(te, lpm_list, next) {
+		l = (struct rte_lpm_v20 *) te->data;
+		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+			break;
+	}
+	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	if (te == NULL) {
+		rte_errno = ENOENT;
+		return NULL;
+	}
+
+	return l;
+}
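+/*
+ * ABI note: VERSION_SYMBOL exports rte_lpm_find_existing_v20 as
+ * rte_lpm_find_existing@DPDK_2.0 so binaries built against the old
+ * 8-bit next_hop keep working; BIND_DEFAULT_SYMBOL below binds the
+ * _v1604 variant as the default for new builds.
+ */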
+VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
+
 struct rte_lpm *
-rte_lpm_find_existing(const char *name)
+rte_lpm_find_existing_v1604(const char *name)
 {
 	struct rte_lpm *l = NULL;
 	struct rte_tailq_entry *te;
@@ -143,12 +169,82 @@ rte_lpm_find_existing(const char *name)
 
 	return l;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
+MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
+		rte_lpm_find_existing_v1604);
 
 /*
  * Allocates memory for LPM object
  */
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
+		__rte_unused int flags)
+{
+	char mem_name[RTE_LPM_NAMESIZE];
+	struct rte_lpm_v20 *lpm = NULL;
+	struct rte_tailq_entry *te;
+	uint32_t mem_size;
+	struct rte_lpm_list *lpm_list;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
+
+	/* Check user arguments. */
+	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+	/* Determine the amount of memory to allocate. */
+	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* guarantee there's no existing entry with the same name */
+	TAILQ_FOREACH(te, lpm_list, next) {
+		lpm = (struct rte_lpm_v20 *) te->data;
+		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+			break;
+	}
+	if (te != NULL)
+		goto exit;
+
+	/* allocate tailq entry */
+	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		goto exit;
+	}
+
+	/* Allocate memory to store the LPM data structures. */
+	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (lpm == NULL) {
+		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		rte_free(te);
+		goto exit;
+	}
+
+	/* Save user arguments. */
+	lpm->max_rules = max_rules;
+	snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+	te->data = (void *) lpm;
+
+	TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	return lpm;
+}
+VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
+
 struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules,
+rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
 		__rte_unused int flags)
 {
 	char mem_name[RTE_LPM_NAMESIZE];
@@ -159,10 +255,10 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 
 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
-	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 2);
+	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
 
 	/* Check user arguments. */
-	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
+	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
@@ -212,12 +308,49 @@ exit:
 
 	return lpm;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
+MAP_STATIC_SYMBOL(
+	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
+			int max_rules, int flags), rte_lpm_create_v1604);
 
 /*
  * Deallocates memory for given LPM table.
  */
 void
-rte_lpm_free(struct rte_lpm *lpm)
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
+{
+	struct rte_lpm_list *lpm_list;
+	struct rte_tailq_entry *te;
+
+	/* Check user arguments. */
+	if (lpm == NULL)
+		return;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* find our tailq entry */
+	TAILQ_FOREACH(te, lpm_list, next) {
+		if (te->data == (void *) lpm)
+			break;
+	}
+	if (te == NULL) {
+		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+		return;
+	}
+
+	TAILQ_REMOVE(lpm_list, te, next);
+
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	rte_free(lpm);
+	rte_free(te);
+}
+VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
+
+void
+rte_lpm_free_v1604(struct rte_lpm *lpm)
 {
 	struct rte_lpm_list *lpm_list;
 	struct rte_tailq_entry *te;
@@ -247,6 +380,9 @@ rte_lpm_free(struct rte_lpm *lpm)
 	rte_free(lpm);
 	rte_free(te);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
+MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
+		rte_lpm_free_v1604);
 
 /*
  * Adds a rule to the rule table.
@@ -259,7 +395,7 @@ rte_lpm_free(struct rte_lpm *lpm)
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline int32_t
-rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 	uint8_t next_hop)
 {
 	uint32_t rule_gindex, rule_index, last_rule;
@@ -295,7 +431,80 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 		for (i = depth - 1; i > 0; i--) {
 			if (lpm->rule_info[i - 1].used_rules > 0) {
-				rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+				rule_index = lpm->rule_info[i - 1].first_rule
+						+ lpm->rule_info[i - 1].used_rules;
+				break;
+			}
+		}
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+
+		lpm->rule_info[depth - 1].first_rule = rule_index;
+	}
+
+	/* Make room for the new rule in the array. */
+	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+		if (lpm->rule_info[i - 1].first_rule
+				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+			return -ENOSPC;
+
+		if (lpm->rule_info[i - 1].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+				+ lpm->rule_info[i - 1].used_rules]
+					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+			lpm->rule_info[i - 1].first_rule++;
+		}
+	}
+
+	/* Add the new rule. */
+	lpm->rules_tbl[rule_index].ip = ip_masked;
+	lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+	/* Increment the used rules counter for this rule group. */
+	lpm->rule_info[depth - 1].used_rules++;
+
+	return rule_index;
+}
+
+static inline int32_t
+rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+	uint32_t next_hop)
+{
+	uint32_t rule_gindex, rule_index, last_rule;
+	int i;
+
+	VERIFY_DEPTH(depth);
+
+	/* Scan through rule group to see if rule already exists. */
+	if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+		/* rule_gindex stands for rule group index. */
+		rule_gindex = lpm->rule_info[depth - 1].first_rule;
+		/* Initialise rule_index to point to start of rule group. */
+		rule_index = rule_gindex;
+		/* Last rule = Last used rule in this rule group. */
+		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+		for (; rule_index < last_rule; rule_index++) {
+
+			/* If rule already exists update its next_hop and return. */
+			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+				lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+				return rule_index;
+			}
+		}
+
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+	} else {
+		/* Calculate the position in which the rule will be stored. */
+		rule_index = 0;
+
+		for (i = depth - 1; i > 0; i--) {
+			if (lpm->rule_info[i - 1].used_rules > 0) {
+				rule_index = lpm->rule_info[i - 1].first_rule
+						+ lpm->rule_info[i - 1].used_rules;
 				break;
 			}
 		}
@@ -307,11 +516,13 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 	/* Make room for the new rule in the array. */
 	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
-		if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+		if (lpm->rule_info[i - 1].first_rule
+				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
 			return -ENOSPC;
 
 		if (lpm->rule_info[i - 1].used_rules > 0) {
-			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+				+ lpm->rule_info[i - 1].used_rules]
 					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
 			lpm->rule_info[i - 1].first_rule++;
 		}
@@ -332,19 +543,44 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline void
-rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
+{
+	int i;
+
+	VERIFY_DEPTH(depth);
+
+	lpm->rules_tbl[rule_index] =
+			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+				+ lpm->rule_info[depth - 1].used_rules - 1];
+
+	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+		if (lpm->rule_info[i].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+				lpm->rules_tbl[lpm->rule_info[i].first_rule
+					+ lpm->rule_info[i].used_rules - 1];
+			lpm->rule_info[i].first_rule--;
+		}
+	}
+
+	lpm->rule_info[depth - 1].used_rules--;
+}
+
+static inline void
+rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
 {
 	int i;
 
 	VERIFY_DEPTH(depth);
 
-	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+	lpm->rules_tbl[rule_index] =
+			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
 			+ lpm->rule_info[depth - 1].used_rules - 1];
 
 	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
 		if (lpm->rule_info[i].used_rules > 0) {
 			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
-					lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+					lpm->rules_tbl[lpm->rule_info[i].first_rule
+						+ lpm->rule_info[i].used_rules - 1];
 			lpm->rule_info[i].first_rule--;
 		}
 	}
@@ -357,7 +593,28 @@ rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline int32_t
-rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
+{
+	uint32_t rule_gindex, last_rule, rule_index;
+
+	VERIFY_DEPTH(depth);
+
+	rule_gindex = lpm->rule_info[depth - 1].first_rule;
+	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+	/* Scan used rules at given depth to find rule. */
+	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+		/* If rule is found return the rule index. */
+		if (lpm->rules_tbl[rule_index].ip == ip_masked)
+			return rule_index;
+	}
+
+	/* If rule is not found return -EINVAL. */
+	return -EINVAL;
+}
+
+static inline int32_t
+rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
 {
 	uint32_t rule_gindex, last_rule, rule_index;
 
@@ -381,7 +638,34 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
  * Find, clean and allocate a tbl8.
  */
 static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
+tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
+{
+	uint32_t group_idx; /* tbl8 group index. */
+	struct rte_lpm_tbl_entry_v20 *tbl8_entry;
+
+	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
+			group_idx++) {
+		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+		/* If a free tbl8 group is found clean it and set as VALID. */
+		if (!tbl8_entry->valid_group) {
+			memset(&tbl8_entry[0], 0,
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
+					sizeof(tbl8_entry[0]));
+
+			tbl8_entry->valid_group = VALID;
+
+			/* Return group index for allocated tbl8 group. */
+			return group_idx;
+		}
+	}
+
+	/* If there are no tbl8 groups free then return error. */
+	return -ENOSPC;
+}
+
+static inline int32_t
+tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
 {
 	uint32_t group_idx; /* tbl8 group index. */
 	struct rte_lpm_tbl_entry *tbl8_entry;
@@ -389,8 +673,7 @@ tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
 	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
 	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
 			group_idx++) {
-		tbl8_entry = &tbl8[group_idx *
-		                   RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
 			memset(&tbl8_entry[0], 0,
@@ -409,14 +692,21 @@ tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
 }
 
 static inline void
-tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
+{
+	/* Set tbl8 group invalid */
+	tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
+static inline void
+tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid */
 	tbl8[tbl8_group_start].valid_group = INVALID;
 }
 
 static inline int32_t
-add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 		uint8_t next_hop)
 {
 	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
@@ -433,7 +723,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
 				lpm->tbl24[i].depth <= depth)) {
 
-			struct rte_lpm_tbl_entry new_tbl24_entry = {
+			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
 				{ .next_hop = next_hop, },
 				.valid = VALID,
 				.valid_group = 0,
@@ -460,7 +750,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			for (j = tbl8_index; j < tbl8_group_end; j++) {
 				if (!lpm->tbl8[j].valid ||
 						lpm->tbl8[j].depth <= depth) {
-					struct rte_lpm_tbl_entry
+					struct rte_lpm_tbl_entry_v20
 						new_tbl8_entry = {
 						.valid = VALID,
 						.valid_group = VALID,
@@ -484,19 +774,88 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 }
 
 static inline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
-		uint8_t next_hop)
+add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint32_t next_hop)
 {
-	uint32_t tbl24_index;
-	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
-		tbl8_range, i;
+#define group_idx next_hop
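+	/*
+	 * group_idx aliases next_hop: in an extended tbl24 entry the 24-bit
+	 * field holds a tbl8 group index rather than a next hop.
+	 */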
+	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
 
-	tbl24_index = (ip_masked >> 8);
-	tbl8_range = depth_to_range(depth);
+	/* Calculate the index into Table24. */
+	tbl24_index = ip >> 8;
+	tbl24_range = depth_to_range(depth);
 
-	if (!lpm->tbl24[tbl24_index].valid) {
-		/* Search for a free tbl8 group. */
-		tbl8_group_index = tbl8_alloc(lpm->tbl8);
+	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+		/*
+		 * For invalid entries, or valid non-extended tbl24 entries,
+		 * set the entry.
+		 */
+		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
+				lpm->tbl24[i].depth <= depth)) {
+
+			struct rte_lpm_tbl_entry new_tbl24_entry = {
+				.next_hop = next_hop,
+				.valid = VALID,
+				.valid_group = 0,
+				.depth = depth,
+			};
+
+			/* Setting tbl24 entry in one go to avoid race
+			 * conditions
+			 */
+			lpm->tbl24[i] = new_tbl24_entry;
+
+			continue;
+		}
+
+		if (lpm->tbl24[i].valid_group == 1) {
+			/* If tbl24 entry is valid and extended calculate the
+			 * index into tbl8.
+			 */
+			tbl8_index = lpm->tbl24[i].group_idx *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			tbl8_group_end = tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+			for (j = tbl8_index; j < tbl8_group_end; j++) {
+				if (!lpm->tbl8[j].valid ||
+						lpm->tbl8[j].depth <= depth) {
+					struct rte_lpm_tbl_entry
+						new_tbl8_entry = {
+						.valid = VALID,
+						.valid_group = VALID,
+						.depth = depth,
+						.next_hop = next_hop,
+					};
+
+					/*
+					 * Setting tbl8 entry in one go to avoid
+					 * race conditions
+					 */
+					lpm->tbl8[j] = new_tbl8_entry;
+
+					continue;
+				}
+			}
+		}
+	}
+#undef group_idx
+	return 0;
+}
+
+static inline int32_t
+add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
+		uint8_t next_hop)
+{
+	uint32_t tbl24_index;
+	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+		tbl8_range, i;
+
+	tbl24_index = (ip_masked >> 8);
+	tbl8_range = depth_to_range(depth);
+
+	if (!lpm->tbl24[tbl24_index].valid) {
+		/* Search for a free tbl8 group. */
+		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
 
 		/* Check tbl8 allocation was successful. */
 		if (tbl8_group_index < 0) {
@@ -521,7 +880,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 * so assign whole structure in one go
 		 */
 
-		struct rte_lpm_tbl_entry new_tbl24_entry = {
+		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
 			{ .group_idx = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
 			.valid_group = 1,
@@ -530,10 +889,10 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
 
-	}/* If valid entry but not extended calculate the index into Table8. */
+	} /* If valid entry but not extended calculate the index into Table8. */
 	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
 		/* Search for free tbl8 group. */
-		tbl8_group_index = tbl8_alloc(lpm->tbl8);
+		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
 
 		if (tbl8_group_index < 0) {
 			return tbl8_group_index;
@@ -572,7 +931,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 * so assign whole structure in one go.
 		 */
 
-		struct rte_lpm_tbl_entry new_tbl24_entry = {
+		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
 				{ .group_idx = (uint8_t)tbl8_group_index, },
 				.valid = VALID,
 				.valid_group = 1,
@@ -581,8 +940,139 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
 
+	} else { /*
+		* If the entry is valid and extended, calculate the tbl8 index.
+		*/
+		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+		tbl8_group_start = tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+			if (!lpm->tbl8[i].valid ||
+					lpm->tbl8[i].depth <= depth) {
+				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+					.valid = VALID,
+					.depth = depth,
+					.next_hop = next_hop,
+					.valid_group = lpm->tbl8[i].valid_group,
+				};
+
+				/*
+				 * Setting tbl8 entry in one go to avoid race
+				 * condition
+				 */
+				lpm->tbl8[i] = new_tbl8_entry;
+
+				continue;
+			}
+		}
 	}
-	else { /*
+
+	return 0;
+}
+
+static inline int32_t
+add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+		uint32_t next_hop)
+{
+#define group_idx next_hop
+	uint32_t tbl24_index;
+	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+		tbl8_range, i;
+
+	tbl24_index = (ip_masked >> 8);
+	tbl8_range = depth_to_range(depth);
+
+	if (!lpm->tbl24[tbl24_index].valid) {
+		/* Search for a free tbl8 group. */
+		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+
+		/* Check tbl8 allocation was successful. */
+		if (tbl8_group_index < 0) {
+			return tbl8_group_index;
+		}
+
+		/* Find index into tbl8 and range. */
+		tbl8_index = (tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+				(ip_masked & 0xFF);
+
+		/* Set tbl8 entry. */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
+			lpm->tbl8[i].valid = VALID;
+		}
+
+		/*
+		 * Update tbl24 entry to point to new tbl8 entry. Note: The
+		 * ext_flag and tbl8_index need to be updated simultaneously,
+		 * so assign whole structure in one go
+		 */
+
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
+			.group_idx = (uint8_t)tbl8_group_index,
+			.valid = VALID,
+			.valid_group = 1,
+			.depth = 0,
+		};
+
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+	} /* If valid entry but not extended calculate the index into Table8. */
+	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
+		/* Search for free tbl8 group. */
+		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+
+		if (tbl8_group_index < 0) {
+			return tbl8_group_index;
+		}
+
+		tbl8_group_start = tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl8_group_end = tbl8_group_start +
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+		/* Populate new tbl8 with tbl24 value. */
+		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+			lpm->tbl8[i].next_hop =
+					lpm->tbl24[tbl24_index].next_hop;
+		}
+
+		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+		/* Insert new rule into the tbl8 entry. */
+		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+			if (!lpm->tbl8[i].valid ||
+					lpm->tbl8[i].depth <= depth) {
+				lpm->tbl8[i].valid = VALID;
+				lpm->tbl8[i].depth = depth;
+				lpm->tbl8[i].next_hop = next_hop;
+
+				continue;
+			}
+		}
+
+		/*
+		 * Update tbl24 entry to point to new tbl8 entry. Note: The
+		 * ext_flag and tbl8_index need to be updated simultaneously,
+		 * so assign whole structure in one go.
+		 */
+
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
+				.group_idx = (uint8_t)tbl8_group_index,
+				.valid = VALID,
+				.valid_group = 1,
+				.depth = 0,
+		};
+
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+	} else { /*
 		* If the entry is valid and extended, calculate the tbl8 index.
 		*/
 		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
@@ -611,7 +1101,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 			}
 		}
 	}
-
+#undef group_idx
 	return 0;
 }
 
@@ -619,7 +1109,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
  * Add a route
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 		uint8_t next_hop)
 {
 	int32_t rule_index, status = 0;
@@ -632,7 +1122,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 	ip_masked = ip & depth_to_mask(depth);
 
 	/* Add the rule to the rule table. */
-	rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
 
 	/* If no space is available for the new rule, return an error. */
 	if (rule_index < 0) {
@@ -640,17 +1130,57 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 	}
 
 	if (depth <= MAX_DEPTH_TBL24) {
-		status = add_depth_small(lpm, ip_masked, depth, next_hop);
+		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
+
+		/*
+		 * If add fails due to exhaustion of tbl8 extensions delete
+		 * rule that was added to rule table.
+		 */
+		if (status < 0) {
+			rule_delete_v20(lpm, rule_index, depth);
+
+			return status;
+		}
+	}
+
+	return 0;
+}
+VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
+
+int
+rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint32_t next_hop)
+{
+	int32_t rule_index, status = 0;
+	uint32_t ip_masked;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	ip_masked = ip & depth_to_mask(depth);
+
+	/* Add the rule to the rule table. */
+	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
+
+	/* If no space is available for the new rule, return an error. */
+	if (rule_index < 0) {
+		return rule_index;
 	}
-	else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
-		status = add_depth_big(lpm, ip_masked, depth, next_hop);
+
+	if (depth <= MAX_DEPTH_TBL24) {
+		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
 
 		/*
 		 * If add fails due to exhaustion of tbl8 extensions delete
 		 * rule that was added to rule table.
 		 */
 		if (status < 0) {
-			rule_delete(lpm, rule_index, depth);
+			rule_delete_v1604(lpm, rule_index, depth);
 
 			return status;
 		}
@@ -658,61 +1188,211 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 
 	return 0;
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
+		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
+
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+		uint8_t *next_hop)
+{
+	uint32_t ip_masked;
+	int32_t rule_index;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) ||
+		(next_hop == NULL) ||
+		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	/* Look for the rule using rule_find. */
+	ip_masked = ip & depth_to_mask(depth);
+	rule_index = rule_find_v20(lpm, ip_masked, depth);
+
+	if (rule_index >= 0) {
+		*next_hop = lpm->rules_tbl[rule_index].next_hop;
+		return 1;
+	}
+
+	/* If rule is not found return 0. */
+	return 0;
+}
+VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint32_t *next_hop)
+{
+	uint32_t ip_masked;
+	int32_t rule_index;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) ||
+		(next_hop == NULL) ||
+		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	/* Look for the rule using rule_find. */
+	ip_masked = ip & depth_to_mask(depth);
+	rule_index = rule_find_v1604(lpm, ip_masked, depth);
+
+	if (rule_index >= 0) {
+		*next_hop = lpm->rules_tbl[rule_index].next_hop;
+		return 1;
+	}
+
+	/* If rule is not found return 0. */
+	return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
+		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
+
+static inline int32_t
+find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+		uint8_t *sub_rule_depth)
+{
+	int32_t rule_index;
+	uint32_t ip_masked;
+	uint8_t prev_depth;
+
+	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+		ip_masked = ip & depth_to_mask(prev_depth);
+
+		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
+
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
+			return rule_index;
+		}
+	}
+
+	return -1;
+}
+
+static inline int32_t
+find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint8_t *sub_rule_depth)
+{
+	int32_t rule_index;
+	uint32_t ip_masked;
+	uint8_t prev_depth;
+
+	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+		ip_masked = ip & depth_to_mask(prev_depth);
+
+		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
+
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
+			return rule_index;
+		}
+	}
+
+	return -1;
+}
+
+static inline int32_t
+delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+
+	/* Calculate the range and index into Table24. */
+	tbl24_range = depth_to_range(depth);
+	tbl24_index = (ip_masked >> 8);
+
+	/*
+	 * First check sub_rule_index: -1 indicates there is no replacement
+	 * rule, while a non-negative value is the index of the replacement.
+	 */
+	if (sub_rule_index < 0) {
+		/*
+		 * If no replacement rule exists then invalidate entries
+		 * associated with this rule.
+		 */
+		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+			if (lpm->tbl24[i].valid_group == 0 &&
+					lpm->tbl24[i].depth <= depth) {
+				lpm->tbl24[i].valid = INVALID;
+			} else if (lpm->tbl24[i].valid_group == 1) {
+				/*
+				 * If TBL24 entry is extended, then there has
+				 * to be a rule with depth >= 25 in the
+				 * associated TBL8 group.
+				 */
+
+				tbl8_group_index = lpm->tbl24[i].group_idx;
+				tbl8_index = tbl8_group_index *
+						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 
-/*
- * Look for a rule in the high-level rules table
- */
-int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop)
-{
-	uint32_t ip_masked;
-	int32_t rule_index;
+				for (j = tbl8_index; j < (tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
-	/* Check user arguments. */
-	if ((lpm == NULL) ||
-		(next_hop == NULL) ||
-		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
-		return -EINVAL;
+					if (lpm->tbl8[j].depth <= depth)
+						lpm->tbl8[j].valid = INVALID;
+				}
+			}
+		}
+	} else {
+		/*
+		 * If a replacement rule exists then modify entries
+		 * associated with this rule.
+		 */
 
-	/* Look for the rule using rule_find. */
-	ip_masked = ip & depth_to_mask(depth);
-	rule_index = rule_find(lpm, ip_masked, depth);
+		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+			.valid = VALID,
+			.valid_group = 0,
+			.depth = sub_rule_depth,
+		};
 
-	if (rule_index >= 0) {
-		*next_hop = lpm->rules_tbl[rule_index].next_hop;
-		return 1;
-	}
+		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+			.valid = VALID,
+			.valid_group = VALID,
+			.depth = sub_rule_depth,
+			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+		};
 
-	/* If rule is not found return 0. */
-	return 0;
-}
+		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
-static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
-{
-	int32_t rule_index;
-	uint32_t ip_masked;
-	uint8_t prev_depth;
+			if (lpm->tbl24[i].valid_group == 0 &&
+					lpm->tbl24[i].depth <= depth) {
+				lpm->tbl24[i] = new_tbl24_entry;
+			} else if (lpm->tbl24[i].valid_group == 1) {
+				/*
+				 * If TBL24 entry is extended, then there has
+				 * to be a rule with depth >= 25 in the
+				 * associated TBL8 group.
+				 */
 
-	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
-		ip_masked = ip & depth_to_mask(prev_depth);
+				tbl8_group_index = lpm->tbl24[i].group_idx;
+				tbl8_index = tbl8_group_index *
+						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 
-		rule_index = rule_find(lpm, ip_masked, prev_depth);
+				for (j = tbl8_index; j < (tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
-		if (rule_index >= 0) {
-			*sub_rule_depth = prev_depth;
-			return rule_index;
+					if (lpm->tbl8[j].depth <= depth)
+						lpm->tbl8[j] = new_tbl8_entry;
+				}
+			}
 		}
 	}
 
-	return -1;
+	return 0;
 }
 
 static inline int32_t
-delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
+#define group_idx next_hop
 	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
 
 	/* Calculate the range and index into Table24. */
@@ -731,7 +1411,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
 			if (lpm->tbl24[i].valid_group == 0 &&
-					lpm->tbl24[i].depth <= depth ) {
+					lpm->tbl24[i].depth <= depth) {
 				lpm->tbl24[i].valid = INVALID;
 			} else if (lpm->tbl24[i].valid_group == 1) {
 				/*
@@ -752,15 +1432,14 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 				}
 			}
 		}
-	}
-	else {
+	} else {
 		/*
 		 * If a replacement rule exists then modify entries
 		 * associated with this rule.
 		 */
 
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = sub_rule_depth,
@@ -777,7 +1456,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
 			if (lpm->tbl24[i].valid_group == 0 &&
-					lpm->tbl24[i].depth <= depth ) {
+					lpm->tbl24[i].depth <= depth) {
 				lpm->tbl24[i] = new_tbl24_entry;
 			} else if (lpm->tbl24[i].valid_group == 1) {
 				/*
@@ -799,7 +1478,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 			}
 		}
 	}
-
+#undef group_idx
 	return 0;
 }
 
@@ -812,7 +1491,55 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
  * thus can be recycled
  */
 static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
+		uint32_t tbl8_group_start)
+{
+	uint32_t tbl8_group_end, i;
+	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+	/*
+	 * Check the first entry of the given tbl8. If it is invalid we know
+	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+	 * (as such rules would affect all entries in a tbl8) and thus this
+	 * table cannot be recycled.
+	 */
+	if (tbl8[tbl8_group_start].valid) {
+		/*
+		 * If first entry is valid check if the depth is less than 24
+		 * and if so check the rest of the entries to verify that they
+		 * are all of this depth.
+		 */
+		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
+					i++) {
+
+				if (tbl8[i].depth !=
+						tbl8[tbl8_group_start].depth) {
+
+					return -EEXIST;
+				}
+			}
+			/* If all entries are the same return the tbl8 index */
+			return tbl8_group_start;
+		}
+
+		return -EEXIST;
+	}
+	/*
+	 * If the first entry is invalid check if the rest of the entries in
+	 * the tbl8 are invalid.
+	 */
+	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+		if (tbl8[i].valid)
+			return -EEXIST;
+	}
+	/* If no valid entries are found then return -EINVAL. */
+	return -EINVAL;
+}
+
+static inline int32_t
+tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
+		uint32_t tbl8_group_start)
 {
 	uint32_t tbl8_group_end, i;
 	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -858,7 +1585,7 @@ tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 }
 
 static inline int32_t
-delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
@@ -886,8 +1613,85 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 			if (lpm->tbl8[i].depth <= depth)
 				lpm->tbl8[i].valid = INVALID;
 		}
+	} else {
+		/* Set new tbl8 entry. */
+		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+			.valid = VALID,
+			.depth = sub_rule_depth,
+			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+		};
+
+		/*
+		 * Loop through the range of entries on tbl8 for which the
+		 * rule_to_delete must be modified.
+		 */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			if (lpm->tbl8[i].depth <= depth)
+				lpm->tbl8[i] = new_tbl8_entry;
+		}
+	}
+
+	/*
+	 * Check if there are any valid entries in this tbl8 group. If all
+	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
+	 * associated tbl24 entry.
+	 */
+
+	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
+
+	if (tbl8_recycle_index == -EINVAL) {
+		/* Set tbl24 before freeing tbl8 to avoid race condition. */
+		lpm->tbl24[tbl24_index].valid = 0;
+		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
+	} else if (tbl8_recycle_index > -1) {
+		/* Update tbl24 entry. */
+		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+			.valid = VALID,
+			.valid_group = 0,
+			.depth = lpm->tbl8[tbl8_recycle_index].depth,
+		};
+
+		/* Set tbl24 before freeing tbl8 to avoid race condition. */
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
 	}
-	else {
+
+	return 0;
+}
+
+static inline int32_t
+delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+#define group_idx next_hop
+	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+			tbl8_range, i;
+	int32_t tbl8_recycle_index;
+
+	/*
+	 * Calculate the index into tbl24 and range. Note: All depths larger
+	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+	 */
+	tbl24_index = ip_masked >> 8;
+
+	/* Calculate the index into tbl8 and range. */
+	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+	tbl8_range = depth_to_range(depth);
+
+	if (sub_rule_index < 0) {
+		/*
+		 * Loop through the range of entries on tbl8 for which the
+		 * rule_to_delete must be removed or modified.
+		 */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			if (lpm->tbl8[i].depth <= depth)
+				lpm->tbl8[i].valid = INVALID;
+		}
+	} else {
 		/* Set new tbl8 entry. */
 		struct rte_lpm_tbl_entry new_tbl8_entry = {
 			.valid = VALID,
@@ -912,17 +1716,16 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	 * associated tbl24 entry.
 	 */
 
-	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
 
-	if (tbl8_recycle_index == -EINVAL){
+	if (tbl8_recycle_index == -EINVAL) {
 		/* Set tbl24 before freeing tbl8 to avoid race condition. */
 		lpm->tbl24[tbl24_index].valid = 0;
-		tbl8_free(lpm->tbl8, tbl8_group_start);
-	}
-	else if (tbl8_recycle_index > -1) {
+		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
+	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = lpm->tbl8[tbl8_recycle_index].depth,
@@ -930,9 +1733,9 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 
 		/* Set tbl24 before freeing tbl8 to avoid race condition. */
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
-		tbl8_free(lpm->tbl8, tbl8_group_start);
+		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
 	}
-
+#undef group_idx
 	return 0;
 }
 
@@ -940,7 +1743,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
  * Deletes a rule
  */
 int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
 {
 	int32_t rule_to_delete_index, sub_rule_index;
 	uint32_t ip_masked;
@@ -959,7 +1762,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 * Find the index of the input rule, that needs to be deleted, in the
 	 * rule table.
 	 */
-	rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
 
 	/*
 	 * Check if rule_to_delete_index was found. If no rule was found the
@@ -969,7 +1772,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 		return -EINVAL;
 
 	/* Delete the rule from the rule table. */
-	rule_delete(lpm, rule_to_delete_index, depth);
+	rule_delete_v20(lpm, rule_to_delete_index, depth);
 
 	/*
 	 * Find rule to replace the rule_to_delete. If there is no rule to
@@ -977,26 +1780,100 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 * entries associated with this rule.
 	 */
 	sub_rule_depth = 0;
-	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
+	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
 
 	/*
 	 * If the input depth value is less than 25 use function
 	 * delete_depth_small otherwise use delete_depth_big.
 	 */
 	if (depth <= MAX_DEPTH_TBL24) {
-		return delete_depth_small(lpm, ip_masked, depth,
+		return delete_depth_small_v20(lpm, ip_masked, depth,
 				sub_rule_index, sub_rule_depth);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
+				sub_rule_depth);
+	}
+}
+VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
+
+int
+rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+	int32_t rule_to_delete_index, sub_rule_index;
+	uint32_t ip_masked;
+	uint8_t sub_rule_depth;
+	/*
+	 * Check input arguments. Note: IP is an unsigned 32-bit integer, so
+	 * every value is valid and it need not be checked.
+	 */
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+		return -EINVAL;
 	}
-	else { /* If depth > MAX_DEPTH_TBL24 */
-		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
+
+	ip_masked = ip & depth_to_mask(depth);
+
+	/*
+	 * Find the index of the input rule, that needs to be deleted, in the
+	 * rule table.
+	 */
+	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
+
+	/*
+	 * Check if rule_to_delete_index was found. If no rule was found the
+	 * function rule_find returns -EINVAL.
+	 */
+	if (rule_to_delete_index < 0)
+		return -EINVAL;
+
+	/* Delete the rule from the rule table. */
+	rule_delete_v1604(lpm, rule_to_delete_index, depth);
+
+	/*
+	 * Find rule to replace the rule_to_delete. If there is no rule to
+	 * replace the rule_to_delete we return -1 and invalidate the table
+	 * entries associated with this rule.
+	 */
+	sub_rule_depth = 0;
+	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
+
+	/*
+	 * If the input depth value is less than 25 use function
+	 * delete_depth_small otherwise use delete_depth_big.
+	 */
+	if (depth <= MAX_DEPTH_TBL24) {
+		return delete_depth_small_v1604(lpm, ip_masked, depth,
+				sub_rule_index, sub_rule_depth);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
+				sub_rule_depth);
 	}
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
+		uint8_t depth), rte_lpm_delete_v1604);
 
 /*
  * Delete all rules from the LPM table.
  */
 void
-rte_lpm_delete_all(struct rte_lpm *lpm)
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
+{
+	/* Zero rule information. */
+	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+
+	/* Zero tbl24. */
+	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+	/* Zero tbl8. */
+	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+
+	/* Delete all rules from the rules table. */
+	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
+
+void
+rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
 {
 	/* Zero rule information. */
 	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
@@ -1010,3 +1887,6 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
 	/* Delete all rules from the rules table. */
 	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
+MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
+		rte_lpm_delete_all_v1604);
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 8b4ec17..d759c8a 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -48,6 +48,7 @@
 #include <rte_memory.h>
 #include <rte_common.h>
 #include <rte_vect.h>
+#include <rte_compat.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -82,14 +83,14 @@ extern "C" {
 #endif
 
 /** @internal bitmask with valid and valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
 
 /** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS          0x0100
+#define RTE_LPM_LOOKUP_SUCCESS          0x01000000
 
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 /** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl_entry {
+struct rte_lpm_tbl_entry_v20 {
 	/**
 	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
 	 * a group index pointing to a tbl8 structure (tbl24 only, when
@@ -112,8 +113,28 @@ struct rte_lpm_tbl_entry {
 	uint8_t depth       :6; /**< Rule depth. */
 };
 
-#else
 struct rte_lpm_tbl_entry {
+	/**
+	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+	 * a group index pointing to a tbl8 structure (tbl24 only, when
+	 * valid_group is set)
+	 */
+	uint32_t next_hop    :24;
+	/* Using a single uint32_t to store 4 values. */
+	uint32_t valid       :1;   /**< Validation flag. */
+	/**
+	 * For tbl24:
+	 *  - valid_group == 0: entry stores a next hop
+	 *  - valid_group == 1: entry stores a group_index pointing to a tbl8
+	 * For tbl8:
+	 *  - valid_group indicates whether the current tbl8 is in use or not
+	 */
+	uint32_t valid_group :1;
+	uint32_t depth       :6; /**< Rule depth. */
+};
+
+#else
+struct rte_lpm_tbl_entry_v20 {
 	uint8_t depth       :6;
 	uint8_t valid_group :1;
 	uint8_t valid       :1;
@@ -123,14 +144,27 @@ struct rte_lpm_tbl_entry {
 	};
 };
 
+struct rte_lpm_tbl_entry {
+	uint32_t depth       :6;
+	uint32_t valid_group :1;
+	uint32_t valid       :1;
+	uint32_t next_hop    :24;
+
+};
+
 #endif
 
 /** @internal Rule structure. */
-struct rte_lpm_rule {
+struct rte_lpm_rule_v20 {
 	uint32_t ip; /**< Rule IP address. */
 	uint8_t  next_hop; /**< Rule next hop. */
 };
 
+struct rte_lpm_rule {
+	uint32_t ip; /**< Rule IP address. */
+	uint32_t next_hop; /**< Rule next hop. */
+};
+
 /** @internal Contains metadata about the rules table. */
 struct rte_lpm_rule_info {
 	uint32_t used_rules; /**< Used rules so far. */
@@ -138,6 +172,21 @@ struct rte_lpm_rule_info {
 };
 
 /** @internal LPM structure. */
+struct rte_lpm_v20 {
+	/* LPM metadata. */
+	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
+	uint32_t max_rules; /**< Max. balanced rules per lpm. */
+	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+	/* LPM Tables. */
+	struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
+			__rte_cache_aligned; /**< LPM tbl24 table. */
+	struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
+			__rte_cache_aligned; /**< LPM tbl8 table. */
+	struct rte_lpm_rule_v20 rules_tbl[0]
+			__rte_cache_aligned; /**< LPM rules. */
+};
+
 struct rte_lpm {
 	/* LPM metadata. */
 	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
@@ -176,6 +225,10 @@ struct rte_lpm {
  */
 struct rte_lpm *
 rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm *
+rte_lpm_create_v1604(const char *name, int socket_id, int max_rules, int flags);
 
 /**
  * Find an existing LPM object and return a pointer to it.
@@ -189,6 +242,10 @@ rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
  */
 struct rte_lpm *
 rte_lpm_find_existing(const char *name);
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name);
+struct rte_lpm *
+rte_lpm_find_existing_v1604(const char *name);
 
 /**
  * Free an LPM object.
@@ -200,6 +257,10 @@ rte_lpm_find_existing(const char *name);
  */
 void
 rte_lpm_free(struct rte_lpm *lpm);
+void
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_free_v1604(struct rte_lpm *lpm);
 
 /**
  * Add a rule to the LPM table.
@@ -216,7 +277,13 @@ rte_lpm_free(struct rte_lpm *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
+int
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+		uint8_t next_hop);
+int
+rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint32_t next_hop);
 
 /**
  * Check if a rule is present in the LPM table,
@@ -235,7 +302,13 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
  */
 int
 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop);
+		uint32_t *next_hop);
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+		uint8_t *next_hop);
+int
+rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+		uint32_t *next_hop);
 
 /**
  * Delete a rule from the LPM table.
@@ -251,6 +324,10 @@ uint8_t *next_hop);
  */
 int
 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
 
 /**
  * Delete all rules from the LPM table.
@@ -260,6 +337,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
  */
 void
 rte_lpm_delete_all(struct rte_lpm *lpm);
+void
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_delete_all_v1604(struct rte_lpm *lpm);
 
 /**
  * Lookup an IP into the LPM table.
@@ -274,28 +355,32 @@ rte_lpm_delete_all(struct rte_lpm *lpm);
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 {
 	unsigned tbl24_index = (ip >> 8);
-	uint16_t tbl_entry;
+	uint32_t tbl_entry;
+	const uint32_t *ptbl;
 
 	/* DEBUG: Check user input arguments. */
 	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
 
 	/* Copy tbl24 entry */
-	tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
+	tbl_entry = *ptbl;
 
 	/* Copy tbl8 entry (only if needed) */
 	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 
 		unsigned tbl8_index = (uint8_t)ip +
-				((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+				(((uint32_t)tbl_entry & 0x00FFFFFF) *
+						RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
 
-		tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+		tbl_entry = *ptbl;
 	}
 
-	*next_hop = (uint8_t)tbl_entry;
+	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
 	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
 }
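
For reference, the index arithmetic used by the lookup above: the top 24
bits of the address select the tbl24 entry and, when that entry is
extended, its low 24 bits name a tbl8 group whose 256 entries are indexed
by the last address byte. A self-contained sketch of just that arithmetic
(the group size constant mirrors RTE_LPM_TBL8_GROUP_NUM_ENTRIES; the
helper names are illustrative):

#include <stdint.h>

#define TBL8_GROUP_NUM_ENTRIES 256

/* tbl24 index: the top 24 bits of the IPv4 address. */
static inline unsigned
lpm_tbl24_index(uint32_t ip)
{
	return ip >> 8;
}

/* tbl8 index for an extended tbl24 entry: the entry's low 24 bits
 * select the group, the last IP byte the slot within the group. */
static inline unsigned
lpm_tbl8_index(uint32_t ip, uint32_t tbl24_entry)
{
	return (uint8_t)ip +
		(tbl24_entry & 0x00FFFFFF) * TBL8_GROUP_NUM_ENTRIES;
}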
 
@@ -323,11 +408,12 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
 		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
 
 static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
-		uint16_t * next_hops, const unsigned n)
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
+		uint32_t *next_hops, const unsigned n)
 {
 	unsigned i;
 	unsigned tbl24_indexes[n];
+	const uint32_t *ptbl;
 
 	/* DEBUG: Check user input arguments. */
 	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
@@ -339,24 +425,26 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
 
 	for (i = 0; i < n; i++) {
 		/* Simply copy tbl24 entry to output */
-		next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
+		next_hops[i] = *ptbl;
 
 		/* Overwrite output with tbl8 entry if needed */
 		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 
 			unsigned tbl8_index = (uint8_t)ips[i] +
-					((uint8_t)next_hops[i] *
+					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
 					 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
 
-			next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+			next_hops[i] = *ptbl;
 		}
 	}
 	return 0;
 }
 
 /* Mask four results. */
-#define	 RTE_LPM_MASKX4_RES	UINT64_C(0x00ff00ff00ff00ff)
+#define	 RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
 
 /**
  * Lookup four IP addresses in an LPM table.
@@ -378,36 +466,33 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
  *   if lookup would fail.
  */
 static inline void
-rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
-	uint16_t defv)
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint32_t hop[4],
+	uint32_t defv)
 {
 	__m128i i24;
 	rte_xmm_t i8;
-	uint16_t tbl[4];
-	uint64_t idx, pt;
+	uint32_t tbl[4];
+	uint64_t idx, pt, pt2;
+	const uint32_t *ptbl;
 
 	const __m128i mask8 =
 		_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
 
 	/*
-	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
-	 * as one 64-bit value (0x0300030003000300).
+	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
+	 * as one 64-bit value (0x0300000003000000).
 	 */
 	const uint64_t mask_xv =
 		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
-		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
-		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
-		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);
 
 	/*
-	 * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
-	 * as one 64-bit value (0x0100010001000100).
+	 * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
+	 * as one 64-bit value (0x0100000001000000).
 	 */
 	const uint64_t mask_v =
 		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
-		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
-		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
-		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
 
 	/* get 4 indexes for tbl24[]. */
 	i24 = _mm_srli_epi32(ip, CHAR_BIT);
@@ -416,26 +501,31 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
 	idx = _mm_cvtsi128_si64(i24);
 	i24 = _mm_srli_si128(i24, sizeof(uint64_t));
 
-	tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
-	tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+	tbl[0] = *ptbl;
+	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+	tbl[1] = *ptbl;
 
 	idx = _mm_cvtsi128_si64(i24);
 
-	tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
-	tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+	tbl[2] = *ptbl;
+	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+	tbl[3] = *ptbl;
 
 	/* get 4 indexes for tbl8[]. */
 	i8.x = _mm_and_si128(ip, mask8);
 
 	pt = (uint64_t)tbl[0] |
-		(uint64_t)tbl[1] << 16 |
-		(uint64_t)tbl[2] << 32 |
-		(uint64_t)tbl[3] << 48;
+		(uint64_t)tbl[1] << 32;
+	pt2 = (uint64_t)tbl[2] |
+		(uint64_t)tbl[3] << 32;
 
 	/* search successfully finished for all 4 IP addresses. */
-	if (likely((pt & mask_xv) == mask_v)) {
-		uintptr_t ph = (uintptr_t)hop;
-		*(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+	if (likely((pt & mask_xv) == mask_v) &&
+			likely((pt2 & mask_xv) == mask_v)) {
+		*(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
+		*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
 		return;
 	}
 
@@ -443,31 +533,35 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
+		tbl[0] = *ptbl;
 	}
-	if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
+		tbl[1] = *ptbl;
 	}
-	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
+		tbl[2] = *ptbl;
 	}
-	if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
+		tbl[3] = *ptbl;
 	}
 
-	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
-	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
-	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
-	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
+	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
+	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
+	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
 }
 
 #ifdef __cplusplus
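
The net effect of the header changes is that every table entry grows from
16 to 32 bits: the next hop (or tbl8 group index) occupies the low 24
bits, the valid flag sits at bit 24 (hence RTE_LPM_LOOKUP_SUCCESS ==
0x01000000), valid_group at bit 25, and the 6-bit depth on top. A small
sanity-check sketch of that layout, assuming GCC-style little-endian
bit-field ordering (the same assumption the RTE_BYTE_ORDER branches above
make):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct tbl_entry {			/* mirrors rte_lpm_tbl_entry (LE) */
	uint32_t next_hop    :24;
	uint32_t valid       :1;
	uint32_t valid_group :1;
	uint32_t depth       :6;
};

int
main(void)
{
	struct tbl_entry e = { .next_hop = 0x123456, .valid = 1,
			.valid_group = 0, .depth = 24 };
	uint32_t raw;

	memcpy(&raw, &e, sizeof(raw));
	assert((raw & 0x00FFFFFF) == 0x123456);	  /* next-hop mask */
	assert(raw & 0x01000000);		  /* RTE_LPM_LOOKUP_SUCCESS */
	assert((raw & 0x03000000) != 0x03000000); /* not an extended entry */
	assert((raw >> 26) == 24);		  /* depth in the top bits */
	return 0;
}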
diff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map
index 70e1c05..22f0116 100644
--- a/lib/librte_lpm/rte_lpm_version.map
+++ b/lib/librte_lpm/rte_lpm_version.map
@@ -21,3 +21,14 @@ DPDK_2.0 {
 
 	local: *;
 };
+
+DPDK_16.04 {
+	global:
+		rte_lpm_add;
+		rte_lpm_create;
+		rte_lpm_delete;
+		rte_lpm_delete_all;
+		rte_lpm_find_existing;
+		rte_lpm_free;
+		rte_lpm_is_rule_present;
+} DPDK_2.0;
diff --git a/lib/librte_table/rte_table_lpm.c b/lib/librte_table/rte_table_lpm.c
index 673f401..7b2ecb0 100644
--- a/lib/librte_table/rte_table_lpm.c
+++ b/lib/librte_table/rte_table_lpm.c
@@ -74,7 +74,7 @@ struct rte_table_lpm {
 
 	/* Next Hop Table (NHT) */
 	uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
-	uint8_t nht[0] __rte_cache_aligned;
+	uint32_t nht[0] __rte_cache_aligned;
 };
 
 static void *
@@ -178,7 +178,7 @@ nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
 	uint32_t i;
 
 	for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
-		uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
+		uint32_t *nht_entry = &lpm->nht[i * lpm->entry_size];
 
 		if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
 			lpm->entry_unique_size) == 0)) {
@@ -202,7 +202,7 @@ rte_table_lpm_entry_add(
 	struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
 	uint32_t nht_pos, nht_pos0_valid;
 	int status;
-	uint8_t nht_pos0 = 0;
+	uint32_t nht_pos0 = 0;
 
 	/* Check input parameters */
 	if (lpm == NULL) {
@@ -232,7 +232,7 @@ rte_table_lpm_entry_add(
 
 	/* Find existing or free NHT entry */
 	if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
-		uint8_t *nht_entry;
+		uint32_t *nht_entry;
 
 		if (nht_find_free(lpm, &nht_pos) == 0) {
 			RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
@@ -244,8 +244,7 @@ rte_table_lpm_entry_add(
 	}
 
 	/* Add rule to low level LPM table */
-	if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
-		(uint8_t) nht_pos) < 0) {
+	if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth, nht_pos) < 0) {
 		RTE_LOG(ERR, TABLE, "%s: LPM rule add failed\n", __func__);
 		return -1;
 	}
@@ -268,7 +267,7 @@ rte_table_lpm_entry_delete(
 {
 	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
 	struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
-	uint8_t nht_pos;
+	uint32_t nht_pos;
 	int status;
 
 	/* Check input parameters */
@@ -342,7 +341,7 @@ rte_table_lpm_lookup(
 			uint32_t ip = rte_bswap32(
 				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
 			int status;
-			uint8_t nht_pos;
+			uint32_t nht_pos;
 
 			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
 			if (status == 0) {
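
Taken together, the changes let applications carry next-hop ids that no
longer fit in one byte. A minimal usage sketch against the updated API
(error handling trimmed; IPv4() comes from rte_ip.h, as in the test cases
above):

#include <stdio.h>

#include <rte_ip.h>
#include <rte_lpm.h>

static int
lpm_demo(void)
{
	struct rte_lpm *lpm;
	uint32_t next_hop = 0;

	lpm = rte_lpm_create("demo", SOCKET_ID_ANY, 256, 0);
	if (lpm == NULL)
		return -1;

	/* 70000 would have been silently truncated by the old
	 * uint8_t next-hop API. */
	if (rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, 70000) == 0 &&
			rte_lpm_lookup(lpm, IPv4(10, 1, 2, 3), &next_hop) == 0)
		printf("next hop %u\n", next_hop);	/* prints 70000 */

	rte_lpm_free(lpm);
	return 0;
}
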
-- 
1.9.1


