[dpdk-dev] [PATCH v1 1/3] lpm: increase number of next hops for lpm (ipv4)

Michal Jastrzebski michalx.k.jastrzebski at intel.com
Fri Oct 23 15:51:49 CEST 2015


From: Michal Kobylinski <michalx.kobylinski at intel.com>

Main implementation: changes to the LPM library introducing the new data
types. This patch also implements the changes required by the test
application. ABI versioning requirements are met only for the LPM library;
versioning for the table library will be sent in v2 of this patch-set.
 
Signed-off-by: Michal Kobylinski <michalx.kobylinski at intel.com>
---
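
Note for reviewers (illustrative only, not part of the patch): a minimal
sketch of how an application would consume the widened 32-bit next-hop API
added here. The route values, table size and error handling are assumptions
made up for the example; the function names and signatures are the ones
introduced below.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ip.h>
#include <rte_lpm.h>

static int
example_route(void)
{
	struct rte_lpm_extend *lpm;
	uint32_t next_hop = 0;

	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 1024, 0);
	if (lpm == NULL)
		return -1;

	/* Next hops are no longer limited to 8 bits. */
	if (rte_lpm_add(lpm, IPv4(192, 168, 0, 0), 16, 0x00ABCDEF) < 0)
		return -1;

	if (rte_lpm_lookup_extend(lpm, IPv4(192, 168, 1, 1), &next_hop) == 0)
		printf("next hop: 0x%06" PRIx32 "\n", next_hop);

	rte_lpm_free(lpm);
	return 0;
}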
 app/test/test_func_reentrancy.c    |   4 +-
 app/test/test_lpm.c                | 227 +++++-----
 lib/librte_lpm/rte_lpm.c           | 887 ++++++++++++++++++++++++++++++++++++-
 lib/librte_lpm/rte_lpm.h           | 295 +++++++++++-
 lib/librte_lpm/rte_lpm_version.map |  59 ++-
 lib/librte_table/rte_table_lpm.c   |  10 +-
 6 files changed, 1322 insertions(+), 160 deletions(-)
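
Also illustrative, not part of the patch: with the new entry layout, the
bulk lookup writes one uint32_t per address that packs the 24-bit next hop
together with the RTE_LPM_LOOKUP_SUCCESS_EXTEND flag, so callers decode
results as sketched below. handle_route() is a hypothetical application
callback; n is assumed to be at most 64.

extern void handle_route(uint32_t ip, uint32_t next_hop);

static void
lookup_batch(struct rte_lpm_extend *lpm, const uint32_t *ips, unsigned n)
{
	uint32_t hops[64];
	unsigned k;

	rte_lpm_lookup_bulk_func_extend(lpm, ips, hops, n);
	for (k = 0; k < n; k++) {
		/* Success flag and next hop share one 32-bit word. */
		if (hops[k] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
			handle_route(ips[k], hops[k] & RTE_LPM_NEXT_HOP_MASK);
	}
}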

diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index dbecc52..331ab29 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -343,7 +343,7 @@ static void
 lpm_clean(unsigned lcore_id)
 {
 	char lpm_name[MAX_STRING_SIZE];
-	struct rte_lpm *lpm;
+	struct rte_lpm_extend *lpm;
 	int i;
 
 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
@@ -358,7 +358,7 @@ static int
 lpm_create_free(__attribute__((unused)) void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
-	struct rte_lpm *lpm;
+	struct rte_lpm_extend *lpm;
 	char lpm_name[MAX_STRING_SIZE];
 	int i;
 
diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index 8b4ded9..31f54d0 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -114,7 +114,7 @@ rte_lpm_test tests[] = {
 int32_t
 test0(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 
 	/* rte_lpm_create: lpm name == NULL */
 	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -139,7 +139,7 @@ test0(void)
 int32_t
 test1(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	int32_t i;
 
 	/* rte_lpm_free: Free NULL */
@@ -163,7 +163,7 @@ test1(void)
 int32_t
 test2(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
 	TEST_LPM_ASSERT(lpm != NULL);
@@ -179,7 +179,7 @@ test2(void)
 int32_t
 test3(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip = IPv4(0, 0, 0, 0);
 	uint8_t depth = 24, next_hop = 100;
 	int32_t status = 0;
@@ -212,7 +212,7 @@ test3(void)
 int32_t
 test4(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip = IPv4(0, 0, 0, 0);
 	uint8_t depth = 24;
 	int32_t status = 0;
@@ -252,7 +252,7 @@ test5(void)
 	int32_t status = 0;
 
 	/* rte_lpm_lookup: lpm == NULL */
-	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(NULL, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status < 0);
 
 	/* Create valid lpm to use in rest of test. */
@@ -260,7 +260,7 @@ test5(void)
 	TEST_LPM_ASSERT(lpm != NULL);
 
 	/* rte_lpm_lookup: depth < 1 */
-	status = rte_lpm_lookup(lpm, ip, NULL);
+	status = rte_lpm_lookup_extend(lpm, ip, NULL);
 	TEST_LPM_ASSERT(status < 0);
 
 	rte_lpm_free(lpm);
@@ -276,9 +276,10 @@ test5(void)
 int32_t
 test6(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+	uint8_t depth = 24;
+	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -287,13 +288,13 @@ test6(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -309,10 +310,11 @@ int32_t
 test7(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
-	struct rte_lpm *lpm = NULL;
+	uint32_t hop[4];
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip = IPv4(0, 0, 0, 0);
-	uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+	uint8_t depth = 32;
+	uint32_t next_hop_add = 100, next_hop_return = 0;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -321,20 +323,20 @@ test7(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
-	rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+	rte_lpm_lookupx4_extend(lpm, ipx4, hop, UINT32_MAX);
 	TEST_LPM_ASSERT(hop[0] == next_hop_add);
-	TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
-	TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
+	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 	TEST_LPM_ASSERT(hop[3] == next_hop_add);
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -355,10 +357,11 @@ int32_t
 test8(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
-	struct rte_lpm *lpm = NULL;
+	uint32_t hop[4];
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -373,18 +376,18 @@ test8(void)
 		TEST_LPM_ASSERT(status == 0);
 
 		/* Check IP in first half of tbl24 which should be empty. */
-		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 
-		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip2, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 			(next_hop_return == next_hop_add));
 
 		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		rte_lpm_lookupx4_extend(lpm, ipx4, hop, UINT32_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
 	}
 
@@ -395,7 +398,7 @@ test8(void)
 		status = rte_lpm_delete(lpm, ip2, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip2, &next_hop_return);
 
 		if (depth != 1) {
 			TEST_LPM_ASSERT((status == 0) &&
@@ -405,20 +408,20 @@ test8(void)
 			TEST_LPM_ASSERT(status == -ENOENT);
 		}
 
-		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 
 		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		rte_lpm_lookupx4_extend(lpm, ipx4, hop, UINT32_MAX);
 		if (depth != 1) {
 			TEST_LPM_ASSERT(hop[0] == next_hop_add);
 			TEST_LPM_ASSERT(hop[1] == next_hop_add);
 		} else {
-			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
-			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
+			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
 		}
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
-		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
+		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
 	}
 
 	rte_lpm_free(lpm);
@@ -436,9 +439,10 @@ test8(void)
 int32_t
 test9(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip, ip_1, ip_2;
-	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
+	uint8_t depth, depth_1, depth_2;
+	uint32_t next_hop_add, next_hop_add_1,
 		next_hop_add_2, next_hop_return;
 	int32_t status = 0;
 
@@ -453,13 +457,13 @@ test9(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -472,7 +476,7 @@ test9(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
@@ -481,7 +485,7 @@ test9(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	depth = 24;
@@ -494,7 +498,7 @@ test9(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -508,7 +512,7 @@ test9(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ip = IPv4(128, 0, 0, 5);
@@ -518,26 +522,26 @@ test9(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	ip = IPv4(128, 0, 0, 0);
 	depth = 32;
 	next_hop_add = 100;
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -557,25 +561,25 @@ test9(void)
 	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_1, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
 	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
 
 	status = rte_lpm_delete(lpm, ip_2, depth_2);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_2, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
 	status = rte_lpm_delete(lpm, ip_1, depth_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_1, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -600,9 +604,10 @@ int32_t
 test10(void)
 {
 
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
 
 	/* Add rule that covers a TBL24 range previously invalid & lookup
@@ -617,13 +622,13 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -635,7 +640,7 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
@@ -660,13 +665,13 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ip = IPv4(128, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ip = IPv4(128, 0, 0, 0);
@@ -675,7 +680,7 @@ test10(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	ip = IPv4(128, 0, 0, 10);
@@ -684,7 +689,7 @@ test10(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -699,7 +704,7 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
@@ -707,13 +712,13 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -728,7 +733,7 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	next_hop_add = 101;
@@ -736,13 +741,13 @@ test10(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -755,7 +760,7 @@ test10(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_delete_all(lpm);
@@ -768,7 +773,7 @@ test10(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status < 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -786,9 +791,10 @@ int32_t
 test11(void)
 {
 
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -808,13 +814,13 @@ test11(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ip = IPv4(128, 0, 0, 0);
 	next_hop_add = 100;
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
 	ip = IPv4(128, 0, 0, 0);
@@ -823,7 +829,7 @@ test11(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	ip = IPv4(128, 0, 0, 10);
@@ -832,7 +838,7 @@ test11(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -851,10 +857,11 @@ int32_t
 test12(void)
 {
 	__m128i ipx4;
-	uint16_t hop[4];
-	struct rte_lpm *lpm = NULL;
+	uint32_t hop[4];
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip, i;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -868,21 +875,21 @@ test12(void)
 		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add));
 
 		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
-		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
-		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		rte_lpm_lookupx4_extend(lpm, ipx4, hop, UINT32_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
-		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
 
 		status = rte_lpm_delete(lpm, ip, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
 	}
 
@@ -902,9 +909,10 @@ test12(void)
 int32_t
 test13(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip, i;
-	uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add_1, next_hop_add_2, next_hop_return;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -917,7 +925,7 @@ test13(void)
 	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
 
 	depth = 32;
@@ -927,14 +935,14 @@ test13(void)
 		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add_2));
 
 		status = rte_lpm_delete(lpm, ip, depth);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add_1));
 	}
@@ -944,7 +952,7 @@ test13(void)
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
 
-	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status == -ENOENT);
 
 	rte_lpm_free(lpm);
@@ -964,9 +972,10 @@ test14(void)
 	/* We only use depth = 32 in the loop below so we must make sure
 	 * that we have enough storage for all rules at that depth. */
 
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint32_t ip;
-	uint8_t depth, next_hop_add, next_hop_return;
+	uint8_t depth;
+	uint32_t next_hop_add, next_hop_return;
 	int32_t status = 0;
 
 	/* Add enough space for 256 rules for every depth */
@@ -982,7 +991,7 @@ test14(void)
 		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
 		TEST_LPM_ASSERT(status == 0);
 
-		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+		status = rte_lpm_lookup_extend(lpm, ip, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) &&
 				(next_hop_return == next_hop_add));
 	}
@@ -1011,7 +1020,7 @@ test14(void)
 int32_t
 test15(void)
 {
-	struct rte_lpm *lpm = NULL, *result = NULL;
+	struct rte_lpm_extend *lpm = NULL, *result = NULL;
 
 	/* Create lpm  */
 	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
@@ -1040,7 +1049,7 @@ int32_t
 test16(void)
 {
 	uint32_t ip;
-	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
+	struct rte_lpm_extend *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
 			256 * 32, 0);
 
 	/* ip loops through all possibilities for top 24 bits of address */
@@ -1071,17 +1080,17 @@ test16(void)
 int32_t
 test17(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
 	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
 	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
 	const uint8_t d_ip_10_32 = 32,
 			d_ip_10_24 = 24,
 			d_ip_20_25 = 25;
-	const uint8_t next_hop_ip_10_32 = 100,
+	const uint32_t next_hop_ip_10_32 = 100,
 			next_hop_ip_10_24 = 105,
 			next_hop_ip_20_25 = 111;
-	uint8_t next_hop_return = 0;
+	uint32_t next_hop_return = 0;
 	int32_t status = 0;
 
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
@@ -1091,7 +1100,7 @@ test17(void)
 			next_hop_ip_10_32)) < 0)
 		return -1;
 
-	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_10_32, &next_hop_return);
 	uint8_t test_hop_10_32 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
@@ -1100,7 +1109,7 @@ test17(void)
 			next_hop_ip_10_24)) < 0)
 			return -1;
 
-	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_10_24, &next_hop_return);
 	uint8_t test_hop_10_24 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
@@ -1109,7 +1118,7 @@ test17(void)
 			next_hop_ip_20_25)) < 0)
 		return -1;
 
-	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_20_25, &next_hop_return);
 	uint8_t test_hop_20_25 = next_hop_return;
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
@@ -1124,11 +1133,11 @@ test17(void)
 		return -1;
 	}
 
-	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_10_32, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
 
-	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+	status = rte_lpm_lookup_extend(lpm, ip_10_24, &next_hop_return);
 	TEST_LPM_ASSERT(status == 0);
 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
 
@@ -1172,10 +1181,10 @@ print_route_distribution(const struct route_rule *table, uint32_t n)
 int32_t
 perf_test(void)
 {
-	struct rte_lpm *lpm = NULL;
+	struct rte_lpm_extend *lpm = NULL;
 	uint64_t begin, total_time, lpm_used_entries = 0;
 	unsigned i, j;
-	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
 	int status = 0;
 	uint64_t cache_line_counter = 0;
 	int64_t count = 0;
@@ -1236,7 +1245,7 @@ perf_test(void)
 		begin = rte_rdtsc();
 
 		for (j = 0; j < BATCH_SIZE; j ++) {
-			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
+			if (rte_lpm_lookup_extend(lpm, ip_batch[j], &next_hop_return) != 0)
 				count++;
 		}
 
@@ -1252,7 +1261,7 @@ perf_test(void)
 	count = 0;
 	for (i = 0; i < ITERATIONS; i ++) {
 		static uint32_t ip_batch[BATCH_SIZE];
-		uint16_t next_hops[BULK_SIZE];
+		uint32_t next_hops[BULK_SIZE];
 
 		/* Create array of random IP addresses */
 		for (j = 0; j < BATCH_SIZE; j ++)
@@ -1262,9 +1271,9 @@ perf_test(void)
 		begin = rte_rdtsc();
 		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
 			unsigned k;
-			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
+			rte_lpm_lookup_bulk_func_extend(lpm, &ip_batch[j], next_hops, BULK_SIZE);
 			for (k = 0; k < BULK_SIZE; k++)
-				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
+				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)))
 					count++;
 		}
 
@@ -1279,7 +1288,7 @@ perf_test(void)
 	count = 0;
 	for (i = 0; i < ITERATIONS; i++) {
 		static uint32_t ip_batch[BATCH_SIZE];
-		uint16_t next_hops[4];
+		uint32_t next_hops[4];
 
 		/* Create array of random IP addresses */
 		for (j = 0; j < BATCH_SIZE; j++)
@@ -1293,9 +1302,9 @@ perf_test(void)
 
 			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
 			ipx4 = *(__m128i *)(ip_batch + j);
-			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+			rte_lpm_lookupx4_extend(lpm, ipx4, next_hops, UINT32_MAX);
 			for (k = 0; k < RTE_DIM(next_hops); k++)
-				if (unlikely(next_hops[k] == UINT16_MAX))
+				if (unlikely(next_hops[k] == UINT32_MAX))
 					count++;
 		}
 
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 163ba3c..58b7fcc 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -120,7 +120,7 @@ depth_to_range(uint8_t depth)
  * Find an existing lpm table and return a pointer to it.
  */
 struct rte_lpm *
-rte_lpm_find_existing(const char *name)
+rte_lpm_find_existing_v20(const char *name)
 {
 	struct rte_lpm *l = NULL;
 	struct rte_tailq_entry *te;
@@ -143,12 +143,42 @@ rte_lpm_find_existing(const char *name)
 
 	return l;
 }
+VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
+
+struct rte_lpm_extend *
+rte_lpm_find_existing_v22(const char *name)
+{
+	struct rte_lpm_extend *l = NULL;
+	struct rte_tailq_entry *te;
+	struct rte_lpm_list *lpm_list;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_FOREACH(te, lpm_list, next) {
+		l = (struct rte_lpm_extend *) te->data;
+		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+			break;
+	}
+	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	if (te == NULL) {
+		rte_errno = ENOENT;
+		return NULL;
+	}
+
+	return l;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v22, 2.2);
+MAP_STATIC_SYMBOL(struct rte_lpm_extend *
+		rte_lpm_find_existing(const char *name), rte_lpm_find_existing_v22);
 
 /*
  * Allocates memory for LPM object
  */
+
 struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules,
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
 		__rte_unused int flags)
 {
 	char mem_name[RTE_LPM_NAMESIZE];
@@ -213,12 +243,117 @@ exit:
 
 	return lpm;
 }
+VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
+
+struct rte_lpm_extend *
+rte_lpm_create_v22(const char *name, int socket_id, int max_rules,
+		__rte_unused int flags)
+{
+	char mem_name[RTE_LPM_NAMESIZE];
+	struct rte_lpm_extend *lpm = NULL;
+	struct rte_tailq_entry *te;
+	uint32_t mem_size;
+	struct rte_lpm_list *lpm_list;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	RTE_BUILD_BUG_ON(sizeof(union rte_lpm_tbl24_entry_extend) != 4);
+	RTE_BUILD_BUG_ON(sizeof(union rte_lpm_tbl8_entry_extend) != 4);
+
+	/* Check user arguments. */
+	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+	/* Determine the amount of memory to allocate. */
+	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* guarantee there's no existing */
+	TAILQ_FOREACH(te, lpm_list, next) {
+		lpm = (struct rte_lpm_extend *) te->data;
+		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+			break;
+	}
+	if (te != NULL)
+		goto exit;
+
+	/* allocate tailq entry */
+	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		goto exit;
+	}
+
+	/* Allocate memory to store the LPM data structures. */
+	lpm = (struct rte_lpm_extend *)rte_zmalloc_socket(mem_name, mem_size,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (lpm == NULL) {
+		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		rte_free(te);
+		goto exit;
+	}
+
+	/* Save user arguments. */
+	lpm->max_rules = max_rules;
+	snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+	te->data = (void *) lpm;
+
+	TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	return lpm;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_create, _v22, 2.2);
+MAP_STATIC_SYMBOL(struct rte_lpm_extend *
+		rte_lpm_create(const char *name, int socket_id, int max_rules,
+				__rte_unused int flags), rte_lpm_create_v22);
 
 /*
  * Deallocates memory for given LPM table.
  */
 void
-rte_lpm_free(struct rte_lpm *lpm)
+rte_lpm_free_v20(struct rte_lpm *lpm)
+{
+	struct rte_lpm_list *lpm_list;
+	struct rte_tailq_entry *te;
+
+	/* Check user arguments. */
+	if (lpm == NULL)
+		return;
+
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* find our tailq entry */
+	TAILQ_FOREACH(te, lpm_list, next) {
+		if (te->data == (void *) lpm)
+			break;
+	}
+	if (te == NULL) {
+		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+		return;
+	}
+
+	TAILQ_REMOVE(lpm_list, te, next);
+
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	rte_free(lpm);
+	rte_free(te);
+}
+VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
+
+void
+rte_lpm_free_v22(struct rte_lpm_extend *lpm)
 {
 	struct rte_lpm_list *lpm_list;
 	struct rte_tailq_entry *te;
@@ -248,6 +383,9 @@ rte_lpm_free(struct rte_lpm *lpm)
 	rte_free(lpm);
 	rte_free(te);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_free, _v22, 2.2);
+MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm_extend *lpm),
+		rte_lpm_free_v22);
 
 /*
  * Adds a rule to the rule table.
@@ -328,10 +466,80 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	return rule_index;
 }
 
+static inline int32_t
+rule_add_extend(struct rte_lpm_extend *lpm, uint32_t ip_masked, uint8_t depth,
+	uint32_t next_hop)
+{
+	uint32_t rule_gindex, rule_index, last_rule;
+	int i;
+
+	VERIFY_DEPTH(depth);
+
+	/* Scan through rule group to see if rule already exists. */
+	if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+		/* rule_gindex stands for rule group index. */
+		rule_gindex = lpm->rule_info[depth - 1].first_rule;
+		/* Initialise rule_index to point to start of rule group. */
+		rule_index = rule_gindex;
+		/* Last rule = Last used rule in this rule group. */
+		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+		for (; rule_index < last_rule; rule_index++) {
+
+			/* If rule already exists update its next_hop and return. */
+			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+				lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+				return rule_index;
+			}
+		}
+
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+	} else {
+		/* Calculate the position in which the rule will be stored. */
+		rule_index = 0;
+
+		for (i = depth - 1; i > 0; i--) {
+			if (lpm->rule_info[i - 1].used_rules > 0) {
+				rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+				break;
+			}
+		}
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+
+		lpm->rule_info[depth - 1].first_rule = rule_index;
+	}
+
+	/* Make room for the new rule in the array. */
+	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+		if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+			return -ENOSPC;
+
+		if (lpm->rule_info[i - 1].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+			lpm->rule_info[i - 1].first_rule++;
+		}
+	}
+
+	/* Add the new rule. */
+	lpm->rules_tbl[rule_index].ip = ip_masked;
+	lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+	/* Increment the used rules counter for this rule group. */
+	lpm->rule_info[depth - 1].used_rules++;
+
+	return rule_index;
+}
+
 /*
  * Delete a rule from the rule table.
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
+
 static inline void
 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
 {
@@ -353,6 +561,27 @@ rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
 	lpm->rule_info[depth - 1].used_rules--;
 }
 
+static inline void
+rule_delete_extend(struct rte_lpm_extend *lpm, int32_t rule_index, uint8_t depth)
+{
+	int i;
+
+	VERIFY_DEPTH(depth);
+
+	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+			+ lpm->rule_info[depth - 1].used_rules - 1];
+
+	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+		if (lpm->rule_info[i].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+					lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+			lpm->rule_info[i].first_rule--;
+		}
+	}
+
+	lpm->rule_info[depth - 1].used_rules--;
+}
+
 /*
  * Finds a rule in rule table.
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
@@ -378,6 +607,27 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
 	return -EINVAL;
 }
 
+static inline int32_t
+rule_find_extend(struct rte_lpm_extend *lpm, uint32_t ip_masked, uint8_t depth)
+{
+	uint32_t rule_gindex, last_rule, rule_index;
+
+	VERIFY_DEPTH(depth);
+
+	rule_gindex = lpm->rule_info[depth - 1].first_rule;
+	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+	/* Scan used rules at given depth to find rule. */
+	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+		/* If rule is found return the rule index. */
+		if (lpm->rules_tbl[rule_index].ip == ip_masked)
+			return rule_index;
+	}
+
+	/* If rule is not found return -EINVAL. */
+	return -EINVAL;
+}
+
 /*
  * Find, clean and allocate a tbl8.
  */
@@ -409,6 +659,33 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
 	return -ENOSPC;
 }
 
+static inline int32_t
+tbl8_alloc_extend(union rte_lpm_tbl8_entry_extend *tbl8)
+{
+	uint32_t tbl8_gindex; /* tbl8 group index. */
+	union rte_lpm_tbl8_entry_extend *tbl8_entry;
+
+	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+	for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
+			tbl8_gindex++) {
+		tbl8_entry = &tbl8[tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+		/* If a free tbl8 group is found clean it and set as VALID. */
+		if (!tbl8_entry->valid_group) {
+			memset(&tbl8_entry[0], 0,
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
+					sizeof(tbl8_entry[0]));
+
+			tbl8_entry->valid_group = VALID;
+
+			/* Return group index for allocated tbl8 group. */
+			return tbl8_gindex;
+		}
+	}
+
+	/* If there are no tbl8 groups free then return error. */
+	return -ENOSPC;
+}
+
 static inline void
 tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 {
@@ -416,6 +693,13 @@ tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 	tbl8[tbl8_group_start].valid_group = INVALID;
 }
 
+static inline void
+tbl8_free_extend(union rte_lpm_tbl8_entry_extend *tbl8, uint32_t tbl8_group_start)
+{
+	/* Set tbl8 group invalid*/
+	tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
 static inline int32_t
 add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 		uint8_t next_hop)
@@ -485,12 +769,77 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 }
 
 static inline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
-		uint8_t next_hop)
+add_depth_small_extend(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+		uint32_t next_hop)
 {
-	uint32_t tbl24_index;
-	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
-		tbl8_range, i;
+	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
+
+	/* Calculate the index into Table24. */
+	tbl24_index = ip >> 8;
+	tbl24_range = depth_to_range(depth);
+
+	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+		/*
+		 * For invalid OR valid and non-extended tbl24 entries, set
+		 * the entry.
+		 */
+		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+				lpm->tbl24[i].depth <= depth)) {
+
+			union rte_lpm_tbl24_entry_extend new_tbl24_entry;
+			new_tbl24_entry.next_hop = next_hop;
+			new_tbl24_entry.valid = VALID;
+			new_tbl24_entry.ext_entry = 0;
+			new_tbl24_entry.depth = depth;
+
+			/* Setting tbl24 entry in one go to avoid race
+			 * conditions
+			 */
+			lpm->tbl24[i] = new_tbl24_entry;
+
+			continue;
+		}
+
+		if (lpm->tbl24[i].ext_entry == 1) {
+			/* If tbl24 entry is valid and extended calculate the
+			 *  index into tbl8.
+			 */
+			tbl8_index = lpm->tbl24[i].tbl8_gindex *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			tbl8_group_end = tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+			for (j = tbl8_index; j < tbl8_group_end; j++) {
+				if (!lpm->tbl8[j].valid ||
+						lpm->tbl8[j].depth <= depth) {
+					union rte_lpm_tbl8_entry_extend new_tbl8_entry;
+					new_tbl8_entry.valid = VALID;
+					new_tbl8_entry.valid_group = VALID;
+					new_tbl8_entry.depth = depth;
+					new_tbl8_entry.next_hop = next_hop;
+
+					/*
+					 * Setting tbl8 entry in one go to avoid
+					 * race conditions
+					 */
+					lpm->tbl8[j] = new_tbl8_entry;
+
+					continue;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static inline int32_t
+add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+		uint8_t next_hop)
+{
+	uint32_t tbl24_index;
+	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+		tbl8_range, i;
 
 	tbl24_index = (ip_masked >> 8);
 	tbl8_range = depth_to_range(depth);
@@ -616,11 +965,140 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	return 0;
 }
 
+static inline int32_t
+add_depth_big_extend(struct rte_lpm_extend *lpm, uint32_t ip_masked, uint8_t depth,
+		uint32_t next_hop)
+{
+	uint32_t tbl24_index;
+	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+		tbl8_range, i;
+
+	tbl24_index = (ip_masked >> 8);
+	tbl8_range = depth_to_range(depth);
+
+	if (!lpm->tbl24[tbl24_index].valid) {
+		/* Search for a free tbl8 group. */
+		tbl8_group_index = tbl8_alloc_extend(lpm->tbl8);
+
+		/* Check tbl8 allocation was successful. */
+		if (tbl8_group_index < 0) {
+			return tbl8_group_index;
+		}
+
+		/* Find index into tbl8 and range. */
+		tbl8_index = (tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+				(ip_masked & 0xFF);
+
+		/* Set tbl8 entry. */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
+			lpm->tbl8[i].valid = VALID;
+		}
+
+		/*
+		 * Update tbl24 entry to point to new tbl8 entry. Note: The
+		 * ext_flag and tbl8_index need to be updated simultaneously,
+		 * so assign whole structure in one go
+		 */
+
+		union rte_lpm_tbl24_entry_extend new_tbl24_entry;
+		new_tbl24_entry.next_hop = (uint8_t)tbl8_group_index;
+		new_tbl24_entry.valid = VALID;
+		new_tbl24_entry.ext_entry = 1;
+		new_tbl24_entry.depth = 0;
+
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+	}
+	/* If entry is valid but not extended, calculate the index into tbl8. */
+	else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+		/* Search for free tbl8 group. */
+		tbl8_group_index = tbl8_alloc_extend(lpm->tbl8);
+
+		if (tbl8_group_index < 0) {
+			return tbl8_group_index;
+		}
+
+		tbl8_group_start = tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl8_group_end = tbl8_group_start +
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+		/* Populate new tbl8 with tbl24 value. */
+		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+			lpm->tbl8[i].next_hop =
+					lpm->tbl24[tbl24_index].next_hop;
+		}
+
+		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+		/* Insert new rule into the tbl8 entry. */
+		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+			if (!lpm->tbl8[i].valid ||
+					lpm->tbl8[i].depth <= depth) {
+				lpm->tbl8[i].valid = VALID;
+				lpm->tbl8[i].depth = depth;
+				lpm->tbl8[i].next_hop = next_hop;
+
+				continue;
+			}
+		}
+
+		/*
+		 * Update tbl24 entry to point to new tbl8 entry. Note: The
+		 * ext_flag and tbl8_index need to be updated simultaneously,
+		 * so assign whole structure in one go.
+		 */
+
+		union rte_lpm_tbl24_entry_extend new_tbl24_entry;
+		new_tbl24_entry.next_hop = (uint8_t)tbl8_group_index;
+		new_tbl24_entry.valid = VALID;
+		new_tbl24_entry.ext_entry = 1;
+		new_tbl24_entry.depth = 0;
+
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+	} else { /*
+		* If it is a valid, extended entry, calculate the index into tbl8.
+		*/
+		tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+		tbl8_group_start = tbl8_group_index *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+			if (!lpm->tbl8[i].valid ||
+					lpm->tbl8[i].depth <= depth) {
+				union rte_lpm_tbl8_entry_extend new_tbl8_entry;
+				new_tbl8_entry.valid = VALID;
+				new_tbl8_entry.depth = depth;
+				new_tbl8_entry.next_hop = next_hop;
+				new_tbl8_entry.valid_group = lpm->tbl8[i].valid_group;
+
+				/*
+				 * Setting tbl8 entry in one go to avoid race
+				 * condition
+				 */
+				lpm->tbl8[i] = new_tbl8_entry;
+
+				continue;
+			}
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Add a route
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_add_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 		uint8_t next_hop)
 {
 	int32_t rule_index, status = 0;
@@ -659,12 +1137,56 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 
 	return 0;
 }
+VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
+
+int
+rte_lpm_add_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+		uint32_t next_hop)
+{
+	int32_t rule_index, status = 0;
+	uint32_t ip_masked;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	ip_masked = ip & depth_to_mask(depth);
+
+	/* Add the rule to the rule table. */
+	rule_index = rule_add_extend(lpm, ip_masked, depth, next_hop);
+
+	/* If there is no space available for the new rule, return an error. */
+	if (rule_index < 0) {
+		return rule_index;
+	}
+
+	if (depth <= MAX_DEPTH_TBL24) {
+		status = add_depth_small_extend(lpm, ip_masked, depth, next_hop);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		status = add_depth_big_extend(lpm, ip_masked, depth, next_hop);
+
+		/*
+		 * If add fails due to exhaustion of tbl8 extensions delete
+		 * rule that was added to rule table.
+		 */
+		if (status < 0) {
+			rule_delete_extend(lpm, rule_index, depth);
+
+			return status;
+		}
+	}
+
+	return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_add, _v22, 2.2);
+MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm_extend *lpm,
+		uint32_t ip, uint8_t depth, uint32_t next_hop), rte_lpm_add_v22);
 
 /*
  * Look for a rule in the high-level rules table
  */
 int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_is_rule_present_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 uint8_t *next_hop)
 {
 	uint32_t ip_masked;
@@ -688,6 +1210,37 @@ uint8_t *next_hop)
 	/* If rule is not found return 0. */
 	return 0;
 }
+VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm_is_rule_present_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop)
+{
+	uint32_t ip_masked;
+	int32_t rule_index;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) ||
+		(next_hop == NULL) ||
+		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	/* Look for the rule using rule_find. */
+	ip_masked = ip & depth_to_mask(depth);
+	rule_index = rule_find_extend(lpm, ip_masked, depth);
+
+	if (rule_index >= 0) {
+		*next_hop = lpm->rules_tbl[rule_index].next_hop;
+		return 1;
+	}
+
+	/* If rule is not found return 0. */
+	return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v22, 2.2);
+MAP_STATIC_SYMBOL(int
+		rte_lpm_is_rule_present(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+		uint32_t *next_hop), rte_lpm_is_rule_present_v22);
 
 static inline int32_t
 find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
@@ -711,6 +1264,28 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub
 }
 
 static inline int32_t
+find_previous_rule_extend(struct rte_lpm_extend *lpm,
+		uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
+{
+	int32_t rule_index;
+	uint32_t ip_masked;
+	uint8_t prev_depth;
+
+	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+		ip_masked = ip & depth_to_mask(prev_depth);
+
+		rule_index = rule_find_extend(lpm, ip_masked, prev_depth);
+
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
+			return rule_index;
+		}
+	}
+
+	return -1;
+}
+
+static inline int32_t
 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
@@ -805,6 +1380,96 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 	return 0;
 }
 
+static inline int32_t
+delete_depth_small_extend(struct rte_lpm_extend *lpm, uint32_t ip_masked,
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+
+	/* Calculate the range and index into Table24. */
+	tbl24_range = depth_to_range(depth);
+	tbl24_index = (ip_masked >> 8);
+
+	/*
+	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
+	 * and a positive number indicates a sub_rule_index.
+	 */
+	if (sub_rule_index < 0) {
+		/*
+		 * If no replacement rule exists then invalidate entries
+		 * associated with this rule.
+		 */
+		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+			if (lpm->tbl24[i].ext_entry == 0 &&
+					lpm->tbl24[i].depth <= depth) {
+				lpm->tbl24[i].valid = INVALID;
+			} else {
+				/*
+				 * If TBL24 entry is extended, then there has
+				 * to be a rule with depth >= 25 in the
+				 * associated TBL8 group.
+				 */
+
+				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+				tbl8_index = tbl8_group_index *
+						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+				for (j = tbl8_index; j < (tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+					if (lpm->tbl8[j].depth <= depth)
+						lpm->tbl8[j].valid = INVALID;
+				}
+			}
+		}
+	} else {
+		/*
+		 * If a replacement rule exists then modify entries
+		 * associated with this rule.
+		 */
+
+		union rte_lpm_tbl24_entry_extend new_tbl24_entry;
+		new_tbl24_entry.next_hop = lpm->rules_tbl[sub_rule_index].next_hop;
+		new_tbl24_entry.valid = VALID;
+		new_tbl24_entry.ext_entry = 0;
+		new_tbl24_entry.depth = sub_rule_depth;
+
+		union rte_lpm_tbl8_entry_extend new_tbl8_entry;
+		new_tbl8_entry.valid = VALID;
+		new_tbl8_entry.depth = sub_rule_depth;
+		new_tbl8_entry.next_hop = lpm->rules_tbl[sub_rule_index].next_hop;
+
+
+		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+			if (lpm->tbl24[i].ext_entry == 0 &&
+					lpm->tbl24[i].depth <= depth) {
+				lpm->tbl24[i] = new_tbl24_entry;
+			} else {
+				/*
+				 * If TBL24 entry is extended, then there has
+				 * to be a rule with depth >= 25 in the
+				 * associated TBL8 group.
+				 */
+
+				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+				tbl8_index = tbl8_group_index *
+						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+				for (j = tbl8_index; j < (tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+					if (lpm->tbl8[j].depth <= depth)
+						lpm->tbl8[j] = new_tbl8_entry;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Checks if table 8 group can be recycled.
  *
@@ -813,6 +1478,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
  * Return of value > -1 means tbl8 is in use but has all the same values and
  * thus can be recycled
  */
+
 static inline int32_t
 tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 {
@@ -860,6 +1526,53 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
 }
 
 static inline int32_t
+tbl8_recycle_check_extend(union rte_lpm_tbl8_entry_extend *tbl8, uint32_t tbl8_group_start)
+{
+	uint32_t tbl8_group_end, i;
+
+	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+	/*
+	 * Check the first entry of the given tbl8. If it is invalid we know
+	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+	 *  (As they would affect all entries in a tbl8) and thus this table
+	 *  can not be recycled.
+	 */
+	if (tbl8[tbl8_group_start].valid) {
+		/*
+		 * If first entry is valid check if the depth is less than 24
+		 * and if so check the rest of the entries to verify that they
+		 * are all of this depth.
+		 */
+		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
+					i++) {
+
+				if (tbl8[i].depth !=
+						tbl8[tbl8_group_start].depth) {
+
+					return -EEXIST;
+				}
+			}
+			/* If all entries are the same, return the tbl8 index. */
+			return tbl8_group_start;
+		}
+
+		return -EEXIST;
+	}
+	/*
+	 * If the first entry is invalid check if the rest of the entries in
+	 * the tbl8 are invalid.
+	 */
+	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+		if (tbl8[i].valid)
+			return -EEXIST;
+	}
+	/* If no valid entries are found then return -EINVAL. */
+	return -EINVAL;
+}
+
+static inline int32_t
 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
@@ -938,11 +1651,86 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	return 0;
 }
 
+static inline int32_t
+delete_depth_big_extend(struct rte_lpm_extend *lpm, uint32_t ip_masked,
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+			tbl8_range, i;
+	int32_t tbl8_recycle_index;
+
+	/*
+	 * Calculate the index into tbl24 and range. Note: All depths larger
+	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+	 */
+	tbl24_index = ip_masked >> 8;
+
+	/* Calculate the index into tbl8 and range. */
+	tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+	tbl8_range = depth_to_range(depth);
+
+	if (sub_rule_index < 0) {
+		/*
+		 * Loop through the range of entries on tbl8 for which the
+		 * rule_to_delete must be removed or modified.
+		 */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			if (lpm->tbl8[i].depth <= depth)
+				lpm->tbl8[i].valid = INVALID;
+		}
+	} else {
+		/* Set new tbl8 entry. */
+		union rte_lpm_tbl8_entry_extend new_tbl8_entry;
+		new_tbl8_entry.valid = VALID;
+		new_tbl8_entry.depth = sub_rule_depth;
+		new_tbl8_entry.valid_group = lpm->tbl8[tbl8_group_start].valid_group;
+		new_tbl8_entry.next_hop = lpm->rules_tbl[sub_rule_index].next_hop;
+
+		/*
+		 * Loop through the range of entries on tbl8 for which the
+		 * rule_to_delete must be modified.
+		 */
+		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+			if (lpm->tbl8[i].depth <= depth)
+				lpm->tbl8[i] = new_tbl8_entry;
+		}
+	}
+
+	/*
+	 * Check if there are any valid entries in this tbl8 group. If all
+	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
+	 * associated tbl24 entry.
+	 */
+
+	tbl8_recycle_index = tbl8_recycle_check_extend(lpm->tbl8, tbl8_group_start);
+
+	if (tbl8_recycle_index == -EINVAL) {
+		/* Set tbl24 before freeing tbl8 to avoid race condition. */
+		lpm->tbl24[tbl24_index].valid = 0;
+		tbl8_free_extend(lpm->tbl8, tbl8_group_start);
+	} else if (tbl8_recycle_index > -1) {
+		/* Update tbl24 entry. */
+		union rte_lpm_tbl24_entry_extend new_tbl24_entry;
+		new_tbl24_entry.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop;
+		new_tbl24_entry.valid = VALID;
+		new_tbl24_entry.ext_entry = 0;
+		new_tbl24_entry.depth = lpm->tbl8[tbl8_recycle_index].depth;
+
+		/* Set tbl24 before freeing tbl8 to avoid race condition. */
+		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		tbl8_free_extend(lpm->tbl8, tbl8_group_start);
+	}
+
+	return 0;
+}
+
 /*
  * Deletes a rule
  */
 int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+rte_lpm_delete_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 {
 	int32_t rule_to_delete_index, sub_rule_index;
 	uint32_t ip_masked;
@@ -993,12 +1781,85 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
 	}
 }
+VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
+
+int
+rte_lpm_delete_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth)
+{
+	int32_t rule_to_delete_index, sub_rule_index;
+	uint32_t ip_masked;
+	uint8_t sub_rule_depth;
+	/*
+	 * Check input arguments. Note: IP must be a positive integer of 32
+	 * bits in length therefore it need not be checked.
+	 */
+	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+		return -EINVAL;
+	}
+
+	ip_masked = ip & depth_to_mask(depth);
+
+	/*
+	 * Find the index of the input rule, that needs to be deleted, in the
+	 * rule table.
+	 */
+	rule_to_delete_index = rule_find_extend(lpm, ip_masked, depth);
+
+	/*
+	 * Check if rule_to_delete_index was found. If no rule was found, the
+	 * function rule_find_extend returns -EINVAL.
+	 */
+	if (rule_to_delete_index < 0)
+		return -EINVAL;
+
+	/* Delete the rule from the rule table. */
+	rule_delete_extend(lpm, rule_to_delete_index, depth);
+
+	/*
+	 * Find rule to replace the rule_to_delete. If there is no rule to
+	 * replace the rule_to_delete we return -1 and invalidate the table
+	 * entries associated with this rule.
+	 */
+	sub_rule_depth = 0;
+	sub_rule_index = find_previous_rule_extend(lpm, ip, depth, &sub_rule_depth);
+
+	/*
+	 * If the input depth value is less than 25 use function
+	 * delete_depth_small_extend, otherwise use delete_depth_big_extend.
+	 */
+	if (depth <= MAX_DEPTH_TBL24) {
+		return delete_depth_small_extend(lpm, ip_masked, depth,
+				sub_rule_index, sub_rule_depth);
+	} else { /* If depth > MAX_DEPTH_TBL24 */
+		return delete_depth_big_extend(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
+	}
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v22, 2.2);
+MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth), rte_lpm_delete_v22);
+
 
 /*
  * Delete all rules from the LPM table.
  */
 void
-rte_lpm_delete_all(struct rte_lpm *lpm)
+rte_lpm_delete_all_v20(struct rte_lpm *lpm)
+{
+	/* Zero rule information. */
+	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+
+	/* Zero tbl24. */
+	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+	/* Zero tbl8. */
+	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+
+	/* Delete all rules from the rules table. */
+	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
+
+void
+rte_lpm_delete_all_v22(struct rte_lpm_extend *lpm)
 {
 	/* Zero rule information. */
 	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
@@ -1012,3 +1873,5 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
 	/* Delete all rules form the rules table. */
 	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
+BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v22, 2.2);
+MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm_extend *lpm), rte_lpm_delete_all_v22);
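
Note on the versioning pattern used throughout this file: every public
function keeps its 2.0 binary interface as a _v20 symbol and gains a _v22
default. A minimal sketch of the pattern, with hypothetical names
(rte_foo_bar, struct foo), assuming DPDK's <rte_compat.h>:

	struct foo;		/* 2.0 ABI type */
	struct foo_extend;	/* 2.2 ABI type */

	int rte_foo_bar_v20(struct foo *f);		/* old implementation */
	int rte_foo_bar_v22(struct foo_extend *f);	/* new implementation */

	/* Old binaries keep resolving rte_foo_bar@DPDK_2.0. */
	VERSION_SYMBOL(rte_foo_bar, _v20, 2.0);
	/* Newly linked binaries get rte_foo_bar@@DPDK_2.2 by default. */
	BIND_DEFAULT_SYMBOL(rte_foo_bar, _v22, 2.2);
	/* Static builds map the plain name straight to the new version. */
	MAP_STATIC_SYMBOL(int rte_foo_bar(struct foo_extend *f),
			rte_foo_bar_v22);
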
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index c299ce2..5ecb95b 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -49,6 +49,8 @@
 #include <rte_common.h>
 #include <rte_vect.h>
 
+#include <rte_compat.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -128,12 +130,76 @@ struct rte_lpm_tbl8_entry {
 };
 #endif
 
+/** @internal bitmask with valid and ext_entry/valid_group fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND 0x03000000
+
+/** Bitmask used to indicate successful lookup */
+#define RTE_LPM_LOOKUP_SUCCESS_EXTEND          0x01000000
+
+/** Bitmask used to get the 24-bit next hop value from a uint32_t entry */
+#define RTE_LPM_NEXT_HOP_MASK 0x00ffffff
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+/** @internal Tbl24 entry structure. */
+union rte_lpm_tbl24_entry_extend {
+	uint32_t entry;
+	struct {
+		uint32_t next_hop	:24;/**< next hop. */
+		uint32_t valid		:1; /**< Validation flag. */
+		uint32_t ext_entry	:1; /**< External entry. */
+		uint32_t depth		:6; /**< Rule depth. */
+	};
+};
+/* Store group index (i.e. gindex) into tbl8. */
+#define tbl8_gindex next_hop
+
+/** @internal Tbl8 entry structure. */
+union rte_lpm_tbl8_entry_extend {
+	uint32_t entry;
+	struct {
+		uint32_t next_hop	:24;/**< next hop. */
+		uint32_t valid		:1; /**< Validation flag. */
+		uint32_t valid_group	:1; /**< Group validation flag. */
+		uint32_t depth		:6; /**< Rule depth. */
+	};
+};
+#else
+union rte_lpm_tbl24_entry_extend {
+struct {
+	uint32_t depth		:6;
+	uint32_t ext_entry	:1;
+	uint32_t valid		:1;
+	uint32_t next_hop	:24;
+	};
+uint32_t entry;
+};
+#define tbl8_gindex next_hop
+
+union rte_lpm_tbl8_entry_extend {
+struct {
+	uint32_t depth			:6;
+	uint32_t valid_group	:1;
+	uint32_t valid			:1;
+	uint32_t next_hop		:24;
+	};
+uint32_t entry;
+};
+#endif
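+
+/*
+ * Layout sketch (illustrative; relies on the LSB-first bit-field layout
+ * that the masks above already assume): a little-endian tbl24 entry with
+ * next_hop = 0x000102, valid = 1, ext_entry = 0 and depth = 24 is stored
+ * as (24 << 26) | (0 << 25) | (1 << 24) | 0x000102 = 0x61000102, which is
+ * why RTE_LPM_LOOKUP_SUCCESS_EXTEND is 0x01000000 and
+ * RTE_LPM_NEXT_HOP_MASK is 0x00ffffff.
+ */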
+
 /** @internal Rule structure. */
 struct rte_lpm_rule {
 	uint32_t ip; /**< Rule IP address. */
 	uint8_t  next_hop; /**< Rule next hop. */
 };
 
+/** @internal Rule (extend) structure. */
+struct rte_lpm_rule_extend {
+	uint32_t ip; /**< Rule IP address. */
+	uint32_t next_hop; /**< Rule next hop. */
+};
+
 /** @internal Contains metadata about the rules table. */
 struct rte_lpm_rule_info {
 	uint32_t used_rules; /**< Used rules so far. */
@@ -156,6 +222,22 @@ struct rte_lpm {
 			__rte_cache_aligned; /**< LPM rules. */
 };
 
+/** @internal LPM (extend) structure. */
+struct rte_lpm_extend {
+	/* LPM metadata. */
+	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
+	uint32_t max_rules; /**< Max. balanced rules per lpm. */
+	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+	/* LPM Tables. */
+	union rte_lpm_tbl24_entry_extend tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+			__rte_cache_aligned; /**< LPM tbl24 table. */
+	union rte_lpm_tbl8_entry_extend tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+			__rte_cache_aligned; /**< LPM tbl8 table. */
+	struct rte_lpm_rule_extend rules_tbl[0] \
+			__rte_cache_aligned; /**< LPM rules. */
+};
+
 /**
  * Create an LPM object.
  *
@@ -177,8 +259,12 @@ struct rte_lpm {
  *    - EEXIST - a memzone with the same name already exists
  *    - ENOMEM - no appropriate memory area found in which to create memzone
  */
-struct rte_lpm *
+struct rte_lpm_extend *
 rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm_extend *
+rte_lpm_create_v22(const char *name, int socket_id, int max_rules, int flags);
 
 /**
  * Find an existing LPM object and return a pointer to it.
@@ -190,8 +276,12 @@ rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
  *   set appropriately. Possible rte_errno values include:
  *    - ENOENT - required entry not available to return.
  */
-struct rte_lpm *
+struct rte_lpm_extend *
 rte_lpm_find_existing(const char *name);
+struct rte_lpm *
+rte_lpm_find_existing_v20(const char *name);
+struct rte_lpm_extend *
+rte_lpm_find_existing_v22(const char *name);
 
 /**
  * Free an LPM object.
@@ -202,7 +292,11 @@ rte_lpm_find_existing(const char *name);
  *   None
  */
 void
-rte_lpm_free(struct rte_lpm *lpm);
+rte_lpm_free(struct rte_lpm_extend *lpm);
+void
+rte_lpm_free_v20(struct rte_lpm *lpm);
+void
+rte_lpm_free_v22(struct rte_lpm_extend *lpm);
 
 /**
  * Add a rule to the LPM table.
@@ -219,7 +313,11 @@ rte_lpm_free(struct rte_lpm *lpm);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
+int
+rte_lpm_add_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+int
+rte_lpm_add_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
 
 /**
  * Check if a rule is present in the LPM table,
@@ -237,8 +335,14 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
  *   1 if the rule exists, 0 if it does not, a negative value on failure
  */
 int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_is_rule_present(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 uint8_t *next_hop);
+int
+rte_lpm_is_rule_present_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
 
 /**
  * Delete a rule from the LPM table.
@@ -253,7 +357,11 @@ uint8_t *next_hop);
  *   0 on success, negative value otherwise
  */
 int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+rte_lpm_delete(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v20(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v22(struct rte_lpm_extend *lpm, uint32_t ip, uint8_t depth);
 
 /**
  * Delete all rules from the LPM table.
@@ -262,7 +370,11 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
  *   LPM object handle
  */
 void
-rte_lpm_delete_all(struct rte_lpm *lpm);
+rte_lpm_delete_all(struct rte_lpm_extend *lpm);
+void
+rte_lpm_delete_all_v20(struct rte_lpm *lpm);
+void
+rte_lpm_delete_all_v22(struct rte_lpm_extend *lpm);
 
 /**
  * Lookup an IP into the LPM table.
@@ -276,6 +388,7 @@ rte_lpm_delete_all(struct rte_lpm *lpm);
  * @return
  *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
  */
 static inline int
 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
 {
@@ -302,6 +415,32 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
 	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
 }
 
+static inline int
+rte_lpm_lookup_extend(struct rte_lpm_extend *lpm, uint32_t ip, uint32_t *next_hop)
+{
+	unsigned tbl24_index = (ip >> 8);
+	uint32_t tbl_entry;
+
+	/* DEBUG: Check user input arguments. */
+	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
+
+	/* Copy tbl24 entry */
+	tbl_entry = lpm->tbl24[tbl24_index].entry;
+
+	/* Copy tbl8 entry (only if needed) */
+	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+
+		unsigned tbl8_index = (uint8_t)ip +
+				((tbl_entry & RTE_LPM_NEXT_HOP_MASK) *
+				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+		tbl_entry = lpm->tbl8[tbl8_index].entry;
+	}
+
+	*next_hop = tbl_entry & RTE_LPM_NEXT_HOP_MASK;
+	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS_EXTEND) ? 0 : -ENOENT;
+}
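+
+/*
+ * Usage sketch (illustrative only; "lpm" is assumed to come from
+ * rte_lpm_create() and process() is a hypothetical consumer):
+ *
+ *	uint32_t next_hop;
+ *
+ *	if (rte_lpm_lookup_extend(lpm, ip, &next_hop) == 0)
+ *		process(next_hop);
+ *
+ * On a hit next_hop holds the 24-bit next hop; on a miss the function
+ * returns -ENOENT and next_hop is not meaningful.
+ */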
+
 /**
  * Lookup multiple IP addresses in an LPM table. This may be implemented as a
  * macro, so the address of the function should not be used.
@@ -312,9 +451,9 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
  *   Array of IPs to be looked up in the LPM table
  * @param next_hops
  *   Next hop of the most specific rule found for IP (valid on lookup hit only).
- *   This is an array of two byte values. The most significant byte in each
+ *   This is an array of four-byte values. The most significant byte in each
  *   value says whether the lookup was successful (bitmask
- *   RTE_LPM_LOOKUP_SUCCESS is set). The least significant byte is the
+ *   RTE_LPM_LOOKUP_SUCCESS is set). The three least significant bytes are the
  *   actual next hop.
  * @param n
  *   Number of elements in ips (and next_hops) array to lookup. This should be a
@@ -322,8 +461,11 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
  *  @return
  *   -EINVAL for incorrect arguments, otherwise 0
  */
 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
 		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
+#define rte_lpm_lookup_bulk_extend(lpm, ips, next_hops, n) \
+		rte_lpm_lookup_bulk_func_extend(lpm, ips, next_hops, n)
 
 static inline int
 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
@@ -358,8 +500,42 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
 	return 0;
 }
 
+static inline int
+rte_lpm_lookup_bulk_func_extend(const struct rte_lpm_extend *lpm, const uint32_t *ips,
+		uint32_t *next_hops, const unsigned n)
+{
+	unsigned i;
+	unsigned tbl24_indexes[n];
+
+	/* DEBUG: Check user input arguments. */
+	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
+			(next_hops == NULL)), -EINVAL);
+
+	for (i = 0; i < n; i++) {
+		tbl24_indexes[i] = ips[i] >> 8;
+	}
+
+	for (i = 0; i < n; i++) {
+		/* Simply copy tbl24 entry to output */
+		next_hops[i] = lpm->tbl24[tbl24_indexes[i]].entry;
+
+		/* Overwrite output with tbl8 entry if needed */
+		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+				RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+
+			unsigned tbl8_index = (uint8_t)ips[i] +
+					((next_hops[i] & RTE_LPM_NEXT_HOP_MASK) *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+
+			next_hops[i] = lpm->tbl8[tbl8_index].entry;
+		}
+	}
+	return 0;
+}
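+
+/*
+ * Usage sketch (illustrative only; BURST and process() are hypothetical):
+ * each output element carries the success flag in its most significant
+ * byte and the next hop in the three least significant bytes:
+ *
+ *	uint32_t hops[BURST];
+ *	unsigned i;
+ *
+ *	rte_lpm_lookup_bulk_extend(lpm, ips, hops, BURST);
+ *	for (i = 0; i < BURST; i++)
+ *		if (hops[i] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
+ *			process(hops[i] & RTE_LPM_NEXT_HOP_MASK);
+ */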
+
 /* Mask four results. */
 #define	 RTE_LPM_MASKX4_RES	UINT64_C(0x00ff00ff00ff00ff)
+#define	 RTE_LPM_MASKX2_RES	UINT64_C(0x00ffffff00ffffff)
 
 /**
  * Lookup four IP addresses in an LPM table.
@@ -370,9 +546,9 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
  *   Four IPs to be looked up in the LPM table
  * @param hop
  *   Next hop of the most specific rule found for IP (valid on lookup hit only).
- *   This is an 4 elements array of two byte values.
- *   If the lookup was succesfull for the given IP, then least significant byte
- *   of the corresponding element is the  actual next hop and the most
+ *   This is a 4-element array of four-byte values.
+ *   If the lookup was successful for the given IP, then the three least
+ *   significant bytes of the corresponding element are the actual next hop and the most
  *   significant byte is zero.
  *   If the lookup for the given IP failed, then corresponding element would
  *   contain default value, see description of then next parameter.
@@ -380,6 +556,7 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
  *   Default value to populate into corresponding element of hop[] array,
  *   if lookup would fail.
  */
 static inline void
 rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
 	uint16_t defv)
@@ -473,6 +650,100 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
 	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
 }
 
+static inline void
+rte_lpm_lookupx4_extend(const struct rte_lpm_extend *lpm, __m128i ip, uint32_t hop[4],
+	uint32_t defv)
+{
+	__m128i i24;
+	rte_xmm_t i8;
+	uint32_t tbl[4];
+	uint64_t idx, pt, pt2;
+
+	const __m128i mask8 =
+		_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
+
+	/*
+	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND for 2 LPM entries
+	 * as one 64-bit value (0x0300000003000000).
+	 */
+	const uint64_t mask_xv =
+		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND |
+		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND << 32);
+
+	/*
+	 * RTE_LPM_LOOKUP_SUCCESS_EXTEND for 2 LPM entries
+	 * as one 64-bit value (0x0100000001000000).
+	 */
+	const uint64_t mask_v =
+		((uint64_t)RTE_LPM_LOOKUP_SUCCESS_EXTEND |
+		(uint64_t)RTE_LPM_LOOKUP_SUCCESS_EXTEND << 32);
+
+	/* get 4 indexes for tbl24[]. */
+	i24 = _mm_srli_epi32(ip, CHAR_BIT);
+
+	/* extract values from tbl24[] */
+	idx = _mm_cvtsi128_si64(i24);
+	i24 = _mm_srli_si128(i24, sizeof(uint64_t));
+
+	tbl[0] = lpm->tbl24[(uint32_t)idx].entry;
+	tbl[1] = lpm->tbl24[idx >> 32].entry;
+
+	idx = _mm_cvtsi128_si64(i24);
+
+	tbl[2] = lpm->tbl24[(uint32_t)idx].entry;
+	tbl[3] = lpm->tbl24[idx >> 32].entry;
+
+	/* get 4 indexes for tbl8[]. */
+	i8.x = _mm_and_si128(ip, mask8);
+
+	pt = (uint64_t)tbl[0] |
+		(uint64_t)tbl[1] << 32;
+	pt2 = (uint64_t)tbl[2] |
+		(uint64_t)tbl[3] << 32;
+
+	/* search successfully finished for all 4 IP addresses. */
+	if (likely((pt & mask_xv) == mask_v) &&
+			likely((pt2 & mask_xv) == mask_v)) {
+		*(uint64_t *)hop = pt & RTE_LPM_MASKX2_RES;
+		*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX2_RES;
+		return;
+	}
+
+	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+		i8.u32[0] = i8.u32[0] + (tbl[0] & RTE_LPM_NEXT_HOP_MASK) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[0] = lpm->tbl8[i8.u32[0]].entry;
+	}
+	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+		i8.u32[1] = i8.u32[1] + (tbl[1] & RTE_LPM_NEXT_HOP_MASK) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[1] = lpm->tbl8[i8.u32[1]].entry;
+	}
+	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+		i8.u32[2] = i8.u32[2] + (tbl[2] & RTE_LPM_NEXT_HOP_MASK) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[2] = lpm->tbl8[i8.u32[2]].entry;
+	}
+	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK_EXTEND)) {
+		i8.u32[3] = i8.u32[3] + (tbl[3] & RTE_LPM_NEXT_HOP_MASK) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[3] = lpm->tbl8[i8.u32[3]].entry;
+	}
+
+	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
+			? tbl[0] & RTE_LPM_NEXT_HOP_MASK : defv;
+	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
+			? tbl[1] & RTE_LPM_NEXT_HOP_MASK : defv;
+	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
+			? tbl[2] & RTE_LPM_NEXT_HOP_MASK : defv;
+	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS_EXTEND)
+			? tbl[3] & RTE_LPM_NEXT_HOP_MASK : defv;
+}
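+
+/*
+ * Usage sketch (illustrative only; ip_array is a hypothetical array of
+ * four IPv4 addresses):
+ *
+ *	uint32_t hop[4];
+ *	__m128i ip4 = _mm_loadu_si128((const __m128i *)ip_array);
+ *
+ *	rte_lpm_lookupx4_extend(lpm, ip4, hop, UINT32_MAX);
+ *
+ * Misses are filled with the default value (here UINT32_MAX), so the
+ * caller can treat it as a "no route" sentinel.
+ */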
+
 #ifdef __cplusplus
 }
 #endif
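
Putting the pieces together, a minimal caller of the extended API looks
roughly like this (illustrative sketch; IPv4() is the helper used in
app/test, error handling omitted):

	struct rte_lpm_extend *lpm;
	uint32_t next_hop;

	lpm = rte_lpm_create("example", SOCKET_ID_ANY, 1024, 0);
	rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 24, 1000000); /* next hop > 255 */
	if (rte_lpm_lookup_extend(lpm, IPv4(10, 0, 0, 1), &next_hop) == 0)
		printf("next hop %u\n", next_hop);
	rte_lpm_free(lpm);
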
diff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map
index 70e1c05..6ac8d15 100644
--- a/lib/librte_lpm/rte_lpm_version.map
+++ b/lib/librte_lpm/rte_lpm_version.map
@@ -1,23 +1,35 @@
 DPDK_2.0 {
 	global:
 
 	rte_lpm_add;
 	rte_lpm_create;
 	rte_lpm_delete;
 	rte_lpm_delete_all;
 	rte_lpm_find_existing;
 	rte_lpm_free;
 	rte_lpm_is_rule_present;
 	rte_lpm6_add;
 	rte_lpm6_create;
 	rte_lpm6_delete;
 	rte_lpm6_delete_all;
 	rte_lpm6_delete_bulk_func;
 	rte_lpm6_find_existing;
 	rte_lpm6_free;
 	rte_lpm6_is_rule_present;
 	rte_lpm6_lookup;
 	rte_lpm6_lookup_bulk_func;
 
 	local: *;
 };
+
+DPDK_2.2 {
+	global:
+
+	rte_lpm_add;
+	rte_lpm_create;
+	rte_lpm_delete;
+	rte_lpm_delete_all;
+	rte_lpm_find_existing;
+	rte_lpm_free;
+	rte_lpm_is_rule_present;
+} DPDK_2.0;
diff --git a/lib/librte_table/rte_table_lpm.c b/lib/librte_table/rte_table_lpm.c
index 849d899..ba55319 100644
--- a/lib/librte_table/rte_table_lpm.c
+++ b/lib/librte_table/rte_table_lpm.c
@@ -70,7 +70,7 @@ struct rte_table_lpm {
 	uint32_t offset;
 
 	/* Handle to low-level LPM table */
-	struct rte_lpm *lpm;
+	struct rte_lpm_extend *lpm;
 
 	/* Next Hop Table (NHT) */
 	uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
@@ -202,7 +202,7 @@ rte_table_lpm_entry_add(
 	struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
 	uint32_t nht_pos, nht_pos0_valid;
 	int status;
-	uint8_t nht_pos0 = 0;
+	uint32_t nht_pos0 = 0;
 
 	/* Check input parameters */
 	if (lpm == NULL) {
@@ -268,7 +268,7 @@ rte_table_lpm_entry_delete(
 {
 	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
 	struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
-	uint8_t nht_pos;
+	uint32_t nht_pos;
 	int status;
 
 	/* Check input parameters */
@@ -342,9 +342,9 @@ rte_table_lpm_lookup(
 			uint32_t ip = rte_bswap32(
 				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
 			int status;
-			uint8_t nht_pos;
+			uint32_t nht_pos;
 
-			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
+			status = rte_lpm_lookup_extend(lpm->lpm, ip, &nht_pos);
 			if (status == 0) {
 				pkts_out_mask |= pkt_mask;
 				entries[i] = (void *) &lpm->nht[nht_pos *
-- 
1.9.1


