patch 'bpf: fix starting with conditional jump' has been queued to stable release 25.11.1

Kevin Traynor ktraynor at redhat.com
Thu Feb 26 14:09:17 CET 2026


Hi,

FYI, your patch has been queued to stable release 25.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 03/02/26. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/6675df68a807a0c8a7a4dfe4b72670f5985be1c9

Thanks.

Kevin

---
>From 6675df68a807a0c8a7a4dfe4b72670f5985be1c9 Mon Sep 17 00:00:00 2001
From: Marat Khalili <marat.khalili at huawei.com>
Date: Tue, 27 Jan 2026 11:49:42 +0000
Subject: [PATCH] bpf: fix starting with conditional jump

[ upstream commit 5b3ef932bee6da8b40ed54f41ef7185240833dde ]

When the BPF program was starting with a conditional jump only one
(true) execution branch of the program was evaluated. Any instructions
jumped over were not evaluated and could contain invalid operations.
The root cause was using zero instruction index as a signal for ending
evaluation when backtracking.

Switch from using previous instruction index for tracking execution
history to a previous instruction pointer. First instruction will not
have it set, and therefore backtracking _from_ it will end evaluation,
not backtracking _to_ it like before.

Add two tests demonstrating the problem:
* test_jump_over_invalid_first: loads BPF program with
  conditional jump over the invalid operation, should not succeed;
* test_jump_over_invalid_non_first: same program with one extra
  instruction at the start to demonstrate that it is indeed invalid
  (and also guard against another kind of regression);

Fixes: 6e12ec4c4d6d ("bpf: add more checks")

Signed-off-by: Marat Khalili <marat.khalili at huawei.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at huawei.com>
Tested-by: Konstantin Ananyev <konstantin.ananyev at huawei.com>
Acked-by: Stephen Hemminger <stephen at networkplumber.org>
---
 app/test/test_bpf.c    | 80 ++++++++++++++++++++++++++++++++++++++++++
 lib/bpf/bpf_validate.c | 20 ++++-------
 2 files changed, 86 insertions(+), 14 deletions(-)

diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index a08eeae9e1..776a94c6bd 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -209,4 +209,84 @@ test_subtract_one(void)
 REGISTER_FAST_TEST(bpf_subtract_one_autotest, NOHUGE_OK, ASAN_OK, test_subtract_one);
 
+/*
+ * Conditionally jump over invalid operation as first instruction.
+ */
+static int
+test_jump_over_invalid_first(void)
+{
+	static const struct ebpf_insn ins[] = {
+		{
+			/* Jump over the next instruction for some r1. */
+			.code = (BPF_JMP | BPF_JEQ | BPF_K),
+			.dst_reg = EBPF_REG_1,
+			.imm = 42,
+			.off = 1,
+		},
+		{
+			/* Write 0xDEADBEEF to [r1 + INT16_MIN]. */
+			.code = (BPF_ST | BPF_MEM | EBPF_DW),
+			.dst_reg = EBPF_REG_1,
+			.off = INT16_MIN,
+			.imm = 0xDEADBEEF,
+		},
+		{
+			/* Set return value to the program argument. */
+			.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+			.src_reg = EBPF_REG_1,
+			.dst_reg = EBPF_REG_0,
+		},
+		{
+			.code = (BPF_JMP | EBPF_EXIT),
+		},
+	};
+	return bpf_load_test(RTE_DIM(ins), ins, EINVAL);
+}
+
+REGISTER_FAST_TEST(bpf_jump_over_invalid_first_autotest, NOHUGE_OK, ASAN_OK,
+	test_jump_over_invalid_first);
+
+/*
+ * Conditionally jump over invalid operation as non-first instruction.
+ */
+static int
+test_jump_over_invalid_non_first(void)
+{
+	static const struct ebpf_insn ins[] = {
+		{
+			/* Set return value to the program argument. */
+			.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+			.src_reg = EBPF_REG_1,
+			.dst_reg = EBPF_REG_0,
+		},
+		{
+			/* Jump over the next instruction for some r1. */
+			.code = (BPF_JMP | BPF_JEQ | BPF_K),
+			.dst_reg = EBPF_REG_1,
+			.imm = 42,
+			.off = 1,
+		},
+		{
+			/* Write 0xDEADBEEF to [r1 + INT16_MIN]. */
+			.code = (BPF_ST | BPF_MEM | EBPF_DW),
+			.dst_reg = EBPF_REG_1,
+			.off = INT16_MIN,
+			.imm = 0xDEADBEEF,
+		},
+		{
+			/* Set return value to the program argument. */
+			.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+			.src_reg = EBPF_REG_1,
+			.dst_reg = EBPF_REG_0,
+		},
+		{
+			.code = (BPF_JMP | EBPF_EXIT),
+		},
+	};
+	return bpf_load_test(RTE_DIM(ins), ins, EINVAL);
+}
+
+REGISTER_FAST_TEST(bpf_jump_over_invalid_non_first_autotest, NOHUGE_OK, ASAN_OK,
+	test_jump_over_invalid_non_first);
+
 /*
  * Basic functional tests for librte_bpf.
diff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c
index 47ad6fef0f..64a8f227a3 100644
--- a/lib/bpf/bpf_validate.c
+++ b/lib/bpf/bpf_validate.c
@@ -65,5 +65,5 @@ struct inst_node {
 	uint8_t edge_type[MAX_EDGES];
 	uint32_t edge_dest[MAX_EDGES];
-	uint32_t prev_node;
+	struct inst_node *prev_node;
 	struct {
 		struct bpf_eval_state *cur;   /* save/restore for jcc targets */
@@ -1876,10 +1876,4 @@ set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
 }
 
-static struct inst_node *
-get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
-{
-	return  bvf->in + node->prev_node;
-}
-
 /*
  * Depth-First Search (DFS) through previously constructed
@@ -1917,5 +1911,5 @@ dfs(struct bpf_verifier *bvf)
 			if (next != NULL) {
 				/* proceed with next child */
-				next->prev_node = get_node_idx(bvf, node);
+				next->prev_node = node;
 				node = next;
 			} else {
@@ -1926,5 +1920,5 @@ dfs(struct bpf_verifier *bvf)
 				set_node_colour(bvf, node, BLACK);
 				node->cur_edge = 0;
-				node = get_prev_node(bvf, node);
+				node = node->prev_node;
 			}
 		} else
@@ -2491,5 +2485,5 @@ evaluate(struct bpf_verifier *bvf)
 				stats.nb_prune++;
 			} else {
-				next->prev_node = get_node_idx(bvf, node);
+				next->prev_node = node;
 				node = next;
 			}
@@ -2502,9 +2496,7 @@ evaluate(struct bpf_verifier *bvf)
 			node->cur_edge = 0;
 			save_safe_eval_state(bvf, node);
-			node = get_prev_node(bvf, node);
+			node = node->prev_node;
 
-			/* finished */
-			if (node == bvf->in)
-				node = NULL;
+			/* first node will not have prev, signalling finish */
 		}
 	}
-- 
2.53.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2026-02-26 10:16:50.075110666 +0000
+++ 0075-bpf-fix-starting-with-conditional-jump.patch	2026-02-26 10:16:47.009459491 +0000
@@ -1 +1 @@
-From 5b3ef932bee6da8b40ed54f41ef7185240833dde Mon Sep 17 00:00:00 2001
+From 6675df68a807a0c8a7a4dfe4b72670f5985be1c9 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 5b3ef932bee6da8b40ed54f41ef7185240833dde ]
+
@@ -25 +26,0 @@
-Cc: stable at dpdk.org
@@ -37 +38 @@
-index f4002ac199..cf49a13394 100644
+index a08eeae9e1..776a94c6bd 100644
@@ -126 +127 @@
-index d015eab915..e8dbec2827 100644
+index 47ad6fef0f..64a8f227a3 100644
@@ -136 +137 @@
-@@ -1886,10 +1886,4 @@ set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
+@@ -1876,10 +1876,4 @@ set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
@@ -147 +148 @@
-@@ -1927,5 +1921,5 @@ dfs(struct bpf_verifier *bvf)
+@@ -1917,5 +1911,5 @@ dfs(struct bpf_verifier *bvf)
@@ -154 +155 @@
-@@ -1936,5 +1930,5 @@ dfs(struct bpf_verifier *bvf)
+@@ -1926,5 +1920,5 @@ dfs(struct bpf_verifier *bvf)
@@ -161 +162 @@
-@@ -2501,5 +2495,5 @@ evaluate(struct bpf_verifier *bvf)
+@@ -2491,5 +2485,5 @@ evaluate(struct bpf_verifier *bvf)
@@ -168 +169 @@
-@@ -2512,9 +2506,7 @@ evaluate(struct bpf_verifier *bvf)
+@@ -2502,9 +2496,7 @@ evaluate(struct bpf_verifier *bvf)



More information about the stable mailing list