patch 'bpf: fix signed shift overflows in ARM JIT' has been queued to stable release 25.11.1
Kevin Traynor
ktraynor at redhat.com
Thu Feb 26 14:09:14 CET 2026
Hi,
FYI, your patch has been queued to stable release 25.11.1
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 03/02/26. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable
This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/f308dc53d0d47e1689ba6a66177e5714603a8592
Thanks.
Kevin
---
From f308dc53d0d47e1689ba6a66177e5714603a8592 Mon Sep 17 00:00:00 2001
From: Marat Khalili <marat.khalili at huawei.com>
Date: Tue, 27 Jan 2026 11:49:38 +0000
Subject: [PATCH] bpf: fix signed shift overflows in ARM JIT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[ upstream commit 8203cb408d51a49828d9870a96557c245825d49a ]
Left shifts of integer literals and bool values overwriting the sign bit
were used multiple times in bpf_jit_arm64.c. E.g.:
insn = (!!is64) << 31;
where is64 has type bool (double bang is a no-op here). The operand of
left shift was promoted to type int, which when 32-bit wide cannot
represent the result. Similarly literal integers have int type by
default. Sanitizer produced the following diagnostic during runtime
(for various lines):
lib/bpf/bpf_jit_arm64.c:241:18: runtime error: left shift of 1 by 31
places cannot be represented in type 'int'
To fix the issue use RTE_BIT32 and similar macros instead.
Fixes: f3e516772464 ("bpf/arm: add prologue and epilogue")
Signed-off-by: Marat Khalili <marat.khalili at huawei.com>
Acked-by: Morten Brørup <mb at smartsharesystems.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at huawei.com>
Acked-by: Stephen Hemminger <stephen at networkplumber.org>
---
lib/bpf/bpf_jit_arm64.c | 162 ++++++++++++++++++++--------------------
1 file changed, 81 insertions(+), 81 deletions(-)
diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
index 96b8cd2e03..76df1e4ba1 100644
--- a/lib/bpf/bpf_jit_arm64.c
+++ b/lib/bpf/bpf_jit_arm64.c
@@ -29,5 +29,5 @@
#define check_imm(n, val) (((val) >= 0) ? !!((val) >> (n)) : !!((~val) >> (n)))
-#define mask_imm(n, val) ((val) & ((1 << (n)) - 1))
+#define mask_imm(n, val) ((val) & (RTE_BIT32(n) - 1))
struct ebpf_a64_map {
@@ -239,10 +239,10 @@ emit_add_sub_imm(struct a64_jit_ctx *ctx, bool is64, bool sub, uint8_t rd,
imm = mask_imm(12, imm12);
- insn = (!!is64) << 31;
- insn |= (!!sub) << 30;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(sub, 30);
insn |= 0x11000000;
insn |= rd;
- insn |= rn << 5;
- insn |= imm << 10;
+ insn |= RTE_SHIFT_VAL32(rn, 5);
+ insn |= RTE_SHIFT_VAL32(imm, 10);
emit_insn(ctx, insn,
@@ -280,14 +280,14 @@ emit_ls_pair_64(struct a64_jit_ctx *ctx, uint8_t rt, uint8_t rt2, uint8_t rn,
uint32_t insn;
- insn = (!!load) << 22;
- insn |= (!!pre_index) << 24;
+ insn = RTE_SHIFT_VAL32(load, 22);
+ insn |= RTE_SHIFT_VAL32(pre_index, 24);
insn |= 0xa8800000;
insn |= rt;
- insn |= rn << 5;
- insn |= rt2 << 10;
+ insn |= RTE_SHIFT_VAL32(rn, 5);
+ insn |= RTE_SHIFT_VAL32(rt2, 10);
if (push)
- insn |= 0x7e << 15; /* 0x7e means -2 with imm7 */
+ insn |= RTE_SHIFT_VAL32(0x7e, 15); /* 0x7e means -2 with imm7 */
else
- insn |= 0x2 << 15;
+ insn |= RTE_SHIFT_VAL32(0x2, 15);
emit_insn(ctx, insn, check_reg(rn) || check_reg(rt) || check_reg(rt2));
@@ -318,9 +318,9 @@ mov_imm(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t type,
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= type << 29;
- insn |= 0x25 << 23;
- insn |= (shift/16) << 21;
- insn |= imm16 << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(type, 29);
+ insn |= RTE_SHIFT_VAL32(0x25, 23);
+ insn |= RTE_SHIFT_VAL32(shift/16, 21);
+ insn |= RTE_SHIFT_VAL32(imm16, 5);
insn |= rd;
@@ -335,5 +335,5 @@ emit_mov_imm32(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint32_t val)
/* Positive number */
- if ((val & 1UL << 31) == 0) {
+ if ((val & RTE_BIT32(31)) == 0) {
mov_imm(ctx, is64, rd, A64_MOVZ, lower, 0);
if (upper)
@@ -394,19 +394,19 @@ emit_ls(struct a64_jit_ctx *ctx, uint8_t sz, uint8_t rt, uint8_t rn, uint8_t rm,
uint32_t insn;
- insn = 0x1c1 << 21;
+ insn = RTE_SHIFT_VAL32(0x1c1, 21);
if (load)
- insn |= 1 << 22;
+ insn |= RTE_BIT32(22);
if (sz == BPF_B)
- insn |= 0 << 30;
+ insn |= RTE_SHIFT_VAL32(0, 30);
else if (sz == BPF_H)
- insn |= 1 << 30;
+ insn |= RTE_SHIFT_VAL32(1, 30);
else if (sz == BPF_W)
- insn |= 2 << 30;
+ insn |= RTE_SHIFT_VAL32(2, 30);
else if (sz == EBPF_DW)
- insn |= 3 << 30;
+ insn |= RTE_SHIFT_VAL32(3, 30);
- insn |= rm << 16;
- insn |= 0x1a << 10; /* LSL and S = 0 */
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(0x1a, 10); /* LSL and S = 0 */
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rt;
@@ -437,8 +437,8 @@ emit_add_sub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn,
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= op << 21; /* shift == 0 */
- insn |= rm << 16;
- insn |= rn << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(op, 21); /* shift == 0 */
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rd;
@@ -469,9 +469,9 @@ emit_mul(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rm)
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= 0xd8 << 21;
- insn |= rm << 16;
- insn |= A64_ZR << 10;
- insn |= rd << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(0xd8, 21);
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(A64_ZR, 10);
+ insn |= RTE_SHIFT_VAL32(rd, 5);
insn |= rd;
@@ -490,9 +490,9 @@ emit_data_process_two_src(struct a64_jit_ctx *ctx, bool is64, uint8_t rd,
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= 0xd6 << 21;
- insn |= rm << 16;
- insn |= op << 10;
- insn |= rn << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(0xd6, 21);
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(op, 10);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rd;
@@ -533,12 +533,12 @@ emit_bitfield(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn,
uint32_t insn;
- insn = (!!is64) << 31;
+ insn = RTE_SHIFT_VAL32(is64, 31);
if (insn)
- insn |= 1 << 22; /* Set N bit when is64 is set */
- insn |= op << 29;
- insn |= 0x26 << 23;
- insn |= immr << 16;
- insn |= imms << 10;
- insn |= rn << 5;
+ insn |= RTE_BIT32(22); /* Set N bit when is64 is set */
+ insn |= RTE_SHIFT_VAL32(op, 29);
+ insn |= RTE_SHIFT_VAL32(0x26, 23);
+ insn |= RTE_SHIFT_VAL32(immr, 16);
+ insn |= RTE_SHIFT_VAL32(imms, 10);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rd;
@@ -579,9 +579,9 @@ emit_logical(struct a64_jit_ctx *ctx, bool is64, uint8_t rd,
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= op << 29;
- insn |= 0x50 << 21;
- insn |= rm << 16;
- insn |= rd << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(op, 29);
+ insn |= RTE_SHIFT_VAL32(0x50, 21);
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(rd, 5);
insn |= rd;
@@ -613,10 +613,10 @@ emit_msub(struct a64_jit_ctx *ctx, bool is64, uint8_t rd, uint8_t rn,
uint32_t insn;
- insn = (!!is64) << 31;
- insn |= 0xd8 << 21;
- insn |= rm << 16;
- insn |= 0x1 << 15;
- insn |= ra << 10;
- insn |= rn << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(0xd8, 21);
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(0x1, 15);
+ insn |= RTE_SHIFT_VAL32(ra, 10);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rd;
@@ -639,5 +639,5 @@ emit_blr(struct a64_jit_ctx *ctx, uint8_t rn)
insn = 0xd63f0000;
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(rn, 5);
emit_insn(ctx, insn, check_reg(rn));
@@ -670,20 +670,20 @@ emit_rev(struct a64_jit_ctx *ctx, uint8_t rd, int32_t imm)
insn = 0xdac00000;
- insn |= rd << 5;
+ insn |= RTE_SHIFT_VAL32(rd, 5);
insn |= rd;
switch (imm) {
case 16:
- insn |= 1 << 10;
+ insn |= RTE_SHIFT_VAL32(1, 10);
emit_insn(ctx, insn, check_reg(rd));
emit_zero_extend(ctx, rd, 16);
break;
case 32:
- insn |= 2 << 10;
+ insn |= RTE_SHIFT_VAL32(2, 10);
emit_insn(ctx, insn, check_reg(rd));
/* Upper 32 bits already cleared */
break;
case 64:
- insn |= 3 << 10;
+ insn |= RTE_SHIFT_VAL32(3, 10);
emit_insn(ctx, insn, check_reg(rd));
break;
@@ -934,7 +934,7 @@ emit_cbnz(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, int32_t imm19)
imm = mask_imm(19, imm19);
- insn = (!!is64) << 31;
- insn |= 0x35 << 24;
- insn |= imm << 5;
+ insn = RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(0x35, 24);
+ insn |= RTE_SHIFT_VAL32(imm, 5);
insn |= rt;
@@ -948,5 +948,5 @@ emit_b(struct a64_jit_ctx *ctx, int32_t imm26)
imm = mask_imm(26, imm26);
- insn = 0x5 << 26;
+ insn = RTE_SHIFT_VAL32(0x5, 26);
insn |= imm;
@@ -972,7 +972,7 @@ emit_stadd(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rn)
insn = 0xb820001f;
- insn |= (!!is64) << 30;
- insn |= rs << 16;
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(is64, 30);
+ insn |= RTE_SHIFT_VAL32(rs, 16);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
emit_insn(ctx, insn, check_reg(rs) || check_reg(rn));
@@ -985,6 +985,6 @@ emit_ldxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, uint8_t rn)
insn = 0x885f7c00;
- insn |= (!!is64) << 30;
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(is64, 30);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rt;
@@ -999,7 +999,7 @@ emit_stxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rt,
insn = 0x88007c00;
- insn |= (!!is64) << 30;
- insn |= rs << 16;
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(is64, 30);
+ insn |= RTE_SHIFT_VAL32(rs, 16);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
insn |= rt;
@@ -1052,7 +1052,7 @@ emit_cmp_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm,
insn = opc;
- insn |= (!!is64) << 31;
- insn |= rm << 16;
- insn |= rn << 5;
+ insn |= RTE_SHIFT_VAL32(is64, 31);
+ insn |= RTE_SHIFT_VAL32(rm, 16);
+ insn |= RTE_SHIFT_VAL32(rn, 5);
emit_insn(ctx, insn, check_reg(rn) || check_reg(rm));
@@ -1077,6 +1077,6 @@ emit_b_cond(struct a64_jit_ctx *ctx, uint8_t cond, int32_t imm19)
imm = mask_imm(19, imm19);
- insn = 0x15 << 26;
- insn |= imm << 5;
+ insn = RTE_SHIFT_VAL32(0x15, 26);
+ insn |= RTE_SHIFT_VAL32(imm, 5);
insn |= cond;
@@ -1302,5 +1302,5 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
/* dst = imm64 */
case (BPF_LD | BPF_IMM | EBPF_DW):
- u64 = ((uint64_t)ins[1].imm << 32) | (uint32_t)imm;
+ u64 = RTE_SHIFT_VAL64(ins[1].imm, 32) | (uint32_t)imm;
emit_mov_imm(ctx, 1, dst, u64);
i++;
--
2.53.0
---
Diff of the applied patch vs the upstream commit (please double-check if non-empty):
---
--- - 2026-02-26 10:16:49.946394435 +0000
+++ 0072-bpf-fix-signed-shift-overflows-in-ARM-JIT.patch 2026-02-26 10:16:47.000459452 +0000
@@ -1 +1 @@
-From 8203cb408d51a49828d9870a96557c245825d49a Mon Sep 17 00:00:00 2001
+From f308dc53d0d47e1689ba6a66177e5714603a8592 Mon Sep 17 00:00:00 2001
@@ -8,0 +9,2 @@
+[ upstream commit 8203cb408d51a49828d9870a96557c245825d49a ]
+
@@ -26 +27,0 @@
-Cc: stable at dpdk.org
@@ -37 +38 @@
-index 13186c84c8..78ce894199 100644
+index 96b8cd2e03..76df1e4ba1 100644
@@ -290 +291 @@
-@@ -999,6 +999,6 @@ emit_ldxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, uint8_t rn)
+@@ -985,6 +985,6 @@ emit_ldxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rt, uint8_t rn)
@@ -299 +300 @@
-@@ -1013,7 +1013,7 @@ emit_stxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rt,
+@@ -999,7 +999,7 @@ emit_stxr(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rt,
@@ -310 +311 @@
-@@ -1085,7 +1085,7 @@ emit_cmp_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm,
+@@ -1052,7 +1052,7 @@ emit_cmp_tst(struct a64_jit_ctx *ctx, bool is64, uint8_t rn, uint8_t rm,
@@ -321 +322 @@
-@@ -1110,6 +1110,6 @@ emit_b_cond(struct a64_jit_ctx *ctx, uint8_t cond, int32_t imm19)
+@@ -1077,6 +1077,6 @@ emit_b_cond(struct a64_jit_ctx *ctx, uint8_t cond, int32_t imm19)
@@ -330 +331 @@
-@@ -1335,5 +1335,5 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
+@@ -1302,5 +1302,5 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
More information about the stable
mailing list