|SUCCESS| net/tap: use offsets provided by rte_net_get_ptype
dpdklab at iol.unh.edu
dpdklab at iol.unh.edu
Wed Apr 22 18:24:35 CEST 2026
Test-Label: ai-code-review
Test-Status: SUCCESS
https://dpdk.org/patch/163235
_AI Code Review_
=== Patch Review: series.patch (via Claude) ===
Review date: 2026-04-22
Reviewing patch: net/tap: use offsets provided by rte_net_get_ptype
Author: Robin Jarry <rjarry at redhat.com>
This patch modifies tap_verify_csum() to use the rte_net_hdr_lens
structure populated by rte_net_get_ptype() instead of manually
calculating header offsets.
> diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
> index a5d460a0b3cb..bb25d03ff786 100644
> --- a/drivers/net/tap/rte_eth_tap.c
> +++ b/drivers/net/tap/rte_eth_tap.c
[ ... ]
> @@ -327,55 +327,26 @@ tun_alloc(struct pmd_internals *pmd, int is_keepalive, int persistent)
> }
>
> static void
> -tap_verify_csum(struct rte_mbuf *mbuf)
> +tap_verify_csum(struct rte_mbuf *mbuf, const struct rte_net_hdr_lens *hlen)
> {
> - uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
> uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
> uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
> - unsigned int l2_len = sizeof(struct rte_ether_hdr);
> - unsigned int l3_len;
> + uint32_t l4_off = hlen->l2_len + hlen->l3_len;
> + const void *l3_hdr;
> + const void *l4_hdr;
> uint16_t cksum = 0;
> - void *l3_hdr;
> - void *l4_hdr;
> - struct rte_udp_hdr *udp_hdr;
Does this patch introduce a risk of using uninitialized or invalid header
lengths? The hlen parameter is dereferenced in the l4_off calculation
without validation. The caller always passes a stack-allocated structure,
so a NULL dereference is unlikely, but if rte_net_get_ptype() leaves some
fields of hdr_lens unset (e.g. for unrecognized packet types), this
function will compute offsets from stale or zero values.
[ ... ]
> /* Don't verify checksum for packets with discontinuous L2 header */
> - if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
> - rte_pktmbuf_data_len(mbuf)))
> + if (unlikely(l4_off > rte_pktmbuf_data_len(mbuf)))
> return;
> - l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
> +
> + l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, hlen->l2_len);
> if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
> - struct rte_ipv4_hdr *iph = l3_hdr;
> -
> - l3_len = rte_ipv4_hdr_len(iph);
> - if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
> - return;
> - /* check that the total length reported by header is not
> - * greater than the total received size
> - */
> - if (l2_len + rte_be_to_cpu_16(iph->total_length) >
> - rte_pktmbuf_data_len(mbuf))
> - return;
> -
> - cksum = ~rte_raw_cksum(iph, l3_len);
> + cksum = ~rte_raw_cksum(l3_hdr, hlen->l3_len);
Does removing the total_length validation weaken packet sanity checking?
The original code verified that the IPv4 total_length field did not exceed
the received packet size before computing the checksum. Without this
check, malformed packets whose total_length field is inconsistent with the
actual frame length could pass through undetected.
[ ... ]
> @@ -386,20 +357,19 @@ tap_verify_csum(struct rte_mbuf *mbuf)
>
> if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
> int cksum_ok;
> - const unsigned int l4_min_len = (l4 == RTE_PTYPE_L4_UDP)
> - ? sizeof(struct rte_udp_hdr) : sizeof(struct rte_tcp_hdr);
>
> /* Don't verify checksum if L4 header is truncated */
> - if (l2_len + l3_len + l4_min_len > rte_pktmbuf_data_len(mbuf))
> + if (l4_off + hlen->l4_len > rte_pktmbuf_data_len(mbuf))
> return;
Does this change alter the semantics of the truncation check? The original
code compared against l4_min_len (the fixed minimum header size for UDP or
TCP), while the new code uses hlen->l4_len, whose value depends on what
rte_net_get_ptype() computed — for TCP it may include options, and for
unrecognized packets it may be zero. Could a zero l4_len allow a truncated
packet to slip past this check?
[ ... ]
> @@ -561,10 +532,11 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> continue;
> }
>
> - mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
> + memset(&hdr_lens, 0, sizeof(hdr_lens));
> + mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
> RTE_PTYPE_ALL_MASK);
> if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
> - tap_verify_csum(mbuf);
> + tap_verify_csum(mbuf, &hdr_lens);
Does memset() need to be called for every packet in the receive burst?
Zeroing hdr_lens per packet adds overhead to the fast path. Since
rte_net_get_ptype() overwrites the fields it recognizes, would it be
sufficient to zero-initialize hdr_lens once at declaration, or is the
per-packet memset required to avoid reading stale lengths from the
previous packet when a field is not populated?
More information about the test-report
mailing list