[PATCH v3 1/1] net/nbl: fix issues reported by Coverity
Dimon Zhao
dimon.zhao at nebula-matrix.com
Thu Jan 15 04:27:44 CET 2026
Address multiple issues reported by Coverity static analysis:
Check the return values of the ioctl() calls for NBL_DEV_USER_GET_BAR_SIZE
and NBL_DEV_USER_CLEAR_EVENTFD. For NBL_DEV_USER_GET_BAR_SIZE the original
code tested ret without ever assigning it from the ioctl() call, and for
NBL_DEV_USER_CLEAR_EVENTFD the return value was discarded entirely.
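
As a minimal, self-contained sketch of the corrected pattern (illustration
only, not part of the patch; it uses FIONREAD on stdin as a stand-in for the
driver's NBL_DEV_USER_* requests):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int pending = 0;
        int ret;

        /* Capture the ioctl() return value instead of discarding it,
         * then check it before trusting the output argument. */
        ret = ioctl(STDIN_FILENO, FIONREAD, &pending);
        if (ret) {
            perror("ioctl(FIONREAD)");
            return 1;
        }
        printf("%d bytes pending on stdin\n", pending);
        return 0;
    }
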
Move the debug log statement in nbl_dev_configure() after the NULL pointer
validation so that adapter is not dereferenced before it is checked.
Cast the u16 operands to u64 before multiplication when calculating buffer
sizes and offsets to prevent potential integer overflow. The
num_txq_entries, num_rxq_entries, txq_buf_size, and rxq_buf_size fields are
all u16, so the usual arithmetic conversions promote them to int and the
32-bit product can overflow before it is widened to the size_t or u64
destination variable.
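
For illustration only (a standalone sketch, not part of the patch), the
promotion behaviour can be reproduced with worst-case u16 values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t entries = 65535;   /* worst-case u16 operands */
        uint16_t buf_size = 65535;

        /* Both operands are promoted to int, so this multiply is done in
         * 32 bits and can overflow before the widening assignment. */
        uint64_t narrow = entries * buf_size;
        /* Casting the operands to a 64-bit type forces a 64-bit multiply. */
        uint64_t wide = (uint64_t)entries * (uint64_t)buf_size;

        printf("narrow = %" PRIu64 ", wide = %" PRIu64 "\n", narrow, wide);
        return 0;
    }
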
Coverity issue: 490942
Coverity issue: 490943
Coverity issue: 490946
Coverity issue: 490947
Coverity issue: 490949
Coverity issue: 490950
Coverity issue: 490951
Coverity issue: 490952
Coverity issue: 490953
Coverity issue: 490954
Coverity issue: 490955
Coverity issue: 490957
Coverity issue: 490958
Coverity issue: 490959
Fixes: a1c5ffa13b2c ("net/nbl: add channel layer")
Fixes: dc955cd24c8f ("net/nbl: add coexistence mode")
Fixes: 93b38df5a2ec ("net/nbl: support basic configuration")
Cc: stable at dpdk.org
Signed-off-by: Dimon Zhao <dimon.zhao at nebula-matrix.com>
---
drivers/net/nbl/nbl_common/nbl_userdev.c | 25 ++++++++++++++----------
drivers/net/nbl/nbl_dev/nbl_dev.c        |  4 ++--
drivers/net/nbl/nbl_hw/nbl_channel.c     | 17 ++++++++--------
3 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/drivers/net/nbl/nbl_common/nbl_userdev.c b/drivers/net/nbl/nbl_common/nbl_userdev.c
index 566f3a11ee..96f0d2e264 100644
--- a/drivers/net/nbl/nbl_common/nbl_userdev.c
+++ b/drivers/net/nbl/nbl_common/nbl_userdev.c
@@ -724,17 +724,19 @@ int nbl_pci_map_device(struct nbl_adapter *adapter)
}
common->eventfd = fd;
- ioctl(common->devfd, NBL_DEV_USER_GET_BAR_SIZE, &bar_size);
+ ret = ioctl(common->devfd, NBL_DEV_USER_GET_BAR_SIZE, &bar_size);
+ if (ret) {
+ NBL_LOG(ERR, "nbl userdev get bar size failed");
+ goto close_eventfd;
+ }
- if (!ret) {
- pci_dev->mem_resource[0].addr = nbl_userdev_mmap(common->devfd, 0, bar_size);
- pci_dev->mem_resource[0].phys_addr = 0;
- pci_dev->mem_resource[0].len = bar_size;
- pci_dev->mem_resource[2].addr = 0;
+ pci_dev->mem_resource[0].addr = nbl_userdev_mmap(common->devfd, 0, bar_size);
+ pci_dev->mem_resource[0].phys_addr = 0;
+ pci_dev->mem_resource[0].len = bar_size;
+ pci_dev->mem_resource[2].addr = 0;
- common->ifindex = nbl_userdev_get_ifindex(common->devfd);
- common->nl_socket_route = nbl_userdev_nl_init(NETLINK_ROUTE);
- }
+ common->ifindex = nbl_userdev_get_ifindex(common->devfd);
+ common->nl_socket_route = nbl_userdev_nl_init(NETLINK_ROUTE);
return ret;
@@ -753,12 +755,15 @@ void nbl_pci_unmap_device(struct nbl_adapter *adapter)
{
struct rte_pci_device *pci_dev = adapter->pci_dev;
struct nbl_common_info *common = &adapter->common;
+ int ret;
if (NBL_IS_NOT_COEXISTENCE(common))
return rte_pci_unmap_device(pci_dev);
rte_mem_unmap(pci_dev->mem_resource[0].addr, pci_dev->mem_resource[0].len);
- ioctl(common->devfd, NBL_DEV_USER_CLEAR_EVENTFD, 0);
+ ret = ioctl(common->devfd, NBL_DEV_USER_CLEAR_EVENTFD, 0);
+ if (ret)
+ NBL_LOG(ERR, "nbl userdev set clear eventfd failed, ret: %d", ret);
close(common->eventfd);
close(common->nl_socket_route);
diff --git a/drivers/net/nbl/nbl_dev/nbl_dev.c b/drivers/net/nbl/nbl_dev/nbl_dev.c
index e926c06456..8a56939aa7 100644
--- a/drivers/net/nbl/nbl_dev/nbl_dev.c
+++ b/drivers/net/nbl/nbl_dev/nbl_dev.c
@@ -18,11 +18,11 @@ int nbl_dev_configure(struct rte_eth_dev *eth_dev)
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
int ret;
- NBL_LOG(DEBUG, "Begin to configure the device, state: %d", adapter->state);
-
if (dev_data == NULL || adapter == NULL)
return -EINVAL;
+ NBL_LOG(DEBUG, "Begin to configure the device, state: %d", adapter->state);
+
if (rx_mq_mode != RTE_ETH_MQ_RX_NONE && rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
NBL_LOG(ERR, "Rx mq mode %d is not supported", rx_mq_mode);
return -ENOTSUP;
diff --git a/drivers/net/nbl/nbl_hw/nbl_channel.c b/drivers/net/nbl/nbl_hw/nbl_channel.c
index 7cc214df01..f81c4c8591 100644
--- a/drivers/net/nbl/nbl_hw/nbl_channel.c
+++ b/drivers/net/nbl/nbl_hw/nbl_channel.c
@@ -36,7 +36,7 @@ static int nbl_chan_init_tx_queue(union nbl_chan_info *chan_info)
goto req_wait_queue_failed;
}
- size = chan_info->mailbox.num_txq_entries * chan_info->mailbox.txq_buf_size;
+ size = (u64)chan_info->mailbox.num_txq_entries * (u64)chan_info->mailbox.txq_buf_size;
txq->buf = nbl_alloc_dma_mem(&txq->buf_mem, size);
if (!txq->buf) {
NBL_LOG(ERR, "Allocate memory for chan tx buffer arrays failed");
@@ -66,7 +66,7 @@ static int nbl_chan_init_rx_queue(union nbl_chan_info *chan_info)
return -ENOMEM;
}
- size = chan_info->mailbox.num_rxq_entries * chan_info->mailbox.rxq_buf_size;
+ size = (u64)chan_info->mailbox.num_rxq_entries * (u64)chan_info->mailbox.rxq_buf_size;
rxq->buf = nbl_alloc_dma_mem(&rxq->buf_mem, size);
if (!rxq->buf) {
NBL_LOG(ERR, "Allocate memory for chan rx buffer arrays failed");
@@ -163,7 +163,7 @@ static int nbl_chan_prepare_rx_bufs(struct nbl_channel_mgt *chan_mgt,
desc = rxq->desc;
for (i = 0; i < chan_info->mailbox.num_rxq_entries - 1; i++) {
desc[i].flags = NBL_CHAN_RX_DESC_AVAIL;
- desc[i].buf_addr = rxq->buf_mem.pa + i * chan_info->mailbox.rxq_buf_size;
+ desc[i].buf_addr = rxq->buf_mem.pa + (u64)i * (u64)chan_info->mailbox.rxq_buf_size;
desc[i].buf_len = chan_info->mailbox.rxq_buf_size;
}
@@ -324,7 +324,8 @@ static void nbl_chan_advance_rx_ring(struct nbl_channel_mgt *chan_mgt,
rx_desc = NBL_CHAN_RX_DESC(rxq, next_to_use);
rx_desc->flags = NBL_CHAN_RX_DESC_AVAIL;
- rx_desc->buf_addr = rxq->buf_mem.pa + chan_info->mailbox.rxq_buf_size * next_to_use;
+ rx_desc->buf_addr = rxq->buf_mem.pa +
+ (u64)chan_info->mailbox.rxq_buf_size * (u64)next_to_use;
rx_desc->buf_len = chan_info->mailbox.rxq_buf_size;
rte_wmb();
@@ -347,7 +348,7 @@ static void nbl_chan_clean_queue(void *priv)
next_to_clean = rxq->next_to_clean;
rx_desc = NBL_CHAN_RX_DESC(rxq, next_to_clean);
- data = (u8 *)rxq->buf + next_to_clean * chan_info->mailbox.rxq_buf_size;
+ data = (u8 *)rxq->buf + (u64)next_to_clean * (u64)chan_info->mailbox.rxq_buf_size;
while (rx_desc->flags & NBL_CHAN_RX_DESC_USED) {
rte_rmb();
nbl_chan_recv_msg(chan_mgt, data);
@@ -358,7 +359,7 @@ static void nbl_chan_clean_queue(void *priv)
if (next_to_clean == chan_info->mailbox.num_rxq_entries)
next_to_clean = 0;
rx_desc = NBL_CHAN_RX_DESC(rxq, next_to_clean);
- data = (u8 *)rxq->buf + next_to_clean * chan_info->mailbox.rxq_buf_size;
+ data = (u8 *)rxq->buf + (u64)next_to_clean * (u64)chan_info->mailbox.rxq_buf_size;
}
rxq->next_to_clean = next_to_clean;
}
@@ -376,8 +377,8 @@ static uint16_t nbl_chan_update_txqueue(union nbl_chan_info *chan_info,
txq = &chan_info->mailbox.txq;
next_to_use = txq->next_to_use;
- va = (u8 *)txq->buf + next_to_use * chan_info->mailbox.txq_buf_size;
- pa = txq->buf_mem.pa + next_to_use * chan_info->mailbox.txq_buf_size;
+ va = (u8 *)txq->buf + (u64)next_to_use * (u64)chan_info->mailbox.txq_buf_size;
+ pa = txq->buf_mem.pa + (u64)next_to_use * (u64)chan_info->mailbox.txq_buf_size;
tx_desc = NBL_CHAN_TX_DESC(txq, next_to_use);
tx_desc->dstid = dstid;
--
2.34.1