[PATCH 2/2] drivers/net: support single queue per port
Morten Brørup
mb at smartsharesystems.com
Fri Oct 25 13:52:23 CEST 2024
When DPDK is configured for one queue per port
(#define RTE_MAX_QUEUES_PER_PORT 1), compilation of some network drivers
fails with errors such as:
../drivers/net/bnxt/bnxt_rxq.c: In function 'bnxt_rx_queue_stop':
../drivers/net/bnxt/bnxt_rxq.c:587:34: error: array subscript 1 is above array bounds of 'uint8_t[1]' {aka 'unsigned char[1]'} [-Werror=array-bounds=]
587 | dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
In file included from ../drivers/net/bnxt/bnxt.h:16,
from ../drivers/net/bnxt/bnxt_rxq.c:10:
../lib/ethdev/ethdev_driver.h:168:17: note: while referencing 'rx_queue_state'
168 | uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
| ^~~~~~~~~~~~~~
To fix this, an __rte_assume() hint is added in the network drivers where
a compiler in the CI has been seen to emit the above error when DPDK is
configured for one queue per port. The hint tells the compiler that the
queue index is always within bounds, so the warning is a false positive:
the out-of-bounds access it reports cannot actually occur.
Signed-off-by: Morten Brørup <mb at smartsharesystems.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 2 ++
drivers/net/bnxt/bnxt_rxq.c | 1 +
drivers/net/e1000/igb_rxtx.c | 2 ++
drivers/net/failsafe/failsafe_ops.c | 10 ++++++++--
drivers/net/hns3/hns3_rxtx.c | 2 ++
drivers/net/mana/tx.c | 1 +
6 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 1f7c0d77d5..136e308437 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -910,6 +910,7 @@ static int bnxt_start_nic(struct bnxt *bp)
struct bnxt_rx_queue *rxq = bp->rx_queues[j];
if (!rxq->rx_deferred_start) {
+ __rte_assume(j < RTE_MAX_QUEUES_PER_PORT);
bp->eth_dev->data->rx_queue_state[j] =
RTE_ETH_QUEUE_STATE_STARTED;
rxq->rx_started = true;
@@ -930,6 +931,7 @@ static int bnxt_start_nic(struct bnxt *bp)
struct bnxt_tx_queue *txq = bp->tx_queues[j];
if (!txq->tx_deferred_start) {
+ __rte_assume(j < RTE_MAX_QUEUES_PER_PORT);
bp->eth_dev->data->tx_queue_state[j] =
RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_started = true;
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 1c25c57ca6..1651c26545 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -584,6 +584,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
}
+ __rte_assume(q_id < RTE_MAX_QUEUES_PER_PORT);
dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
rxq->rx_started = false;
PMD_DRV_LOG_LINE(DEBUG, "Rx queue stopped");
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index d61eaad2de..4276bb6d31 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1868,6 +1868,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
struct igb_rx_queue *rxq;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
txq = dev->data->tx_queues[i];
if (txq != NULL) {
igb_tx_queue_release_mbufs(txq);
@@ -1877,6 +1878,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
igb_rx_queue_release_mbufs(rxq);
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 9c013e0419..5321c3385c 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -111,12 +111,14 @@ fs_set_queues_state_start(struct rte_eth_dev *dev)
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
rxq = dev->data->rx_queues[i];
if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
dev->data->rx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STARTED;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
txq = dev->data->tx_queues[i];
if (txq != NULL && !txq->info.conf.tx_deferred_start)
dev->data->tx_queue_state[i] =
@@ -176,14 +178,18 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
uint16_t i;
- for (i = 0; i < dev->data->nb_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
if (dev->data->rx_queues[i] != NULL)
dev->data->rx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STOPPED;
- for (i = 0; i < dev->data->nb_tx_queues; i++)
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
if (dev->data->tx_queues[i] != NULL)
dev->data->tx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STOPPED;
+ }
}
static int
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 5941b966e0..03bbbc435f 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1309,6 +1309,7 @@ hns3_start_tqps(struct hns3_hw *hw)
hns3_enable_all_queues(hw, true);
for (i = 0; i < hw->data->nb_tx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
txq = hw->data->tx_queues[i];
if (txq->enabled)
hw->data->tx_queue_state[i] =
@@ -1316,6 +1317,7 @@ hns3_start_tqps(struct hns3_hw *hw)
}
for (i = 0; i < hw->data->nb_rx_queues; i++) {
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
rxq = hw->data->rx_queues[i];
if (rxq->enabled)
hw->data->rx_queue_state[i] =
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 272a28bcba..40931ac027 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -154,6 +154,7 @@ mana_start_tx_queues(struct rte_eth_dev *dev)
txq->gdma_cq.count, txq->gdma_cq.size,
txq->gdma_cq.head);
+ __rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
}
--
2.43.0
More information about the dev
mailing list