[dpdk-dev] [PATCH v2 5/7] net/ena: add NUMA aware allocations
Michal Krawczyk
mk at semihalf.com
Fri Oct 15 18:26:59 CEST 2021
Only the IO rings memory was allocated with the socket ID taken into
account, while the other structures were allocated using the regular
rte_zmalloc() API.
Ring-specific structures are now allocated using the ring's
socket ID.
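To illustrate the pattern this patch applies, below is a minimal sketch
(not part of the patch) of allocating a per-ring helper array with
rte_zmalloc_socket() so it lands on the same NUMA node as the ring. The
struct and field names (example_ring, empty_reqs) are illustrative only.

	#include <errno.h>
	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_malloc.h>

	struct example_ring {
		uint16_t ring_size;
		unsigned int numa_socket_id;
		uint16_t *empty_reqs;
	};

	static int
	example_ring_alloc(struct example_ring *ring, unsigned int socket_id)
	{
		ring->numa_socket_id = socket_id;

		/* Allocate the helper array on the ring's NUMA node instead of
		 * relying on the default placement of plain rte_zmalloc().
		 */
		ring->empty_reqs = rte_zmalloc_socket("example->empty_reqs",
			sizeof(uint16_t) * ring->ring_size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
		if (ring->empty_reqs == NULL)
			return -ENOMEM;

		return 0;
	}

The same substitution of rte_zmalloc() with rte_zmalloc_socket() is made
for each of the queue helper buffers in the diff below.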
Signed-off-by: Michal Krawczyk <mk at semihalf.com>
Reviewed-by: Igor Chauskin <igorch at amazon.com>
Reviewed-by: Shai Brandes <shaibran at amazon.com>
---
doc/guides/rel_notes/release_21_11.rst | 1 +
drivers/net/ena/ena_ethdev.c | 42 ++++++++++++++------------
2 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 45d5cbdc78..c87862e713 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -99,6 +99,7 @@ New Features
bug fixes and improvements, including:
* Support for the tx_free_thresh and rx_free_thresh configuration parameters.
+ * NUMA aware allocations for the queue helper structures.
* **Updated Broadcom bnxt PMD.**
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 35db2e8356..e31cb0b65c 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1177,19 +1177,20 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
txq->numa_socket_id = socket_id;
txq->pkts_without_db = false;
- txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
- sizeof(struct ena_tx_buffer) *
- txq->ring_size,
- RTE_CACHE_LINE_SIZE);
+ txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
+ sizeof(struct ena_tx_buffer) * txq->ring_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!txq->tx_buffer_info) {
PMD_DRV_LOG(ERR,
"Failed to allocate memory for Tx buffer info\n");
return -ENOMEM;
}
- txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
- sizeof(u16) * txq->ring_size,
- RTE_CACHE_LINE_SIZE);
+ txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
+ sizeof(uint16_t) * txq->ring_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!txq->empty_tx_reqs) {
PMD_DRV_LOG(ERR,
"Failed to allocate memory for empty Tx requests\n");
@@ -1198,9 +1199,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
}
txq->push_buf_intermediate_buf =
- rte_zmalloc("txq->push_buf_intermediate_buf",
- txq->tx_max_header_size,
- RTE_CACHE_LINE_SIZE);
+ rte_zmalloc_socket("txq->push_buf_intermediate_buf",
+ txq->tx_max_header_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!txq->push_buf_intermediate_buf) {
PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
rte_free(txq->tx_buffer_info);
@@ -1282,19 +1284,20 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rxq->numa_socket_id = socket_id;
rxq->mb_pool = mp;
- rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+ rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
sizeof(struct ena_rx_buffer) * nb_desc,
- RTE_CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!rxq->rx_buffer_info) {
PMD_DRV_LOG(ERR,
"Failed to allocate memory for Rx buffer info\n");
return -ENOMEM;
}
- rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
- sizeof(struct rte_mbuf *) * nb_desc,
- RTE_CACHE_LINE_SIZE);
-
+ rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!rxq->rx_refill_buffer) {
PMD_DRV_LOG(ERR,
"Failed to allocate memory for Rx refill buffer\n");
@@ -1303,9 +1306,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
- rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
- sizeof(uint16_t) * nb_desc,
- RTE_CACHE_LINE_SIZE);
+ rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
+ sizeof(uint16_t) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (!rxq->empty_rx_reqs) {
PMD_DRV_LOG(ERR,
"Failed to allocate memory for empty Rx requests\n");
--
2.25.1