patch 'net/mana: use datapath logging' has been queued to stable release 22.11.3
Xueming Li
xuemingl at nvidia.com
Sun Jun 25 08:34:07 CEST 2023
Hi,
FYI, your patch has been queued to stable release 22.11.3
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/27/23. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double-check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=753a735ea0d5382c73b073807b8582a7fdca33b1
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
>From 753a735ea0d5382c73b073807b8582a7fdca33b1 Mon Sep 17 00:00:00 2001
From: Long Li <longli at microsoft.com>
Date: Tue, 21 Feb 2023 12:42:25 -0800
Subject: [PATCH] net/mana: use datapath logging
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit e2d3a3c060c415133180ad3980d2b7e564ac071f ]
Change all the logging statements on the datapath to use RTE_LOG_DP() to
reduce CPU overhead when logging is not enabled for datapath.
Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Signed-off-by: Long Li <longli at microsoft.com>
---
drivers/net/mana/gdma.c | 56 +++++++++++++++---------------
drivers/net/mana/mana.h | 3 ++
drivers/net/mana/mp.c | 4 +--
drivers/net/mana/mr.c | 77 ++++++++++++++++++++---------------------
drivers/net/mana/rx.c | 32 ++++++++---------
drivers/net/mana/tx.c | 66 +++++++++++++++++------------------
6 files changed, 120 insertions(+), 118 deletions(-)
diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 3d4039014f..f637084137 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -14,12 +14,12 @@ gdma_get_wqe_pointer(struct mana_gdma_queue *queue)
(queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
(queue->size - 1);
- DRV_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u",
- queue->head, queue->size, offset_in_bytes);
+ DP_LOG(DEBUG, "txq sq_head %u sq_size %u offset_in_bytes %u",
+ queue->head, queue->size, offset_in_bytes);
if (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size)
- DRV_LOG(ERR, "fatal error: offset_in_bytes %u too big",
- offset_in_bytes);
+ DP_LOG(ERR, "fatal error: offset_in_bytes %u too big",
+ offset_in_bytes);
return ((uint8_t *)queue->buffer) + offset_in_bytes;
}
@@ -39,11 +39,11 @@ write_dma_client_oob(uint8_t *work_queue_buffer_pointer,
client_oob_size / sizeof(uint32_t);
header->client_data_unit = work_request->client_data_unit;
- DRV_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u",
- work_queue_buffer_pointer, header->num_sgl_entries,
- header->inline_client_oob_size_in_dwords,
- header->client_data_unit, work_request->inline_oob_data,
- work_request->inline_oob_size_in_bytes);
+ DP_LOG(DEBUG, "queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u",
+ work_queue_buffer_pointer, header->num_sgl_entries,
+ header->inline_client_oob_size_in_dwords,
+ header->client_data_unit, work_request->inline_oob_data,
+ work_request->inline_oob_size_in_bytes);
p += sizeof(struct gdma_wqe_dma_oob);
if (work_request->inline_oob_data &&
@@ -73,8 +73,8 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
uint32_t size_to_queue_end;
uint32_t sge_list_size;
- DRV_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x",
- work_queue_cur_pointer, work_request->flags);
+ DP_LOG(DEBUG, "work_queue_cur_pointer %p work_request->flags %x",
+ work_queue_cur_pointer, work_request->flags);
num_sge = work_request->num_sgl_elements;
sge_list = work_request->sgl;
@@ -110,9 +110,9 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
sge_list_size = size;
}
- DRV_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u",
- num_sge, sge_list->address, sge_list->size,
- sge_list->memory_key, sge_list_size);
+ DP_LOG(DEBUG, "sge %u address 0x%" PRIx64 " size %u key %u list_s %u",
+ num_sge, sge_list->address, sge_list->size,
+ sge_list->memory_key, sge_list_size);
return sge_list_size;
}
@@ -141,13 +141,13 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
uint32_t queue_free_units = queue->count - (queue->head - queue->tail);
if (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) {
- DRV_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u",
- wqe_size, queue->count, queue->head, queue->tail);
+ DP_LOG(DEBUG, "WQE size %u queue count %u head %u tail %u",
+ wqe_size, queue->count, queue->head, queue->tail);
return -EBUSY;
}
- DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
- client_oob_size, sgl_data_size, wqe_size);
+ DP_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
+ client_oob_size, sgl_data_size, wqe_size);
if (wqe_info) {
wqe_info->wqe_index =
@@ -242,15 +242,15 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
break;
default:
- DRV_LOG(ERR, "Unsupported queue type %d", queue_type);
+ DP_LOG(ERR, "Unsupported queue type %d", queue_type);
return -1;
}
/* Ensure all writes are done before ringing doorbell */
rte_wmb();
- DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
- db_page, addr, queue_id, queue_type, tail, arm);
+ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
+ db_page, addr, queue_id, queue_type, tail, arm);
rte_write64(e.as_uint64, addr);
return 0;
@@ -274,15 +274,15 @@ gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
COMPLETION_QUEUE_OWNER_MASK;
cqe_owner_bits = cqe->owner_bits;
- DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
- cqe_owner_bits, old_owner_bits);
+ DP_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
+ cqe_owner_bits, old_owner_bits);
if (cqe_owner_bits == old_owner_bits)
return 0; /* No new entry */
if (cqe_owner_bits != new_owner_bits) {
- DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
- cq->id, cqe_owner_bits, new_owner_bits);
+ DP_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
+ cq->id, cqe_owner_bits, new_owner_bits);
return -1;
}
@@ -296,8 +296,8 @@ gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
cq->head++;
- DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
- new_owner_bits, old_owner_bits, cqe_owner_bits,
- comp->work_queue_number, comp->send_work_queue, cq->head);
+ DP_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
+ new_owner_bits, old_owner_bits, cqe_owner_bits,
+ comp->work_queue_number, comp->send_work_queue, cq->head);
return 1;
}
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 4a05238a96..7b8c27df2a 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -447,6 +447,9 @@ extern int mana_logtype_init;
rte_log(RTE_LOG_ ## level, mana_logtype_driver, "%s(): " fmt "\n", \
__func__, ## args)
+#define DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, mana_logtype_init, "%s(): " fmt "\n",\
__func__, ## args)
diff --git a/drivers/net/mana/mp.c b/drivers/net/mana/mp.c
index 92432c431d..738487f65a 100644
--- a/drivers/net/mana/mp.c
+++ b/drivers/net/mana/mp.c
@@ -28,8 +28,8 @@ mana_mp_mr_create(struct mana_priv *priv, uintptr_t addr, uint32_t len)
if (!ibv_mr)
return -errno;
- DRV_LOG(DEBUG, "MR (2nd) lkey %u addr %p len %zu",
- ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
+ DP_LOG(DEBUG, "MR (2nd) lkey %u addr %p len %zu",
+ ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
if (!mr) {
diff --git a/drivers/net/mana/mr.c b/drivers/net/mana/mr.c
index 22df0917bb..fec0dc961c 100644
--- a/drivers/net/mana/mr.c
+++ b/drivers/net/mana/mr.c
@@ -47,23 +47,23 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
for (i = 0; i < pool->nb_mem_chunks; i++) {
if (ranges[i].len > priv->max_mr_size) {
- DRV_LOG(ERR, "memory chunk size %u exceeding max MR",
- ranges[i].len);
+ DP_LOG(ERR, "memory chunk size %u exceeding max MR",
+ ranges[i].len);
return -ENOMEM;
}
- DRV_LOG(DEBUG,
- "registering memory chunk start 0x%" PRIx64 " len %u",
- ranges[i].start, ranges[i].len);
+ DP_LOG(DEBUG,
+ "registering memory chunk start 0x%" PRIx64 " len %u",
+ ranges[i].start, ranges[i].len);
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
/* Send a message to the primary to do MR */
ret = mana_mp_req_mr_create(priv, ranges[i].start,
ranges[i].len);
if (ret) {
- DRV_LOG(ERR,
- "MR failed start 0x%" PRIx64 " len %u",
- ranges[i].start, ranges[i].len);
+ DP_LOG(ERR,
+ "MR failed start 0x%" PRIx64 " len %u",
+ ranges[i].start, ranges[i].len);
return ret;
}
continue;
@@ -72,8 +72,8 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start,
ranges[i].len, IBV_ACCESS_LOCAL_WRITE);
if (ibv_mr) {
- DRV_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64,
- ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
+ DP_LOG(DEBUG, "MR lkey %u addr %p len %" PRIu64,
+ ibv_mr->lkey, ibv_mr->addr, ibv_mr->length);
mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0);
mr->lkey = ibv_mr->lkey;
@@ -86,7 +86,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
rte_spinlock_unlock(&priv->mr_btree_lock);
if (ret) {
ibv_dereg_mr(ibv_mr);
- DRV_LOG(ERR, "Failed to add to global MR btree");
+ DP_LOG(ERR, "Failed to add to global MR btree");
return ret;
}
@@ -95,12 +95,12 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
/* Don't need to clean up MR as it's already
* in the global tree
*/
- DRV_LOG(ERR, "Failed to add to local MR btree");
+ DP_LOG(ERR, "Failed to add to local MR btree");
return ret;
}
} else {
- DRV_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u",
- ranges[i].start, ranges[i].len);
+ DP_LOG(ERR, "MR failed at 0x%" PRIx64 " len %u",
+ ranges[i].start, ranges[i].len);
return -errno;
}
}
@@ -118,7 +118,7 @@ mana_del_pmd_mr(struct mana_mr_cache *mr)
ret = ibv_dereg_mr(ibv_mr);
if (ret)
- DRV_LOG(ERR, "dereg MR failed ret %d", ret);
+ DP_LOG(ERR, "dereg MR failed ret %d", ret);
}
/*
@@ -133,17 +133,16 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv,
struct mana_mr_cache *mr;
uint16_t idx;
- DRV_LOG(DEBUG, "finding mr for mbuf addr %p len %d",
- mbuf->buf_addr, mbuf->buf_len);
+ DP_LOG(DEBUG, "finding mr for mbuf addr %p len %d",
+ mbuf->buf_addr, mbuf->buf_len);
try_again:
/* First try to find the MR in local queue tree */
mr = mana_mr_btree_lookup(local_mr_btree, &idx,
(uintptr_t)mbuf->buf_addr, mbuf->buf_len);
if (mr) {
- DRV_LOG(DEBUG,
- "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64,
- mr->lkey, mr->addr, mr->len);
+ DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64,
+ mr->lkey, mr->addr, mr->len);
return mr;
}
@@ -158,25 +157,25 @@ try_again:
if (mr) {
ret = mana_mr_btree_insert(local_mr_btree, mr);
if (ret) {
- DRV_LOG(DEBUG, "Failed to add MR to local tree.");
+ DP_LOG(ERR, "Failed to add MR to local tree.");
return NULL;
}
- DRV_LOG(DEBUG,
- "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64,
- mr->lkey, mr->addr, mr->len);
+ DP_LOG(DEBUG,
+ "Added local MR key %u addr 0x%" PRIx64 " len %" PRIu64,
+ mr->lkey, mr->addr, mr->len);
return mr;
}
if (second_try) {
- DRV_LOG(ERR, "Internal error second try failed");
+ DP_LOG(ERR, "Internal error second try failed");
return NULL;
}
ret = mana_new_pmd_mr(local_mr_btree, priv, pool);
if (ret) {
- DRV_LOG(ERR, "Failed to allocate MR ret %d addr %p len %d",
- ret, mbuf->buf_addr, mbuf->buf_len);
+ DP_LOG(ERR, "Failed to allocate MR ret %d addr %p len %d",
+ ret, mbuf->buf_addr, mbuf->buf_len);
return NULL;
}
@@ -215,11 +214,11 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n)
mem = rte_realloc_socket(bt->table, n * sizeof(struct mana_mr_cache),
0, bt->socket);
if (!mem) {
- DRV_LOG(ERR, "Failed to expand btree size %d", n);
+ DP_LOG(ERR, "Failed to expand btree size %d", n);
return -1;
}
- DRV_LOG(ERR, "Expanded btree to size %d", n);
+ DP_LOG(ERR, "Expanded btree to size %d", n);
bt->table = mem;
bt->size = n;
@@ -266,9 +265,9 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx,
if (addr + len <= table[base].addr + table[base].len)
return &table[base];
- DRV_LOG(DEBUG,
- "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found",
- addr, len, *idx, addr + len);
+ DP_LOG(DEBUG,
+ "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found",
+ addr, len, *idx, addr + len);
return NULL;
}
@@ -317,8 +316,8 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
uint16_t shift;
if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) {
- DRV_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree",
- entry->addr, entry->len);
+ DP_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree",
+ entry->addr, entry->len);
return 0;
}
@@ -332,17 +331,17 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry)
idx++;
shift = (bt->len - idx) * sizeof(struct mana_mr_cache);
if (shift) {
- DRV_LOG(DEBUG, "Moving %u bytes from idx %u to %u",
- shift, idx, idx + 1);
+ DP_LOG(DEBUG, "Moving %u bytes from idx %u to %u",
+ shift, idx, idx + 1);
memmove(&table[idx + 1], &table[idx], shift);
}
table[idx] = *entry;
bt->len++;
- DRV_LOG(DEBUG,
- "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu",
- table, idx, entry->addr, entry->len);
+ DP_LOG(DEBUG,
+ "Inserted MR b-tree table %p idx %d addr 0x%" PRIx64 " len %zu",
+ table, idx, entry->addr, entry->len);
return 0;
}
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 55247889c1..10392ae292 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -42,7 +42,7 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm)
arm);
if (ret)
- DRV_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
+ DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
return ret;
}
@@ -66,7 +66,7 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
mr = mana_find_pmd_mr(&rxq->mr_btree, priv, mbuf);
if (!mr) {
- DRV_LOG(ERR, "failed to register RX MR");
+ DP_LOG(ERR, "failed to register RX MR");
rte_pktmbuf_free(mbuf);
return -ENOMEM;
}
@@ -97,7 +97,7 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
} else {
- DRV_LOG(ERR, "failed to post recv ret %d", ret);
+ DP_LOG(DEBUG, "failed to post recv ret %d", ret);
return ret;
}
@@ -116,7 +116,7 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
for (i = 0; i < rxq->num_desc; i++) {
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
- DRV_LOG(ERR, "failed to post RX ret = %d", ret);
+ DP_LOG(ERR, "failed to post RX ret = %d", ret);
return ret;
}
}
@@ -395,8 +395,8 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
(struct mana_rx_comp_oob *)&comp.completion_data[0];
if (comp.work_queue_number != rxq->gdma_rq.id) {
- DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
- comp.work_queue_number, rxq->gdma_rq.id);
+ DP_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
+ comp.work_queue_number, rxq->gdma_rq.id);
rxq->stats.errors++;
break;
}
@@ -411,22 +411,22 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
break;
case CQE_RX_TRUNCATED:
- DRV_LOG(ERR, "Drop a truncated packet");
+ DP_LOG(DEBUG, "Drop a truncated packet");
rxq->stats.errors++;
rte_pktmbuf_free(mbuf);
goto drop;
case CQE_RX_COALESCED_4:
- DRV_LOG(ERR, "RX coalescing is not supported");
+ DP_LOG(ERR, "RX coalescing is not supported");
continue;
default:
- DRV_LOG(ERR, "Unknown RX CQE type %d",
- oob->cqe_hdr.cqe_type);
+ DP_LOG(ERR, "Unknown RX CQE type %d",
+ oob->cqe_hdr.cqe_type);
continue;
}
- DRV_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq);
+ DP_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq);
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
@@ -470,7 +470,7 @@ drop:
/* Post another request */
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
- DRV_LOG(ERR, "failed to post rx wqe ret=%d", ret);
+ DP_LOG(ERR, "failed to post rx wqe ret=%d", ret);
break;
}
@@ -490,8 +490,8 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
uint32_t head = rxq->gdma_cq.head %
(rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE);
- DRV_LOG(ERR, "Ringing completion queue ID %u head %u arm %d",
- rxq->gdma_cq.id, head, arm);
+ DP_LOG(DEBUG, "Ringing completion queue ID %u head %u arm %d",
+ rxq->gdma_cq.id, head, arm);
return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
rxq->gdma_cq.id, head, arm);
@@ -521,8 +521,8 @@ mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (ret) {
if (ret != EAGAIN)
- DRV_LOG(ERR, "Can't disable RX intr queue %d",
- rx_queue_id);
+ DP_LOG(ERR, "Can't disable RX intr queue %d",
+ rx_queue_id);
} else {
ibv_ack_cq_events(rxq->cq, 1);
}
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 300bf27cc1..a45b5e289c 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -183,17 +183,17 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
(struct mana_tx_comp_oob *)&comp.completion_data[0];
if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY) {
- DRV_LOG(ERR,
- "mana_tx_comp_oob cqe_type %u vendor_err %u",
- oob->cqe_hdr.cqe_type, oob->cqe_hdr.vendor_err);
+ DP_LOG(ERR,
+ "mana_tx_comp_oob cqe_type %u vendor_err %u",
+ oob->cqe_hdr.cqe_type, oob->cqe_hdr.vendor_err);
txq->stats.errors++;
} else {
- DRV_LOG(DEBUG, "mana_tx_comp_oob CQE_TX_OKAY");
+ DP_LOG(DEBUG, "mana_tx_comp_oob CQE_TX_OKAY");
txq->stats.packets++;
}
if (!desc->pkt) {
- DRV_LOG(ERR, "mana_txq_desc has a NULL pkt");
+ DP_LOG(ERR, "mana_txq_desc has a NULL pkt");
} else {
txq->stats.bytes += desc->pkt->data_len;
rte_pktmbuf_free(desc->pkt);
@@ -214,8 +214,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Drop the packet if it exceeds max segments */
if (m_pkt->nb_segs > priv->max_send_sge) {
- DRV_LOG(ERR, "send packet segments %d exceeding max",
- m_pkt->nb_segs);
+ DP_LOG(ERR, "send packet segments %d exceeding max",
+ m_pkt->nb_segs);
continue;
}
@@ -257,7 +257,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
tcp_hdr->cksum = rte_ipv6_phdr_cksum(ip_hdr,
m_pkt->ol_flags);
} else {
- DRV_LOG(ERR, "Invalid input for TCP CKSUM");
+ DP_LOG(ERR, "Invalid input for TCP CKSUM");
}
tx_oob.short_oob.tx_compute_TCP_checksum = 1;
@@ -297,7 +297,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
m_pkt->ol_flags);
} else {
- DRV_LOG(ERR, "Invalid input for UDP CKSUM");
+ DP_LOG(ERR, "Invalid input for UDP CKSUM");
}
tx_oob.short_oob.tx_compute_UDP_checksum = 1;
@@ -310,20 +310,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
get_vsq_frame_num(txq->gdma_sq.id);
tx_oob.short_oob.short_vport_offset = txq->tx_vp_offset;
- DRV_LOG(DEBUG, "tx_oob packet_format %u ipv4 %u ipv6 %u",
- tx_oob.short_oob.packet_format,
- tx_oob.short_oob.tx_is_outer_ipv4,
- tx_oob.short_oob.tx_is_outer_ipv6);
+ DP_LOG(DEBUG, "tx_oob packet_format %u ipv4 %u ipv6 %u",
+ tx_oob.short_oob.packet_format,
+ tx_oob.short_oob.tx_is_outer_ipv4,
+ tx_oob.short_oob.tx_is_outer_ipv6);
- DRV_LOG(DEBUG, "tx_oob checksum ip %u tcp %u udp %u offset %u",
- tx_oob.short_oob.tx_compute_IP_header_checksum,
- tx_oob.short_oob.tx_compute_TCP_checksum,
- tx_oob.short_oob.tx_compute_UDP_checksum,
- tx_oob.short_oob.tx_transport_header_offset);
+ DP_LOG(DEBUG, "tx_oob checksum ip %u tcp %u udp %u offset %u",
+ tx_oob.short_oob.tx_compute_IP_header_checksum,
+ tx_oob.short_oob.tx_compute_TCP_checksum,
+ tx_oob.short_oob.tx_compute_UDP_checksum,
+ tx_oob.short_oob.tx_transport_header_offset);
- DRV_LOG(DEBUG, "pkt[%d]: buf_addr 0x%p, nb_segs %d, pkt_len %d",
- pkt_idx, m_pkt->buf_addr, m_pkt->nb_segs,
- m_pkt->pkt_len);
+ DP_LOG(DEBUG, "pkt[%d]: buf_addr 0x%p, nb_segs %d, pkt_len %d",
+ pkt_idx, m_pkt->buf_addr, m_pkt->nb_segs,
+ m_pkt->pkt_len);
/* Create SGL for packet data buffers */
for (seg_idx = 0; seg_idx < m_pkt->nb_segs; seg_idx++) {
@@ -331,8 +331,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
mana_find_pmd_mr(&txq->mr_btree, priv, m_seg);
if (!mr) {
- DRV_LOG(ERR, "failed to get MR, pkt_idx %u",
- pkt_idx);
+ DP_LOG(ERR, "failed to get MR, pkt_idx %u",
+ pkt_idx);
break;
}
@@ -342,11 +342,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sgl.gdma_sgl[seg_idx].size = m_seg->data_len;
sgl.gdma_sgl[seg_idx].memory_key = mr->lkey;
- DRV_LOG(DEBUG,
- "seg idx %u addr 0x%" PRIx64 " size %x key %x",
- seg_idx, sgl.gdma_sgl[seg_idx].address,
- sgl.gdma_sgl[seg_idx].size,
- sgl.gdma_sgl[seg_idx].memory_key);
+ DP_LOG(DEBUG,
+ "seg idx %u addr 0x%" PRIx64 " size %x key %x",
+ seg_idx, sgl.gdma_sgl[seg_idx].address,
+ sgl.gdma_sgl[seg_idx].size,
+ sgl.gdma_sgl[seg_idx].memory_key);
m_seg = m_seg->next;
}
@@ -383,11 +383,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_sent++;
- DRV_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
- nb_pkts, pkt_idx);
+ DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
+ nb_pkts, pkt_idx);
} else {
- DRV_LOG(INFO, "pkt[%d] failed to post send ret %d",
- pkt_idx, ret);
+ DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d",
+ pkt_idx, ret);
break;
}
}
@@ -409,7 +409,7 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
if (ret)
- DRV_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
+ DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
}
return pkt_sent;
--
2.25.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2023-06-25 14:31:59.307442700 +0800
+++ 0029-net-mana-use-datapath-logging.patch 2023-06-25 14:31:58.315773900 +0800
@@ -1 +1 @@
-From e2d3a3c060c415133180ad3980d2b7e564ac071f Mon Sep 17 00:00:00 2001
+From 753a735ea0d5382c73b073807b8582a7fdca33b1 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit e2d3a3c060c415133180ad3980d2b7e564ac071f ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org
More information about the stable
mailing list