[PATCH v5 29/45] common/idpf: use rte stdatomic API
Tyler Retzlaff
roretzla at linux.microsoft.com
Mon May 6 19:58:10 CEST 2024
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla at linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen at networkplumber.org>
---
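For reviewers, a minimal sketch of the conversion pattern applied
throughout this patch. Illustrative only; the demo_* names below are
hypothetical and not taken from the driver.

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

/* was: uint64_t demo_alloc_failed; */
static RTE_ATOMIC(uint64_t) demo_alloc_failed;

static inline void
demo_count_failures(uint64_t n)
{
	/* was: __atomic_fetch_add(&demo_alloc_failed, n, __ATOMIC_RELAXED); */
	rte_atomic_fetch_add_explicit(&demo_alloc_failed, n,
				      rte_memory_order_relaxed);
}

/* was: uint32_t demo_pend_cmd; */
static RTE_ATOMIC(uint32_t) demo_pend_cmd;

static inline bool
demo_set_cmd(uint32_t ops)
{
	uint32_t unknown = 0;

	/*
	 * was: __atomic_compare_exchange(&demo_pend_cmd, &unknown, &ops, 0,
	 *                                __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
	 */
	return rte_atomic_compare_exchange_strong_explicit(&demo_pend_cmd,
			&unknown, ops,
			rte_memory_order_acquire, rte_memory_order_acquire);
}

Note that rte_atomic_compare_exchange_strong_explicit() takes the desired
value directly rather than through a pointer, which is why atomic_set_cmd()
below now passes ops instead of &ops.
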
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 3834c1f..bfa927a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1