[dpdk-dev] [PATCH v3 08/32] net/mlx5: move static_assert calls to global scope

Tal Shnaiderman talshn at nvidia.com
Sun Dec 13 11:20:32 CET 2020


From: Ophir Munk <ophirmu at nvidia.com>

Some Windows compilers treat static_assert() as a call to another
function rather than as a compiler directive that performs a
compile-time check. This only occurs when the static_assert call
appears inside another function's scope. To solve this, move the
static_assert calls to global scope in the files where they are used.
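
For illustration only (not part of the patch, hypothetical names), a minimal
standalone sketch of the pattern being changed; before the patch the assertion
lived inside the function body, after it sits at file scope:

  #include <assert.h>	/* static_assert (C11) */
  #include <stdint.h>

  #define EXAMPLE_MIN_INLINE_SIZE 18	/* hypothetical constant */

  /* After: assertion at global (file) scope, accepted by all compilers. */
  static_assert(EXAMPLE_MIN_INLINE_SIZE ==
  		(2 * 6 + sizeof(uint16_t) + sizeof(uint32_t)),
  		"invalid example inline size");

  static int
  example_build_header(void)
  {
  	/*
  	 * Before: the same static_assert() was placed here, inside the
  	 * function body, which some Windows compilers parse as a call to
  	 * an undeclared function instead of a compile-time check.
  	 */
  	return 0;
  }

Hoisting the checks to file scope keeps them compile-time only and changes no
generated code, so the behavior of the affected functions is unchanged.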

Signed-off-by: Ophir Munk <ophirmu at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 98 ++++++++++++++++++++++----------------------
 drivers/net/mlx5/mlx5_txpp.c |  5 ++-
 2 files changed, 53 insertions(+), 50 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index d12d746c2f..65a1f997e9 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -79,6 +79,56 @@ static uint16_t mlx5_tx_burst_##func(void *txq, \
 
 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
 
+/* static asserts */
+static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(rte_v128u32_t)),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(struct rte_vlan_hdr) +
+		 2 * RTE_ETHER_ADDR_LEN),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(rte_v128u32_t)),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(struct rte_vlan_hdr) +
+		 2 * RTE_ETHER_ADDR_LEN),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(rte_v128u32_t)),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+		(sizeof(uint16_t) +
+		 sizeof(struct rte_vlan_hdr) +
+		 2 * RTE_ETHER_ADDR_LEN),
+		"invalid Ethernet Segment data size");
+static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
+		(2 * RTE_ETHER_ADDR_LEN),
+		"invalid Data Segment data size");
+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
+static_assert((sizeof(struct rte_vlan_hdr) +
+			sizeof(struct rte_ether_hdr)) ==
+		MLX5_ESEG_MIN_INLINE_SIZE,
+		"invalid min inline data size");
+static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
+		MLX5_DSEG_MAX, "invalid WQE max size");
+static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
+		"invalid WQE Control Segment size");
+static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
+		"invalid WQE Ethernet Segment size");
+static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
+		"invalid WQE Data Segment size");
+static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
+		"invalid WQE size");
+
 static __rte_always_inline uint32_t
 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				   volatile struct mlx5_mini_cqe8 *mcqe);
@@ -2070,8 +2120,6 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
 	bool ring_doorbell = false;
 	int ret;
 
-	static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
-	static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
 	do {
 		volatile struct mlx5_cqe *cqe;
 
@@ -2381,15 +2429,6 @@ mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
 	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
 		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
 		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(rte_v128u32_t)),
-		      "invalid Ethernet Segment data size");
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(struct rte_vlan_hdr) +
-				 2 * RTE_ETHER_ADDR_LEN),
-		      "invalid Ethernet Segment data size");
 	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
 	es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
 	es->inline_data = *(unaligned_uint16_t *)psrc;
@@ -2474,15 +2513,6 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
 	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
 		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
 		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(rte_v128u32_t)),
-		      "invalid Ethernet Segment data size");
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(struct rte_vlan_hdr) +
-				 2 * RTE_ETHER_ADDR_LEN),
-		      "invalid Ethernet Segment data size");
 	psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
 	es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
 	es->inline_data = *(unaligned_uint16_t *)psrc;
@@ -2697,15 +2727,6 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
 	es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
 		       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
 		       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(rte_v128u32_t)),
-		      "invalid Ethernet Segment data size");
-	static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-				(sizeof(uint16_t) +
-				 sizeof(struct rte_vlan_hdr) +
-				 2 * RTE_ETHER_ADDR_LEN),
-		      "invalid Ethernet Segment data size");
 	MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
 	pdst = (uint8_t *)&es->inline_data;
 	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
@@ -2952,9 +2973,6 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
 	uint8_t *pdst;
 
 	MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
-	static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
-				 (2 * RTE_ETHER_ADDR_LEN),
-		      "invalid Data Segment data size");
 	if (!MLX5_TXOFF_CONFIG(MPW)) {
 		/* Store the descriptor byte counter for eMPW sessions. */
 		dseg->bcount = rte_cpu_to_be_32
@@ -4070,7 +4088,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
 	MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
 	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
 	MLX5_ASSERT(pkts_n > loc->pkts_sent);
-	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
 	for (;;) {
@@ -4247,7 +4264,6 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
 	MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
 	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
 	MLX5_ASSERT(pkts_n > loc->pkts_sent);
-	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
 	for (;;) {
@@ -4561,10 +4577,6 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
 			    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
 				vlan = sizeof(struct rte_vlan_hdr);
 				inlen += vlan;
-				static_assert((sizeof(struct rte_vlan_hdr) +
-					       sizeof(struct rte_ether_hdr)) ==
-					       MLX5_ESEG_MIN_INLINE_SIZE,
-					       "invalid min inline data size");
 			}
 			/*
 			 * If inlining is enabled at configuration time
@@ -5567,16 +5579,6 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	unsigned int diff = 0, olx = 0, i, m;
 
-	static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
-		      MLX5_DSEG_MAX, "invalid WQE max size");
-	static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
-		      "invalid WQE Control Segment size");
-	static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
-		      "invalid WQE Ethernet Segment size");
-	static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
-		      "invalid WQE Data Segment size");
-	static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
-		      "invalid WQE size");
 	MLX5_ASSERT(priv);
 	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index a1ec294942..d61e43e55d 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -18,6 +18,9 @@
 #include "mlx5_rxtx.h"
 #include "mlx5_common_os.h"
 
+static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
+		"Wrong timestamp CQE part size");
+
 static const char * const mlx5_txpp_stat_names[] = {
 	"tx_pp_missed_interrupt_errors", /* Missed service interrupt. */
 	"tx_pp_rearm_queue_errors", /* Rearm Queue errors. */
@@ -741,8 +744,6 @@ mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)
 	uint64_t ts;
 	uint16_t ci;
 
-	static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),
-		      "Wrong timestamp CQE part size");
 	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
 	if (to.cts.op_own >> 4) {
 		DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-- 
2.16.1.windows.4


