[dpdk-dev] [PATCH v1 1/9] net/mlx5: remove Rx queues indexes correlation

Matan Azrad matan at mellanox.com
Thu May 30 12:20:31 CEST 2019


There is a full correlation between the CQE indexes and the WQE indexes
in the vectorized Rx queue management.

When the RQ is moved to the reset state, the correlation may break
because the HW restarts RQ polling from index 0 while the CQ polling
continues regularly.

In preparation for CQE error handling, where the RQ may be reset, this
dependence on the correlation should be removed from all the Rx queue
index management.

Remove the aforementioned dependence from the vectorized Rx burst
functions.
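
For reference, a minimal sketch of the new bookkeeping (illustration
only, not part of the diff below): the burst path consumes an explicit
"decompressed" counter instead of deriving the number of ready mbufs
from cq_ci - rq_pi. Field names follow the patch; the helper name is
hypothetical and the surrounding burst logic is simplified.

    /* Illustration only - simplified from the vectorized burst path. */
    static uint16_t
    consume_decompressed(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
                         uint16_t pkts_n)
    {
            /* Before this patch: rcvd_pkt = rxq->cq_ci - rxq->rq_pi; */
            uint16_t rcvd_pkt = rxq->decompressed;

            if (rcvd_pkt > 0) {
                    rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
                    rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
                    rxq->rq_pi += rcvd_pkt;        /* Return mbufs to the app. */
                    rxq->decompressed -= rcvd_pkt; /* No CQ/RQ index coupling. */
            }
            return rcvd_pkt;
    }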

Cc: stable at dpdk.org

Signed-off-by: Matan Azrad <matan at mellanox.com>
---
 drivers/net/mlx5/mlx5_rxq.c           |  1 +
 drivers/net/mlx5/mlx5_rxtx.h          |  6 +++++-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 26 +++++++++++++-------------
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  | 26 +++++++++++++-------------
 4 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a00cb12..b248f38 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1006,6 +1006,7 @@ struct mlx5_rxq_ibv *
 	rxq_data->cq_uar = cq_info.cq_uar;
 	rxq_data->cqn = cq_info.cqn;
 	rxq_data->cq_arm_sn = 0;
+	rxq_data->decompressed = 0;
 	/* Update doorbell counter. */
 	rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
 	rte_wmb();
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 4339aaf..7bacdba 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -101,11 +101,15 @@ struct mlx5_rxq_data {
 	uint32_t rq_pi;
 	uint32_t cq_ci;
 	uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
+	union {
+		struct rxq_zip zip; /* Compressed context. */
+		uint16_t decompressed;
+		/* Number of ready mbufs decompressed from the CQ. */
+	};
 	struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
 	uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
 	volatile void *wqes;
 	volatile struct mlx5_cqe(*cqes)[];
-	struct rxq_zip zip; /* Compressed context. */
 	RTE_STD_C11
 	union  {
 		struct rte_mbuf *(*elts)[];
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 38e915c..6a1b2bb 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -352,8 +352,11 @@
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ *
+ * @return
+ *   Number of mini-CQEs successfully decompressed.
  */
-static inline void
+static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		    struct rte_mbuf **elts)
 {
@@ -505,6 +508,7 @@
 	rxq->stats.ibytes += rcvd_byte;
 #endif
 	rxq->cq_ci += mcqe_n;
+	return mcqe_n;
 }
 
 /**
@@ -729,24 +733,17 @@
 	rte_prefetch_non_temporal(cq + 2);
 	rte_prefetch_non_temporal(cq + 3);
 	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
-	/*
-	 * Order of indexes:
-	 *   rq_ci >= cq_ci >= rq_pi
-	 * Definition of indexes:
-	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
-	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
-	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
-	 */
 	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 	if (repl_n >= rxq->rq_repl_thresh)
 		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 	/* See if there're unreturned mbufs from compressed CQE. */
-	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+	rcvd_pkt = rxq->decompressed;
 	if (rcvd_pkt > 0) {
 		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
 		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
 		rxq->rq_pi += rcvd_pkt;
 		pkts += rcvd_pkt;
+		rxq->decompressed -= rcvd_pkt;
 	}
 	elts_idx = rxq->rq_pi & q_mask;
 	elts = &(*rxq->elts)[elts_idx];
@@ -754,10 +751,11 @@
 	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
 	if (!pkts_n)
 		return rcvd_pkt;
 	/* At this point, there shouldn't be any remained packets. */
-	assert(rxq->rq_pi == rxq->cq_ci);
+	assert(rxq->decompressed == 0);
 	/*
 	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
 	 * there's no instruction to count trailing zeros. __builtin_clzl() is
@@ -1003,15 +1001,17 @@
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
 		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
-		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
+							&elts[nocmp_n]);
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
-			uint16_t n = rxq->cq_ci - rxq->rq_pi;
+			uint16_t n = rxq->decompressed;
 
 			n = RTE_MIN(n, pkts_n - nocmp_n);
 			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
 			rxq->rq_pi += n;
 			rcvd_pkt += n;
+			rxq->decompressed -= n;
 		}
 	}
 	rte_compiler_barrier();
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index fb384ef..cc2f251 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -349,8 +349,11 @@
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ *
+ * @return
+ *   Number of mini-CQEs successfully decompressed.
  */
-static inline void
+static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		    struct rte_mbuf **elts)
 {
@@ -486,6 +489,7 @@
 	rxq->stats.ibytes += rcvd_byte;
 #endif
 	rxq->cq_ci += mcqe_n;
+	return mcqe_n;
 }
 
 /**
@@ -712,23 +716,16 @@
 	rte_prefetch0(cq + 2);
 	rte_prefetch0(cq + 3);
 	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
-	/*
-	 * Order of indexes:
-	 *   rq_ci >= cq_ci >= rq_pi
-	 * Definition of indexes:
-	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
-	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
-	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
-	 */
 	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 	if (repl_n >= rxq->rq_repl_thresh)
 		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 	/* See if there're unreturned mbufs from compressed CQE. */
-	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
+	rcvd_pkt = rxq->decompressed;
 	if (rcvd_pkt > 0) {
 		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
 		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
 		rxq->rq_pi += rcvd_pkt;
+		rxq->decompressed -= rcvd_pkt;
 		pkts += rcvd_pkt;
 	}
 	elts_idx = rxq->rq_pi & q_mask;
@@ -737,10 +734,11 @@
 	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
 	if (!pkts_n)
 		return rcvd_pkt;
 	/* At this point, there shouldn't be any remained packets. */
-	assert(rxq->rq_pi == rxq->cq_ci);
+	assert(rxq->decompressed == 0);
 	/*
 	 * A. load first Qword (8bytes) in one loop.
 	 * B. copy 4 mbuf pointers from elts ring to returing pkts.
@@ -953,15 +951,17 @@
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
 		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
-		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
+							&elts[nocmp_n]);
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
-			uint16_t n = rxq->cq_ci - rxq->rq_pi;
+			uint16_t n = rxq->decompressed;
 
 			n = RTE_MIN(n, pkts_n - nocmp_n);
 			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
 			rxq->rq_pi += n;
 			rcvd_pkt += n;
+			rxq->decompressed -= n;
 		}
 	}
 	rte_compiler_barrier();
-- 
1.8.3.1
