[dpdk-dev] [PATCH v5 7/8] net/mlx4: fix careless HW memory optimizations

Matan Azrad <matan@mellanox.com>
Thu Nov 2 17:42:50 CET 2017


Volatilize all Rx/Tx memory negotiated with the HW to make sure the
compiler cannot elide or reorder the load and store instructions this
negotiation depends on.
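
For example, consider this minimal sketch (illustrative only, not taken
from the driver; hw_flag and db_reg stand for any HW-shared word such as
a CQE owner field or a doorbell register):

#include <stdint.h>

/* Hypothetical HW-owned completion flag and doorbell register. */
static volatile uint32_t *hw_flag;
static volatile uint32_t *db_reg;

static void
wait_and_ring(uint32_t qpn)
{
	/*
	 * Each iteration re-reads *hw_flag; without volatile the
	 * compiler may load it once and spin on a stale value forever.
	 */
	while (*hw_flag == 0)
		;
	/*
	 * The store must reach the device; without volatile the
	 * compiler may elide it or merge it with a later write to the
	 * same address.
	 */
	*db_reg = qpn;
}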

Fixes: c3c977bbecbd ("net/mlx4: add Tx bypassing Verbs")
Fixes: 9f57340a8087 ("net/mlx4: restore Rx offloads")
Fixes: 6681b845034c ("net/mlx4: add Rx bypassing Verbs")
Fixes: 62e96ffb93ad ("net/mlx4: fix no Rx interrupts")

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
 drivers/net/mlx4/mlx4_prm.h  | 20 ++++++-------
 drivers/net/mlx4/mlx4_rxtx.c | 67 ++++++++++++++++++++++++--------------------
 2 files changed, 46 insertions(+), 41 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index b0fd982..339831a 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -80,14 +80,14 @@ enum {
 
 /* Send queue information. */
 struct mlx4_sq {
-	uint8_t *buf; /**< SQ buffer. */
-	uint8_t *eob; /**< End of SQ buffer */
+	volatile uint8_t *buf; /**< SQ buffer. */
+	volatile uint8_t *eob; /**< End of SQ buffer */
 	uint32_t head; /**< SQ head counter in units of TXBBS. */
 	uint32_t tail; /**< SQ tail counter in units of TXBBS. */
 	uint32_t txbb_cnt; /**< Num of WQEBB in the Q (should be ^2). */
 	uint32_t txbb_cnt_mask; /**< txbbs_cnt mask (txbb_cnt is ^2). */
 	uint32_t headroom_txbbs; /**< Num of txbbs that should be kept free. */
-	uint32_t *db; /**< Pointer to the doorbell. */
+	volatile uint32_t *db; /**< Pointer to the doorbell. */
 	uint32_t doorbell_qpn; /**< qp number to write to the doorbell. */
 };
 
@@ -100,11 +100,11 @@ struct mlx4_sq {
 
 /* Completion queue information. */
 struct mlx4_cq {
-	void *cq_uar; /**< CQ user access region. */
-	void *cq_db_reg; /**< CQ doorbell register. */
-	uint32_t *set_ci_db; /**< Pointer to the completion queue doorbell. */
-	uint32_t *arm_db; /**< Pointer to doorbell for arming Rx events. */
-	uint8_t *buf; /**< Pointer to the completion queue buffer. */
+	volatile void *cq_uar; /**< CQ user access region. */
+	volatile void *cq_db_reg; /**< CQ doorbell register. */
+	volatile uint32_t *set_ci_db; /**< Pointer to the CQ doorbell. */
+	volatile uint32_t *arm_db; /**< Arming Rx events doorbell. */
+	volatile uint8_t *buf; /**< Pointer to the completion queue buffer. */
 	uint32_t cqe_cnt; /**< Number of entries in the queue. */
 	uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. */
 	uint32_t cons_index; /**< Last queue entry that was handled. */
@@ -128,10 +128,10 @@ struct mlx4_cq {
  * @return
  *   Pointer to CQE entry.
  */
-static inline struct mlx4_cqe *
+static inline volatile struct mlx4_cqe *
 mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)
 {
-	return (struct mlx4_cqe *)(cq->buf +
+	return (volatile struct mlx4_cqe *)(cq->buf +
 				   ((index & (cq->cqe_cnt - 1)) <<
 				    (5 + cq->cqe_64)) +
 				   (cq->cqe_64 << 5));
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index d13c8d2..20060ce 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -69,7 +69,7 @@
  * DWORD (32 byte) of a TXBB.
  */
 struct pv {
-	struct mlx4_wqe_data_seg *dseg;
+	volatile struct mlx4_wqe_data_seg *dseg;
 	uint32_t val;
 };
 
@@ -97,14 +97,15 @@ struct pv {
 {
 	uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
 					  (!!owner << MLX4_SQ_STAMP_SHIFT));
-	uint8_t *wqe = mlx4_get_send_wqe(sq, (index & sq->txbb_cnt_mask));
-	uint32_t *ptr = (uint32_t *)wqe;
+	volatile uint8_t *wqe = mlx4_get_send_wqe(sq,
+						(index & sq->txbb_cnt_mask));
+	volatile uint32_t *ptr = (volatile uint32_t *)wqe;
 	int i;
 	int txbbs_size;
 	int num_txbbs;
 
 	/* Extract the size from the control segment of the WQE. */
-	num_txbbs = MLX4_SIZE_TO_TXBBS((((struct mlx4_wqe_ctrl_seg *)
+	num_txbbs = MLX4_SIZE_TO_TXBBS((((volatile struct mlx4_wqe_ctrl_seg *)
 					 wqe)->fence_size & 0x3f) << 4);
 	txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
 	/* Optimize the common case when there is no wrap-around. */
@@ -119,8 +120,8 @@ struct pv {
 		for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
 			*ptr = stamp;
 			ptr += MLX4_SQ_STAMP_DWORDS;
-			if ((uint8_t *)ptr >= sq->eob) {
-				ptr = (uint32_t *)sq->buf;
+			if ((volatile uint8_t *)ptr >= sq->eob) {
+				ptr = (volatile uint32_t *)sq->buf;
 				stamp ^= RTE_BE32(0x80000000);
 			}
 		}
@@ -149,7 +150,7 @@ struct pv {
 	unsigned int elts_comp = txq->elts_comp;
 	unsigned int elts_tail = txq->elts_tail;
 	struct mlx4_cq *cq = &txq->mcq;
-	struct mlx4_cqe *cqe;
+	volatile struct mlx4_cqe *cqe;
 	uint32_t cons_index = cq->cons_index;
 	uint16_t new_index;
 	uint16_t nr_txbbs = 0;
@@ -160,7 +161,7 @@ struct pv {
 	 * reported by them.
 	 */
 	do {
-		cqe = (struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
+		cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
 		if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
 		    !!(cons_index & cq->cqe_cnt)))
 			break;
@@ -171,8 +172,8 @@ struct pv {
 #ifndef NDEBUG
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 			     MLX4_CQE_OPCODE_ERROR)) {
-			struct mlx4_err_cqe *cqe_err =
-				(struct mlx4_err_cqe *)cqe;
+			volatile struct mlx4_err_cqe *cqe_err =
+				(volatile struct mlx4_err_cqe *)cqe;
 			ERROR("%p CQE error - vendor syndrome: 0x%x"
 			      " syndrome: 0x%x\n",
 			      (void *)txq, cqe_err->vendor_err,
@@ -239,15 +240,15 @@ struct pv {
 
 static int
 mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
-			       struct mlx4_wqe_ctrl_seg **pctrl)
+		   volatile struct mlx4_wqe_ctrl_seg **pctrl)
 {
 	int wqe_real_size;
 	int nr_txbbs;
 	struct pv *pv = (struct pv *)txq->bounce_buf;
 	struct mlx4_sq *sq = &txq->msq;
 	uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
-	struct mlx4_wqe_ctrl_seg *ctrl;
-	struct mlx4_wqe_data_seg *dseg;
+	volatile struct mlx4_wqe_ctrl_seg *ctrl;
+	volatile struct mlx4_wqe_data_seg *dseg;
 	struct rte_mbuf *sbuf;
 	uint32_t lkey;
 	uintptr_t addr;
@@ -255,8 +256,8 @@ struct pv {
 	int pv_counter = 0;
 
 	/* Calculate the needed work queue entry size for this packet. */
-	wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
-		buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
+	wqe_real_size = sizeof(volatile struct mlx4_wqe_ctrl_seg) +
+		buf->nb_segs * sizeof(volatile struct mlx4_wqe_data_seg);
 	nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
 	/*
 	 * Check that there is room for this WQE in the send queue and that
@@ -268,17 +269,18 @@ struct pv {
 		return -1;
 	}
 	/* Get the control and data entries of the WQE. */
-	ctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);
-	dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
-			sizeof(struct mlx4_wqe_ctrl_seg));
+	ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
+			mlx4_get_send_wqe(sq, head_idx);
+	dseg = (volatile struct mlx4_wqe_data_seg *)
+			((uintptr_t)ctrl + sizeof(struct mlx4_wqe_ctrl_seg));
 	*pctrl = ctrl;
 	/* Fill the data segments with buffer information. */
 	for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
 		addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
 		rte_prefetch0((volatile void *)addr);
 		/* Handle WQE wraparound. */
-		if (dseg >= (struct mlx4_wqe_data_seg *)sq->eob)
-			dseg = (struct mlx4_wqe_data_seg *)sq->buf;
+		if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
+			dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
 		dseg->addr = rte_cpu_to_be_64(addr);
 		/* Memory region key (big endian) for this memory pool. */
 		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
@@ -395,8 +397,8 @@ struct pv {
 		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
 		struct txq_elt *elt = &(*txq->elts)[elts_head];
 		uint32_t owner_opcode = MLX4_OPCODE_SEND;
-		struct mlx4_wqe_ctrl_seg *ctrl;
-		struct mlx4_wqe_data_seg *dseg;
+		volatile struct mlx4_wqe_ctrl_seg *ctrl;
+		volatile struct mlx4_wqe_data_seg *dseg;
 		union {
 			uint32_t flags;
 			uint16_t flags16[2];
@@ -433,15 +435,18 @@ struct pv {
 				break;
 			}
 			/* Get the control and data entries of the WQE. */
-			ctrl = (struct mlx4_wqe_ctrl_seg *)
+			ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
 					mlx4_get_send_wqe(sq, head_idx);
-			dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
+			dseg = (volatile struct mlx4_wqe_data_seg *)
+					((uintptr_t)ctrl +
 					sizeof(struct mlx4_wqe_ctrl_seg));
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			rte_prefetch0((volatile void *)addr);
 			/* Handle WQE wraparound. */
-			if (dseg >= (struct mlx4_wqe_data_seg *)sq->eob)
-				dseg = (struct mlx4_wqe_data_seg *)sq->buf;
+			if (dseg >=
+				(volatile struct mlx4_wqe_data_seg *)sq->eob)
+				dseg = (volatile struct mlx4_wqe_data_seg *)
+						sq->buf;
 			dseg->addr = rte_cpu_to_be_64(addr);
 			/* Memory region key (big endian). */
 			lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
@@ -633,7 +638,7 @@ struct pv {
  *   CQE checksum information.
  */
 static inline uint32_t
-mlx4_cqe_flags(struct mlx4_cqe *cqe, int csum, int csum_l2tun)
+mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
 {
 	uint32_t flags = 0;
 
@@ -666,13 +671,13 @@ struct pv {
  *   Number of bytes of the CQE, 0 in case there is no completion.
  */
 static unsigned int
-mlx4_cq_poll_one(struct rxq *rxq, struct mlx4_cqe **out)
+mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
 {
 	int ret = 0;
-	struct mlx4_cqe *cqe = NULL;
+	volatile struct mlx4_cqe *cqe = NULL;
 	struct mlx4_cq *cq = &rxq->mcq;
 
-	cqe = (struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
+	cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
 	if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
 	    !!(cq->cons_index & cq->cqe_cnt))
 		goto out;
@@ -717,7 +722,7 @@ struct pv {
 	int len = 0;
 
 	while (pkts_n) {
-		struct mlx4_cqe *cqe;
+		volatile struct mlx4_cqe *cqe;
 		uint32_t idx = rq_ci & wr_cnt;
 		struct rte_mbuf *rep = (*rxq->elts)[idx];
 		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];
-- 
1.8.3.1


