[dpdk-dev] [PATCH] The VMXNET3 PMD suddenly can't receive packets after a lot of traffic comes in

Marco Lee mac_leehk at yahoo.com.hk
Thu Jul 23 10:51:32 CEST 2015


The RX path of the VMXNET3 PMD can deadlock when a large amount of traffic comes in.
The root cause is an mbuf allocation failure in vmxnet3_post_rx_bufs(), for which there is no error handling when it is called
from vmxnet3_recv_pkts(). The RXD is left without a "free" mbuf, but the counter still increments.
Eventually, no packets can be received.

The fix is to allocate the new mbuf first; if the allocation fails, the old mbuf is reused.
If the allocation succeeds, vmxnet3_renew_desc() is called instead of vmxnet3_post_rx_bufs(),
and the RXD is renewed inside it.

Signed-off-by: Marco Lee <mac_leehk at yahoo.com.hk/marco.lee at ruckuswireless.com>
---
 drivers/net/vmxnet3/vmxnet3_rxtx.c |   36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 39ad6ef..3a15009 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -421,6 +421,34 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,struct rte_mbuf *mbuf)
+{
+	uint32_t  val = 0;
+	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+
+	struct Vmxnet3_RxDesc *rxd;
+	vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+	rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+
+	if (ring->rid == 0) {
+		val = VMXNET3_RXD_BTYPE_HEAD;
+	} else {
+		val = VMXNET3_RXD_BTYPE_BODY;
+	}
+
+	buf_info->m = mbuf;
+	buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+	buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
+
+	rxd->addr = buf_info->bufPA;
+	rxd->btype = val;
+	rxd->len = buf_info->len;
+	rxd->gen = ring->gen;
+
+	vmxnet3_cmd_ring_adv_next2fill(ring);
+}
 /*
  *  Allocates mbufs and clusters. Post rx descriptors with buffer details
  *  so that device can receive packets in those buffers.
@@ -578,6 +606,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (nb_rx >= nb_pkts)
 			break;
 
+		struct rte_mbuf *rep;
+		rep = rte_rxmbuf_alloc(rxq->mb_pool);
 		idx = rcd->rxdIdx;
 		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
 		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
@@ -651,13 +681,17 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		vmxnet3_rx_offload(rcd, rxm);
 
+		if (unlikely(rep == NULL)) {
+			rep = rxm;
+			goto rcd_done;
+		}
 		rx_pkts[nb_rx++] = rxm;
 rcd_done:
 		rxq->cmd_ring[ring_idx].next2comp = idx;
 		VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
 
 		/* It's time to allocate some new buf and renew descriptors */
-		vmxnet3_post_rx_bufs(rxq, ring_idx);
+		vmxnet3_renew_desc(rxq, ring_idx,rep);
 		if (unlikely(rxq->shared->ctrl.updateRxProd)) {
 			VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
 					       rxq->cmd_ring[ring_idx].next2fill);
-- 
1.7.9.5



More information about the dev mailing list