[dpdk-dev] [PATCH v2 2/2] vhost: call rte_pktmbuf_alloc_bulk in vhost dequeue

Huawei Xie huawei.xie at intel.com
Mon Dec 14 02:14:42 CET 2015


Pre-allocate a bulk of mbufs instead of allocating one mbuf at a time on demand.
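
The bulk API (added earlier in this series) fetches all of the head
mbufs for a burst in a single mempool operation instead of one mempool
access per packet. A minimal sketch of the calling pattern, with an
illustrative helper name that is not part of this patch:

    #include <rte_mbuf.h>

    /* Reserve 'count' mbufs for a dequeue burst up front.
     * rte_pktmbuf_alloc_bulk() returns 0 on success and a negative
     * value if the pool cannot supply all 'count' mbufs at once; the
     * bulk get is all-or-nothing, so on failure there is nothing to
     * roll back and the caller can simply return 0 packets.
     */
    static int
    prepare_burst(struct rte_mempool *pool, struct rte_mbuf **pkts,
            unsigned int count)
    {
        return rte_pktmbuf_alloc_bulk(pool, pkts, count);
    }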

Signed-off-by: Gerald Rogers <gerald.rogers at intel.com>
Signed-off-by: Huawei Xie <huawei.xie at intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
---
 lib/librte_vhost/vhost_rxtx.c | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)
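
One caveat in the new flow: the bulk allocation covers only the head
mbuf of each packet, so a failed allocation for an additional segment
inside the copy loop can still abort the burst with alloc_err set. At
that point entries [entry_success, free_entries) of pkts[] hold mbufs
that will not be returned to the caller, and they must go back to the
pool, which is what the new block after the loop does. A standalone
sketch of that cleanup (helper name is illustrative):

    /* Return the unconsumed tail of a bulk allocation to its pool. */
    static void
    free_unused_mbufs(struct rte_mbuf **pkts, uint16_t entry_success,
            uint16_t free_entries)
    {
        uint16_t i;

        for (i = entry_success; i < free_entries; i++)
            rte_pktmbuf_free(pkts[i]);
    }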

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index bbf3fac..0faae58 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -576,6 +576,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 	uint32_t i;
 	uint16_t free_entries, entry_success = 0;
 	uint16_t avail_idx;
+	uint8_t alloc_err = 0;
+	uint8_t seg_num;
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
 		RTE_LOG(ERR, VHOST_DATA,
@@ -609,6 +611,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
 			dev->device_fh, free_entries);
+
+	if (unlikely(rte_pktmbuf_alloc_bulk(mbuf_pool,
+		pkts, free_entries) < 0)) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"Failed to bulk allocate %d mbufs\n", free_entries);
+		return 0;
+	}
+
 	/* Retrieve all of the head indexes first to avoid caching issues. */
 	for (i = 0; i < free_entries; i++)
 		head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -621,9 +631,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 		uint32_t vb_avail, vb_offset;
 		uint32_t seg_avail, seg_offset;
 		uint32_t cpy_len;
-		uint32_t seg_num = 0;
+		seg_num = 0;
 		struct rte_mbuf *cur;
-		uint8_t alloc_err = 0;
+
 
 		desc = &vq->desc[head[entry_success]];
 
@@ -654,13 +664,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 		vq->used->ring[used_idx].id = head[entry_success];
 		vq->used->ring[used_idx].len = 0;
 
-		/* Allocate an mbuf and populate the structure. */
-		m = rte_pktmbuf_alloc(mbuf_pool);
-		if (unlikely(m == NULL)) {
-			RTE_LOG(ERR, VHOST_DATA,
-				"Failed to allocate memory for mbuf.\n");
-			break;
-		}
+		prev = cur = m = pkts[entry_success];
 		seg_offset = 0;
 		seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
 		cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -668,8 +672,6 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 		PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
 
 		seg_num++;
-		cur = m;
-		prev = m;
 		while (cpy_len != 0) {
 			rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, seg_offset),
 				(void *)((uintptr_t)(vb_addr + vb_offset)),
@@ -761,16 +763,23 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
 			cpy_len = RTE_MIN(vb_avail, seg_avail);
 		}
 
-		if (unlikely(alloc_err == 1))
+		if (unlikely(alloc_err))
 			break;
 
 		m->nb_segs = seg_num;
 
-		pkts[entry_success] = m;
 		vq->last_used_idx++;
 		entry_success++;
 	}
 
+	if (unlikely(alloc_err)) {
+		uint16_t i;
+
+		m->nb_segs = seg_num;
+		for (i = entry_success; i < free_entries; i++)
+			rte_pktmbuf_free(pkts[i]);
+	}
+
 	rte_compiler_barrier();
 	vq->used->idx += entry_success;
 	/* Kick guest if required. */
-- 
1.8.1.4


