[dpdk-dev] [PATCH 2/5] net/mlx5: configure Rx queue to support split

Viacheslav Ovsiienko viacheslavo at nvidia.com
Thu Oct 22 17:42:48 CEST 2020


The scatter-gather elements must be configured
to support the buffer split feature. The application
provides the desired settings for the segments at the
beginning of the packets, and the PMD pads the buffer
chain (if needed) with the attributes of the last
specified segment to accommodate packets of maximal
length.
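
As an illustration, a simplified standalone sketch of this padding
rule follows (not the driver code itself: it omits the headroom of
the first buffer and the power-of-two rounding of the segment count
that the actual loop performs):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static unsigned int
pad_rx_segments(const struct rte_eth_rxseg_split *seg, unsigned int n_seg,
		unsigned int max_rx_pkt_len, uint16_t *len, unsigned int max_n)
{
	unsigned int i = 0, tail = max_rx_pkt_len;

	while (tail != 0 && i < max_n) {
		const struct rte_eth_rxseg_split *s =
			&seg[RTE_MIN(i, n_seg - 1)];
		uint32_t room = rte_pktmbuf_data_room_size(s->mp);
		/* Beyond the described segments: full pool buffer at
		 * zero offset; a zero length means "rest of the buffer".
		 */
		uint32_t sl = i >= n_seg ? room :
			      s->length ? s->length : room - s->offset;

		len[i++] = sl;
		tail -= RTE_MIN(tail, sl);
	}
	return i; /* number of SGEs needed for the maximal packet */
}

For example, with max_rx_pkt_len = 9000, a 14-byte first segment and
(illustrative) 2048-byte payload buffers, six SGEs are needed; the
actual loop then keeps padding up to eight, since the queue is built
with a power-of-two number of SGEs (sges_n = log2above(rxseg_n)).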

Some limitations are implied. The MPRQ feature
must be disengaged if a split is requested, because
MPRQ neither supports pushing data to dedicated
pools nor follows flexible buffer sizes. The
vectorized rx_burst routines do not support
scattering (they are extremely simplified and work
over a single segment only) and cannot handle the
split either.
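
For reference, a minimal sketch of the application-side request this
code serves, assuming the rte_eth_rxseg_split ethdev API this series
builds on (the pool names, the 14-byte split point and the descriptor
count are illustrative). Note that DEV_RX_OFFLOAD_SCATTER has to be
enabled along with the split, matching the check added below:

#include <rte_ethdev.h>

static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	union rte_eth_rxseg rx_seg[2] = {
		/* Exactly 14 bytes (L2 header) into the header pool. */
		{ .split = { .mp = hdr_pool, .length = 14, .offset = 0 } },
		/* Zero length: the rest of the packet; the PMD pads the
		 * chain with buffers from this pool if needed.
		 */
		{ .split = { .mp = pay_pool, .length = 0, .offset = 0 } },
	};
	struct rte_eth_rxconf rxconf = {
		.offloads = DEV_RX_OFFLOAD_SCATTER |
			    DEV_RX_OFFLOAD_BUFFER_SPLIT,
		.rx_nseg = RTE_DIM(rx_seg),
		.rx_seg = rx_seg,
	};

	/* No single mempool is passed when segments are described. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      SOCKET_ID_ANY, &rxconf, NULL);
}

Such a request yields n_seg > 1, so MPRQ is disengaged and the scalar
scattered rx_burst is selected.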

Signed-off-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 96 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 82 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ce03c75..dc79498 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1374,7 +1374,8 @@ struct mlx5_rxq_ctrl *
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1382,22 +1383,89 @@ struct mlx5_rxq_ctrl *
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond the descriptions the offset
+		 * is zero, only the first buffer contains the headroom.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond the descriptions the length
+		 * is the pool buffer length; zero lengths are replaced
+		 * with the pool buffer length as well.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+						       qs_seg->length ?
+						       qs_seg->length :
+						       (buf_len - offset);
+		/* Check is done in long int, no overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->offset, qs_seg->length);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
 			RTE_PKTMBUF_HEADROOM);
 		rte_errno = ENOSPC;
-		return NULL;
-	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
-			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
+		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
@@ -1424,7 +1492,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough strides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1454,11 +1522,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1473,7 +1541,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = log2above(tmpl->rxq.rxseg_n);
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


