[dpdk-dev] [PATCH 4/4] net/mlx5: add multi-segment packets in MPRQ mode
Slava Ovsiienko
viacheslavo at mellanox.com
Thu Apr 2 12:02:52 CEST 2020
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev at mellanox.com>
> Sent: Wednesday, April 1, 2020 0:53
> To: dev at dpdk.org
> Cc: Raslan Darawsheh <rasland at mellanox.com>; Matan Azrad
> <matan at mellanox.com>; Slava Ovsiienko <viacheslavo at mellanox.com>;
> ferruh.yigit at intel.com; Thomas Monjalon <thomas at monjalon.net>
> Subject: [PATCH 4/4] net/mlx5: add multi-segment packets in MPRQ mode
>
> The multi-stride operations now allow the stride size to be reduced while
> still supporting Jumbo frames. That means that mbufs may be configured with
> a size smaller than the whole received packet. This is not an issue during
> normal MPRQ operation, since we attach external buffers instead of copying
> the data into the mbuf itself. But it is not the case in the "emergency
> mode", when we have to copy every packet because no more external mbufs
> are available. Assemble a multi-segment packet to overcome this issue if
> scatter mode is enabled, and drop the packet otherwise.
>
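For readers following the copy path below: the scatter fallback is the
standard mbuf chaining pattern. A minimal standalone sketch of the same
technique outside the driver (the copy_to_chain() helper is hypothetical
and not part of the patch; mp/addr/len stand in for the Rx burst context,
and mbufs in 'mp' are assumed to have non-zero tailroom):

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>

/* Copy 'len' bytes from 'addr' into a chain of mbufs allocated from 'mp'.
 * Returns the head of the chain, or NULL on allocation failure.
 */
static struct rte_mbuf *
copy_to_chain(struct rte_mempool *mp, const uint8_t *addr, uint32_t len)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *prev = head;

	if (head == NULL)
		return NULL;
	head->pkt_len = len;
	while (len) {
		/* Fill the current segment up to its tailroom. */
		uint32_t seg_len = RTE_MIN(rte_pktmbuf_tailroom(prev), len);

		rte_memcpy(rte_pktmbuf_mtod(prev, void *), addr, seg_len);
		prev->data_len = seg_len;
		addr += seg_len;
		len -= seg_len;
		if (len) {
			struct rte_mbuf *next = rte_pktmbuf_alloc(mp);

			if (next == NULL) {
				rte_pktmbuf_free(head); /* frees whole chain */
				return NULL;
			}
			prev->next = next;
			prev = next;
			head->nb_segs++;
		}
	}
	return head;
}

The patch itself goes further: it resets the data offset of continuation
segments (SET_DATA_OFF(next, 0)) so that headroom is reserved only in the
first segment, and it accounts allocation failures in rx_nombuf.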
> Signed-off-by: Alexander Kozyrev <akozyrev at mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at mellanox.com>
> ---
>  drivers/net/mlx5/mlx5_rxtx.c | 47 ++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 39 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> index 4c27952..7ce3732 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -1734,22 +1734,52 @@ enum mlx5_txcmp_code {
>  		 * Memcpy packets to the target mbuf if:
>  		 * - The size of packet is smaller than mprq_max_memcpy_len.
>  		 * - Out of buffer in the Mempool for Multi-Packet RQ.
> -		 * - There is no space for a headroom and scatter is disabled.
> +		 * - The packet's stride overlaps a headroom and scatter is off.
>  		 */
>  		if (len <= rxq->mprq_max_memcpy_len ||
>  		    rxq->mprq_repl == NULL ||
>  		    (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
> -			/*
> -			 * When memcpy'ing packet due to out-of-buffer, the
> -			 * packet must be smaller than the target mbuf.
> -			 */
> -			if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
> +			if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
> +				rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
> +					   addr, len);
> +				DATA_LEN(pkt) = len;
> +			} else if (rxq->strd_scatter_en) {
> +				struct rte_mbuf *prev = pkt;
> +				uint32_t seg_len =
> +					RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
> +				uint32_t rem_len = len - seg_len;
> +
> +				rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
> +					   addr, seg_len);
> +				DATA_LEN(pkt) = seg_len;
> +				while (rem_len) {
> +					struct rte_mbuf *next =
> +						rte_pktmbuf_alloc(rxq->mp);
> +
> +					if (unlikely(next == NULL)) {
> +						rte_pktmbuf_free(pkt);
> +						++rxq->stats.rx_nombuf;
> +						goto out;
> +					}
> +					NEXT(prev) = next;
> +					SET_DATA_OFF(next, 0);
> +					addr = RTE_PTR_ADD(addr, seg_len);
> +					seg_len = RTE_MIN
> +						(rte_pktmbuf_tailroom(next),
> +						 rem_len);
> +					rte_memcpy
> +						(rte_pktmbuf_mtod(next, void *),
> +						 addr, seg_len);
> +					DATA_LEN(next) = seg_len;
> +					rem_len -= seg_len;
> +					prev = next;
> +					++NB_SEGS(pkt);
> +				}
> +			} else {
>  				rte_pktmbuf_free_seg(pkt);
>  				++rxq->stats.idropped;
>  				continue;
>  			}
> -			rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
> -			DATA_LEN(pkt) = len;
>  		} else {
>  			rte_iova_t buf_iova;
>  			struct rte_mbuf_ext_shared_info *shinfo;
> @@ -1826,6 +1856,7 @@ enum mlx5_txcmp_code {
>  		*(pkts++) = pkt;
>  		++i;
>  	}
> +out:
>  	/* Update the consumer indexes. */
>  	rxq->consumed_strd = consumed_strd;
>  	rte_cio_wmb();
> --
> 1.8.3.1
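One usage note: the new chaining path is taken only when Rx scatter is
enabled on the queue, otherwise such packets are dropped as the commit log
says. A minimal sketch of an application-side configuration that would
exercise it (port id, queue counts and frame size are illustrative
assumptions, not part of the patch):

#include <rte_ethdev.h>

/* Sketch: enable Rx scatter (plus jumbo frames) so the MPRQ copy
 * fallback can assemble chained mbufs instead of dropping packets.
 * Assumes the device was probed with MPRQ enabled via devargs,
 * e.g. mprq_en=1.
 */
static int
configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = DEV_RX_OFFLOAD_SCATTER |
				    DEV_RX_OFFLOAD_JUMBO_FRAME,
			.max_rx_pkt_len = 9000, /* jumbo frames */
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}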