[dpdk-dev] [PATCH v2 13/13] net/mlx5: add shared Rx queue port datapath support
Xueming Li
xuemingl at nvidia.com
Sat Oct 16 11:12:13 CEST 2021
From: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
When receiving a packet, the mlx5 PMD takes the mbuf port number from
the rxq data.
To support shared Rx queues, save the port number into the RQ context
as the user index. For received packets, resolve the port number from
the CQE user index, which is derived from the RQ context.
The legacy Verbs API doesn't support setting the RQ user index, so in
that case the port number is still read from the rxq data.
Signed-off-by: Xueming Li <xuemingl at nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
---
drivers/net/mlx5/mlx5_devx.c | 1 +
drivers/net/mlx5/mlx5_rx.c | 1 +
drivers/net/mlx5/mlx5_rxq.c | 3 ++-
drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 6 ++++++
drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 12 +++++++++++-
drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 8 +++++++-
6 files changed, 28 insertions(+), 3 deletions(-)
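For reference, a minimal sketch of the port resolution described in the
commit message; the types below are simplified stand-ins for the real
mlx5 structures (mlx5_cqe, mlx5_rxq_data), not the driver definitions:

#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the CQE fields relevant here. */
struct sketch_cqe {
	uint16_t user_index_low; /* set from rq_attr.user_index at RQ create */
};

/* Stand-in for the Rx queue data relevant here. */
struct sketch_rxq {
	bool shared;      /* RMP-based shared Rx queue */
	uint16_t port_id; /* owning port for non-shared queues */
};

/*
 * At RQ creation the owning port id is stored as the RQ user index, so
 * every CQE completed on that RQ reports which port received the packet.
 * For non-shared queues the port id still comes from the rxq data.
 */
static inline uint16_t
sketch_rx_port(const struct sketch_rxq *rxq, const struct sketch_cqe *cqe)
{
	return rxq->shared ? cqe->user_index_low : rxq->port_id;
}
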
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 94253047141..11c426eee14 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -277,6 +277,7 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
MLX5_WQ_END_PAD_MODE_NONE;
rq_attr.wq_attr.pd = priv->sh->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
+ rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
if (rxq_data->shared) /* Create RMP based RQ. */
rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
/* Create RQ using DevX API. */
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 3017a8da20c..6ee54b820f1 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -707,6 +707,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
{
/* Update packet information. */
pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
+ pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;
if (rxq->rss_hash) {
uint32_t rss_hash_res = 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 494c9e3517f..250922b0d7a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -179,7 +179,8 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
rte_mbuf_refcnt_set(mbuf_init, 1);
mbuf_init->nb_segs = 1;
- mbuf_init->port = rxq->port_id;
+ /* For shared queues port is provided in CQE */
+ mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
/*
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index 82586f012cb..115320a26f0 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -1189,6 +1189,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
+ if (unlikely(rxq->shared)) {
+ pkts[pos]->port = cq[pos].user_index_low;
+ pkts[pos + p1]->port = cq[pos + p1].user_index_low;
+ pkts[pos + p2]->port = cq[pos + p2].user_index_low;
+ pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+ }
if (rxq->hw_timestamp) {
int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 5ff792f4cb5..9e78318129a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -787,7 +787,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
/* C.4 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
- if (rxq->hw_timestamp) {
+ if (unlikely(rxq->shared)) {
+ elts[pos]->port = container_of(p0, struct mlx5_cqe,
+ pkt_info)->user_index_low;
+ elts[pos + 1]->port = container_of(p1, struct mlx5_cqe,
+ pkt_info)->user_index_low;
+ elts[pos + 2]->port = container_of(p2, struct mlx5_cqe,
+ pkt_info)->user_index_low;
+ elts[pos + 3]->port = container_of(p3, struct mlx5_cqe,
+ pkt_info)->user_index_low;
+ }
+ if (unlikely(rxq->hw_timestamp)) {
int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index adf991f0139..97eb0adc9e6 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -736,7 +736,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
*err |= _mm_cvtsi128_si64(opcode);
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
- if (rxq->hw_timestamp) {
+ if (unlikely(rxq->shared)) {
+ pkts[pos]->port = cq[pos].user_index_low;
+ pkts[pos + p1]->port = cq[pos + p1].user_index_low;
+ pkts[pos + p2]->port = cq[pos + p2].user_index_low;
+ pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+ }
+ if (unlikely(rxq->hw_timestamp)) {
int offset = rxq->timestamp_offset;
if (rxq->rt_timestamp) {
struct mlx5_dev_ctx_shared *sh = rxq->sh;
--
2.33.0