[dpdk-dev] [PATCH 1/4] Revert "net/mlx5: fix Rx queue count calculation"
Maxime Leroy
maxime.leroy at 6wind.com
Tue Nov 10 15:09:35 CET 2020
This reverts commit d2d57605522d4a43be17e22e649e54033f6d8835.

This fix is incorrect for at least two reasons.
First issue: when there are more than 8 CQEs to decompress, the
computation done by this commit cannot work, because the zip->ai
variable describes the current index inside the 8-entry mini-CQE array
and is therefore limited to 0 through 7 inclusive. So once 9 packets
have been decompressed, ai is 0, and in that case n equals cqe_cnt - 0.

Example with 11 packets, we will have:

C | a | e0 | e1 | e2 | e3 | e4 | e5 | C | a | e0

C  <-- compressed CQE
a  <-- array of mini-CQEs
ex <-- empty entry used to store an uncompressed CQE

When the 9th packet is decompressed by the software, n should be 9,
but with this commit n is 11 (i.e. 11 - 0).
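
For illustration only, here is a minimal standalone sketch of the
arithmetic above with the 11-packet example. This is toy code, not the
mlx5 driver; the names cqe_cnt and ai simply mirror the struct rxq_zip
fields used by the reverted commit:

#include <stdio.h>

/*
 * Toy model of the reverted computation: ai only indexes the current
 * 8-entry mini-CQE array, so by the time the 9th packet is
 * decompressed it has wrapped back to 0.
 */
int main(void)
{
	unsigned int cqe_cnt = 11; /* mini-CQEs in the compressed session */
	unsigned int ai = 0;       /* index inside the current 8-entry array */
	unsigned int n = cqe_cnt - ai;

	printf("n = %u\n", n);     /* prints 11, not the 9 expected above */
	return 0;
}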

The second issue is how the next packet is counted. Example:

packet 1                            | packet 2
C | a | e0 | e1 | e2 | e3 | e4 | e5 | C | a | e0

There are 2 compressed packets in the queue. For the first packet, n is
computed correctly. But for the second, n is not computed properly,
because the zip context belongs to the first packet: the second packet
has not been decompressed yet, so there is no context for it.
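
Again as a toy sketch only (not driver code; the struct below merely
mimics the relevant struct rxq_zip fields, and the values are
hypothetical): reusing the first packet's zip context for the second
compressed CQE yields a count unrelated to that CQE, whereas reading
the CQE's own byte_cnt, as the restored code does, gives the expected
value.

#include <stdio.h>

/* Minimal stand-in for the zip context of the first compressed packet. */
struct toy_zip {
	unsigned int cqe_cnt; /* mini-CQEs in the first session */
	unsigned int ai;      /* decompression index in the first session */
};

int main(void)
{
	struct toy_zip zip = { .cqe_cnt = 8, .ai = 2 }; /* hypothetical state */
	unsigned int second_byte_cnt = 3; /* mini-CQEs announced by the 2nd CQE */

	/* Reverted commit: applies the first packet's context to the second. */
	unsigned int n_wrong = zip.cqe_cnt - zip.ai; /* 6, meaningless here */
	/* Restored behaviour: take the count from the second CQE itself. */
	unsigned int n_right = second_byte_cnt;      /* 3 */

	printf("wrong n = %u, right n = %u\n", n_wrong, n_right);
	return 0;
}
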
Signed-off-by: Maxime Leroy <maxime.leroy at 6wind.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
drivers/net/mlx5/mlx5_rxtx.c | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 844a1c63..4c566486 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -462,11 +462,19 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
{
struct rxq_zip *zip = &rxq->zip;
volatile struct mlx5_cqe *cqe;
- unsigned int cq_ci = rxq->cq_ci;
const unsigned int cqe_n = (1 << rxq->cqe_n);
const unsigned int cqe_cnt = cqe_n - 1;
- unsigned int used = 0;
+ unsigned int cq_ci;
+ unsigned int used;
+ /* if we are processing a compressed cqe */
+ if (zip->ai) {
+ used = zip->cqe_cnt - zip->ca;
+ cq_ci = zip->cq_ci;
+ } else {
+ used = 0;
+ cq_ci = rxq->cq_ci;
+ }
cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
int8_t op_own;
@@ -474,17 +482,14 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
op_own = cqe->op_own;
if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
- if (unlikely(zip->ai))
- n = zip->cqe_cnt - zip->ai;
- else
- n = rte_be_to_cpu_32(cqe->byte_cnt);
+ n = rte_be_to_cpu_32(cqe->byte_cnt);
else
n = 1;
cq_ci += n;
used += n;
cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
}
- used = RTE_MIN(used, cqe_n);
+ used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
return used;
}
@@ -507,12 +512,11 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
- if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
+ if (dev->rx_pkt_burst != mlx5_rx_burst) {
rte_errno = ENOTSUP;
return -rte_errno;
}
- if (offset >= (1 << rxq->cqe_n)) {
+ if (offset >= (1 << rxq->elts_n)) {
rte_errno = EINVAL;
return -rte_errno;
}
@@ -642,8 +646,7 @@ mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq;
- if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
+ if (dev->rx_pkt_burst != mlx5_rx_burst) {
rte_errno = ENOTSUP;
return -rte_errno;
}
--
2.27.0