[dpdk-dev] [PATCH 12/13] net/mlx5: make ASO meter queue thread-safe
Li Zhang
lizh at nvidia.com
Wed Mar 31 09:36:30 CEST 2021
The ASO meter send queue and its completion queue may be
accessed concurrently from different threads. Synchronize
the enqueue and completion-handling paths with a per-SQ
spinlock.
Signed-off-by: Li Zhang <lizh at nvidia.com>
---
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow_aso.c | 16 +++++++++++++---
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index d0cd402495..27a6b63ba3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -503,6 +503,7 @@ struct mlx5_aso_sq_elem {
struct mlx5_aso_sq {
uint16_t log_desc_n;
+ rte_spinlock_t sqsl;
struct mlx5_aso_cq cq;
struct mlx5_devx_sq sq_obj;
volatile uint64_t *uar_addr;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index 4c1b2ed6e5..cdca7f9a03 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -275,6 +275,7 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
sq->tail = 0;
sq->sqn = sq->sq_obj.sq->id;
sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
+ rte_spinlock_init(&sq->sqsl);
return 0;
error:
mlx5_aso_destroy_sq(sq);
@@ -666,12 +667,15 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
struct mlx5_flow_meter_info *fm = NULL;
uint16_t size = 1 << sq->log_desc_n;
uint16_t mask = size - 1;
- uint16_t res = size - (uint16_t)(sq->head - sq->tail);
+ uint16_t res;
uint32_t dseg_idx = 0;
struct mlx5_aso_mtr_pool *pool = NULL;
+ rte_spinlock_lock(&sq->sqsl);
+ res = size - (uint16_t)(sq->head - sq->tail);
if (unlikely(!res)) {
DRV_LOG(ERR, "Fail: SQ is full and no free WQE to send");
+ rte_spinlock_unlock(&sq->sqsl);
return 0;
}
wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
@@ -708,6 +712,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_aso_sq *sq,
rte_wmb();
*sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH. */
rte_wmb();
+ rte_spinlock_unlock(&sq->sqsl);
return 1;
}
@@ -738,12 +743,16 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
const unsigned int mask = cq_size - 1;
uint32_t idx;
uint32_t next_idx = cq->cq_ci & mask;
- const uint16_t max = (uint16_t)(sq->head - sq->tail);
+ uint16_t max;
uint16_t n = 0;
int ret;
- if (unlikely(!max))
+ rte_spinlock_lock(&sq->sqsl);
+ max = (uint16_t)(sq->head - sq->tail);
+ if (unlikely(!max)) {
+ rte_spinlock_unlock(&sq->sqsl);
return;
+ }
do {
idx = next_idx;
next_idx = (cq->cq_ci + 1) & mask;
@@ -770,6 +779,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq)
rte_io_wmb();
cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
}
+ rte_spinlock_unlock(&sq->sqsl);
}
/**
--
2.27.0
More information about the dev
mailing list