[dpdk-dev] [PATCH v1 04/21] net/mlx5: prefix Tx control queue structures
Nelio Laranjeiro
nelio.laranjeiro at 6wind.com
Wed Aug 2 16:10:20 CEST 2017
Prefix the Tx control structures (struct txq -> struct mlx5_txq_data, struct txq_ctrl -> struct mlx5_txq_ctrl) and their related txq_* functions with mlx5.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
drivers/net/mlx5/mlx5.c | 8 ++++----
drivers/net/mlx5/mlx5.h | 2 +-
drivers/net/mlx5/mlx5_mr.c | 20 +++++++++---------
drivers/net/mlx5/mlx5_rxtx.c | 25 ++++++++++++-----------
drivers/net/mlx5/mlx5_rxtx.h | 29 ++++++++++++++-------------
drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 14 ++++++-------
drivers/net/mlx5/mlx5_stats.c | 2 +-
drivers/net/mlx5/mlx5_txq.c | 39 ++++++++++++++++++------------------
8 files changed, 72 insertions(+), 67 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d2fa8b1..c7bc65f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -174,14 +174,14 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
- struct txq_ctrl *txq_ctrl;
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
+ struct mlx5_txq_ctrl *txq_ctrl;
if (txq == NULL)
continue;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
(*priv->txqs)[i] = NULL;
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
rte_free(txq_ctrl);
}
priv->txqs_n = 0;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 55cea6f..155dd76 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -130,7 +130,7 @@ struct priv {
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
- struct txq *(*txqs)[]; /* TX queues. */
+ struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
/* Indirection tables referencing all RX WQs. */
struct ibv_exp_rwq_ind_table *(*ind_tables)[];
unsigned int ind_tables_n; /* Number of indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 9593830..e8adde5 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -175,9 +175,11 @@ mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
* mr->lkey on success, (uint32_t)-1 on failure.
*/
uint32_t
-txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+ unsigned int idx)
{
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
struct ibv_mr *mr;
/* Add a new entry, register MR first. */
@@ -229,8 +231,8 @@ struct txq_mp2mr_mbuf_check_data {
* Object index, unused.
*/
static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
+mlx5_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
{
struct txq_mp2mr_mbuf_check_data *data = arg;
struct rte_mbuf *buf = obj;
@@ -253,9 +255,9 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
* Pointer to TX queue structure.
*/
void
-txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+mlx5_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
- struct txq_ctrl *txq_ctrl = arg;
+ struct mlx5_txq_ctrl *txq_ctrl = arg;
struct txq_mp2mr_mbuf_check_data data = {
.ret = 0,
};
@@ -264,8 +266,8 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
unsigned int i;
/* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
+ if (rte_mempool_obj_iter(mp, mlx5_txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
return;
if (mlx5_check_mempool(mp, &start, &end) != 0) {
ERROR("mempool %p: not virtually contiguous",
@@ -283,5 +285,5 @@ txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
end <= (uintptr_t)mr->addr + mr->length)
return;
}
- txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
+ mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index cd5182c..986e238 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -200,7 +200,7 @@ mlx5_set_ptype_table(void)
* Size of tailroom.
*/
static inline size_t
-tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
{
size_t tailroom;
tailroom = (uintptr_t)(txq->wqes) +
@@ -258,7 +258,7 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- struct txq *txq = tx_queue;
+ struct mlx5_txq_data *txq = tx_queue;
uint16_t used;
mlx5_tx_complete(txq);
@@ -334,7 +334,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -731,7 +731,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet length.
*/
static inline void
-mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
@@ -770,7 +770,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
* Pointer to MPW session structure.
*/
static inline void
-mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
unsigned int num = mpw->pkts_n;
@@ -804,7 +804,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
uint16_t
mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -945,7 +945,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet length.
*/
static inline void
-mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
+ uint32_t length)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
struct mlx5_wqe_inl_small *inl;
@@ -980,7 +981,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
* Pointer to MPW session structure.
*/
static inline void
-mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
unsigned int size;
struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
@@ -1014,7 +1015,7 @@ uint16_t
mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -1237,7 +1238,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* Packet length.
*/
static inline void
-mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
{
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
@@ -1278,7 +1279,7 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
* Number of consumed WQEs.
*/
static inline uint16_t
-mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
{
uint16_t ret;
@@ -1308,7 +1309,7 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
uint16_t
mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index bd07b5d..12366c5 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -228,8 +228,8 @@ struct hash_rxq {
};
/* TX queue descriptor. */
-__extension__
-struct txq {
+RTE_STD_C11
+struct mlx5_txq_data {
uint16_t elts_head; /* Current counter in (*elts)[]. */
uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
@@ -267,12 +267,12 @@ struct txq {
} __rte_cache_aligned;
/* TX queue control descriptor. */
-struct txq_ctrl {
+struct mlx5_txq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
unsigned int socket; /* CPU socket ID for allocations. */
- struct txq txq; /* Data path structure. */
+ struct mlx5_txq_data txq; /* Data path structure. */
};
/* mlx5_rxq.c */
@@ -306,9 +306,9 @@ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* mlx5_txq.c */
-void txq_cleanup(struct txq_ctrl *);
-int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
- unsigned int, const struct rte_eth_txconf *);
+void mlx5_txq_cleanup(struct mlx5_txq_ctrl *);
+int mlx5_txq_ctrl_setup(struct rte_eth_dev *, struct mlx5_txq_ctrl *, uint16_t,
+ unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
@@ -340,8 +340,9 @@ uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
/* mlx5_mr.c */
struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
-void txq_mp2mr_iter(struct rte_mempool *, void *);
-uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+void mlx5_txq_mp2mr_iter(struct rte_mempool *, void *);
+uint32_t mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
+ unsigned int);
#ifndef NDEBUG
/**
@@ -431,7 +432,7 @@ check_cqe(volatile struct mlx5_cqe *cqe,
* WQE address.
*/
static inline uintptr_t *
-tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+tx_mlx5_wqe(struct mlx5_txq_data *txq, uint16_t ci)
{
ci &= ((1 << txq->wqe_n) - 1);
return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
@@ -446,7 +447,7 @@ tx_mlx5_wqe(struct txq *txq, uint16_t ci)
* Pointer to TX queue structure.
*/
static __rte_always_inline void
-mlx5_tx_complete(struct txq *txq)
+mlx5_tx_complete(struct mlx5_txq_data *txq)
{
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -546,7 +547,7 @@ mlx5_tx_mb2mp(struct rte_mbuf *buf)
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
uint16_t i = txq->mr_cache_idx;
uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
@@ -569,7 +570,7 @@ mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
}
}
txq->mr_cache_idx = 0;
- return txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
+ return mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
}
/**
@@ -581,7 +582,7 @@ mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
* Pointer to the last WQE posted in the NIC.
*/
static __rte_always_inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
{
uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
volatile uint64_t *src = ((volatile uint64_t *)wqe);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 245a58e..fb96542 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -77,7 +77,7 @@
* Number of packets to be filled.
*/
static inline void
-txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
struct rte_mbuf **pkts, unsigned int n)
{
unsigned int pos;
@@ -154,7 +154,7 @@ txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets having same ol_flags.
*/
static inline unsigned int
-txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
uint8_t *cs_flags)
{
unsigned int pos;
@@ -205,7 +205,7 @@ txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
* Number of packets successfully transmitted (<= pkts_n).
*/
static uint16_t
-txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
@@ -331,7 +331,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
-txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
uint8_t cs_flags)
{
struct rte_mbuf **elts;
@@ -446,7 +446,7 @@ uint16_t
mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t nb_tx = 0;
while (pkts_n > nb_tx) {
@@ -478,7 +478,7 @@ mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t
mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t nb_tx = 0;
while (pkts_n > nb_tx) {
@@ -1279,7 +1279,7 @@ priv_check_raw_vec_tx_support(struct priv *priv)
/* All the configured queues should support. */
for (i = 0; i < priv->txqs_n; ++i) {
- struct txq *txq = (*priv->txqs)[i];
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 3c3db24..c188488 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -350,7 +350,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
tmp.rx_nombuf += rxq->stats.rx_nombuf;
}
for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
if (txq == NULL)
continue;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4b0b532..5384b51 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -67,7 +67,7 @@
* Number of elements to allocate.
*/
static void
-txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
+txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n)
{
unsigned int i;
@@ -93,7 +93,7 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
* Pointer to TX queue structure.
*/
static void
-txq_free_elts(struct txq_ctrl *txq_ctrl)
+txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -130,7 +130,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
* Pointer to TX queue structure.
*/
void
-txq_cleanup(struct txq_ctrl *txq_ctrl)
+mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
{
size_t i;
@@ -160,7 +160,7 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)
* 0 on success, errno value on failure.
*/
static inline int
-txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_qp *qp = to_mqp(tmpl->qp);
struct ibv_cq *ibcq = tmpl->cq;
@@ -209,12 +209,12 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
* 0 on success, errno value on failure.
*/
int
-txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_txconf *conf)
+mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf)
{
struct priv *priv = mlx5_get_priv(dev);
- struct txq_ctrl tmpl = {
+ struct mlx5_txq_ctrl tmpl = {
.priv = priv,
.socket = socket,
};
@@ -381,15 +381,15 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
}
/* Clean up txq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
*txq_ctrl = tmpl;
DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
/* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
+ rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
assert(ret == 0);
return 0;
error:
- txq_cleanup(&tmpl);
+ mlx5_txq_cleanup(&tmpl);
assert(ret > 0);
return ret;
}
@@ -416,8 +416,9 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf)
{
struct priv *priv = dev->data->dev_private;
- struct txq *txq = (*priv->txqs)[idx];
- struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
int ret;
if (mlx5_is_secondary())
@@ -453,7 +454,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -EEXIST;
}
(*priv->txqs)[idx] = NULL;
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
/* Resize if txq size is changed. */
if (txq_ctrl->txq.elts_n != log2above(desc)) {
txq_ctrl = rte_realloc(txq_ctrl,
@@ -480,7 +481,7 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -ENOMEM;
}
}
- ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
+ ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
if (ret)
rte_free(txq_ctrl);
else {
@@ -502,8 +503,8 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
void
mlx5_tx_queue_release(void *dpdk_txq)
{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct txq_ctrl *txq_ctrl;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
struct priv *priv;
unsigned int i;
@@ -512,7 +513,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
if (txq == NULL)
return;
- txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
@@ -522,7 +523,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
(*priv->txqs)[i] = NULL;
break;
}
- txq_cleanup(txq_ctrl);
+ mlx5_txq_cleanup(txq_ctrl);
rte_free(txq_ctrl);
priv_unlock(priv);
}
--
2.1.4
More information about the dev
mailing list