[dpdk-dev] [PATCH v1 21/42] net/txgbe: add RX and TX queues setup
Jiawen Wu
jiawenwu at trustnetic.com
Tue Sep 1 13:50:52 CEST 2020
Add receive and transmit queue setup functions.
Signed-off-by: Jiawen Wu <jiawenwu at trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 2 +
drivers/net/txgbe/txgbe_ethdev.h | 9 +
drivers/net/txgbe/txgbe_rxtx.c | 365 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_rxtx.h | 44 ++++
4 files changed, 420 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 80470c6e7..d2a355524 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1326,7 +1326,9 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.rx_queue_stop = txgbe_dev_rx_queue_stop,
.tx_queue_start = txgbe_dev_tx_queue_start,
.tx_queue_stop = txgbe_dev_tx_queue_stop,
+ .rx_queue_setup = txgbe_dev_rx_queue_setup,
.rx_queue_release = txgbe_dev_rx_queue_release,
+ .tx_queue_setup = txgbe_dev_tx_queue_setup,
.tx_queue_release = txgbe_dev_tx_queue_release,
.dev_led_on = txgbe_dev_led_on,
.dev_led_off = txgbe_dev_led_off,
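For context on how the two new callbacks are reached: applications never call them directly, they go through the generic ethdev queue-setup API, which dispatches through eth_dev_ops. A rough application-side sketch (port id, descriptor counts and the single-queue configuration are assumptions for illustration, not part of this patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical application code: these calls end up in
 * txgbe_dev_rx_queue_setup()/txgbe_dev_tx_queue_setup() via eth_dev_ops. */
static int
setup_one_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };	/* defaults are enough for a sketch */
	const uint16_t nb_rxd = 512, nb_txd = 512;	/* assumed ring sizes */
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* NULL rx_conf/tx_conf means "use the driver defaults from dev_info" */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}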
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f5ee1cae6..d38021538 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -90,6 +90,15 @@ void txgbe_dev_rx_queue_release(void *rxq);
void txgbe_dev_tx_queue_release(void *txq);
+int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
int txgbe_dev_rx_init(struct rte_eth_dev *dev);
void txgbe_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 58824045b..2288332ce 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -15,6 +15,8 @@
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -34,6 +36,10 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
}
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
uint16_t
txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -104,6 +110,29 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
}
+static void __rte_cold
+txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static void __rte_cold
+txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
+{
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
+ rte_free(txq->sw_ring);
+}
+
static void __rte_cold
txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
{
@@ -120,6 +149,11 @@ txgbe_dev_tx_queue_release(void *txq)
txgbe_tx_queue_release(txq);
}
+static const struct txgbe_txq_ops def_txq_ops = {
+ .release_mbufs = txgbe_tx_queue_release_mbufs,
+ .free_swring = txgbe_tx_free_swring,
+};
+
/* Takes an ethdev and a queue and sets up the tx function to be used based on
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
@@ -147,6 +181,136 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
}
}
+int __rte_cold
+txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_hw *hw;
+ uint16_t tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed the hardware maximum and must be a multiple
+ * of TXGBE_TXD_ALIGN.
+ */
+ if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > TXGBE_RING_DESC_MAX) ||
+ (nb_desc < TXGBE_RING_DESC_MIN)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When set to zero use default values.
+ */
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
+ "TX descriptors minus 3. (tx_free_thresh=%u "
+ "port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ if ((nb_desc % tx_free_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_free_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct txgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
+ TXGBE_ALIGN, socket_id);
+ if (tz == NULL) {
+ txgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->ops = &def_txq_ops;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /*
+ * Set the tail pointer register address; VFs index it by queue_idx, the PF by reg_idx.
+ */
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
+ txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
+ } else {
+ txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(txq->reg_idx));
+ txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(txq->reg_idx));
+ }
+
+ txq->tx_ring_phys_addr = TMZ_PADDR(tz);
+ txq->tx_ring = (struct txgbe_tx_desc *) TMZ_VADDR(tz);
+
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+ sizeof(struct txgbe_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL) {
+ txgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ /* set up scalar TX function as appropriate */
+ txgbe_set_tx_function(dev, txq);
+
+ txq->ops->reset(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
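The threshold checks above reduce to three rules for tx_free_thresh: zero selects the default (DEFAULT_TX_FREE_THRESH, 32), the value must leave at least three descriptors unused (one of them the sentinel), and it must evenly divide nb_desc. A standalone restatement, with a made-up helper name, purely for illustration:

/* Hypothetical checker mirroring the constraints enforced in
 * txgbe_dev_tx_queue_setup(); not part of the patch itself. */
static int
tx_free_thresh_is_valid(uint16_t nb_desc, uint16_t thresh)
{
	if (thresh == 0)
		thresh = 32;	/* DEFAULT_TX_FREE_THRESH */
	if (thresh >= (uint16_t)(nb_desc - 3))
		return 0;	/* too close to the ring size */
	if (nb_desc % thresh != 0)
		return 0;	/* must divide the ring size evenly */
	return 1;
}

For nb_desc = 512, thresholds of 32, 64 or 128 pass; 100 (not a divisor) or 510 (too large) are rejected.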
/**
* txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
*
@@ -220,6 +384,50 @@ txgbe_dev_rx_queue_release(void *rxq)
txgbe_rx_queue_release(rxq);
}
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ * 0: the preconditions are satisfied and the bulk allocation function
+ * can be used.
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ * function must be used.
+ */
+static inline int __rte_cold
+check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
+{
+ int ret = 0;
+
+ /*
+ * Make sure the following pre-conditions are satisfied:
+ * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
+ * rxq->rx_free_thresh < rxq->nb_rx_desc
+ * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+ * Scattered packets are not supported. This should be checked
+ * outside of this function.
+ */
+ if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
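From the application side, the way to keep this fast path enabled is to pick rx_free_thresh so that all three conditions hold before calling rte_eth_rx_queue_setup(). A hedged sketch, assuming a 512-descriptor ring (the helper name and the values are illustrative only):

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative only: choose rx_free_thresh >= 32 (the max burst above),
 * smaller than the ring size, and a divisor of the ring size. */
static int
setup_rx_queue_keep_bulk_alloc(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	const uint16_t nb_rxd = 512;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.rx_free_thresh = 64;	/* 64 >= 32, 64 < 512, 512 % 64 == 0 */

	return rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}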
/* Reset dynamic txgbe_rx_queue fields back to defaults */
static void __rte_cold
txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
@@ -265,6 +473,163 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
}
+int __rte_cold
+txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct txgbe_rx_queue *rxq;
+ struct txgbe_hw *hw;
+ uint16_t len;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed the hardware maximum and must be a multiple
+ * of TXGBE_RXD_ALIGN.
+ */
+ if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > TXGBE_RING_DESC_MAX) ||
+ (nb_desc < TXGBE_RING_DESC_MIN)) {
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct txgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
+
+ /*
+ * The packet type in RX descriptor is different for different NICs.
+ * So set different masks for different NICs.
+ */
+ rxq->pkt_type_mask = TXGBE_PTID_MASK;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, TXGBE_ALIGN, socket_id);
+ if (rz == NULL) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Zero init all the descriptors in the ring.
+ */
+ memset(rz->addr, 0, RX_RING_SZ);
+
+ /*
+ * Set the Rx tail and head register addresses; VFs index them by queue_idx, the PF by reg_idx.
+ */
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ rxq->rdt_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
+ rxq->rdh_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
+ } else {
+ rxq->rdt_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
+ rxq->rdh_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
+ }
+
+ rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
+ rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
+
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of Rx queues doesn't meet them
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_bulk_alloc_allowed = false;
+ }
+
+ /*
+ * Allocate software ring. Allow for space at the end of the
+ * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+ * function does not access an invalid memory region.
+ */
+ len = nb_desc;
+ if (adapter->rx_bulk_alloc_allowed)
+ len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+ rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ sizeof(struct txgbe_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_ring) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Always allocate even if it's not going to be needed in order to
+ * simplify the code.
+ *
+ * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+ * be requested in txgbe_dev_rx_init(), which is called later from
+ * dev_start() flow.
+ */
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct txgbe_scattered_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_sc_ring) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+ "dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+ rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ txgbe_reset_rx_queue(adapter, rxq);
+
+ return 0;
+}
+
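One practical note on mb_pool: the ring is populated from it when the port starts, so the pool must hold at least nb_desc mbufs per Rx queue plus headroom for mbufs in flight and per-lcore caches. A rough, assumption-laden sizing sketch (nothing in this patch mandates these exact numbers):

#include <rte_mbuf.h>

/* Illustrative mempool sizing for a single Rx queue of nb_rxd descriptors. */
static struct rte_mempool *
create_rx_pool(uint16_t nb_rxd, int socket_id)
{
	const unsigned int cache = 256;			/* per-lcore cache, rule of thumb */
	const unsigned int n = nb_rxd + 2 * cache + 64;	/* + burst headroom */

	return rte_pktmbuf_pool_create("rx_pool_sketch", n, cache,
				       0, RTE_MBUF_DEFAULT_BUF_SIZE,
				       socket_id);
}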
void __rte_cold
txgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 72cbf1f87..763ce3439 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -50,12 +50,26 @@ struct txgbe_rx_desc {
#define TXGBE_RXD_HDRADDR(rxd, v) \
(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+/**
+ * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
+ **/
+struct txgbe_tx_desc {
+ __le64 qw0; /* r.buffer_addr , w.reserved */
+ __le32 dw2; /* r.cmd_type_len, w.nxtseq_seed */
+ __le32 dw3; /* r.olinfo_status, w.status */
+};
+
#define RTE_PMD_TXGBE_TX_MAX_BURST 32
#define RTE_PMD_TXGBE_RX_MAX_BURST 32
+#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \
+ sizeof(struct txgbe_rx_desc))
+
#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS 10
#define RTE_TXGBE_WAIT_100_US 100
+#define TXGBE_PTID_MASK 0xFF
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -67,6 +81,22 @@ struct txgbe_scattered_rx_entry {
struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct txgbe_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue,
+ * used by the vector Tx path.
+ */
+struct txgbe_tx_entry_v {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
/**
* Structure associated with each RX queue.
*/
@@ -74,6 +104,8 @@ struct txgbe_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
struct txgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
@@ -87,6 +119,7 @@ struct txgbe_rx_queue {
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
uint16_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
@@ -102,13 +135,24 @@ struct txgbe_rx_queue {
* Structure associated with each TX queue.
*/
struct txgbe_tx_queue {
+ /** TX ring virtual address. */
+ volatile struct txgbe_tx_desc *tx_ring;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ union {
+ struct txgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
+ struct txgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+ };
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ volatile uint32_t *tdc_reg_addr; /**< Address of TDC register. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_tail; /**< current value of TDT reg. */
/**< Start freeing TX buffers if there are less free descriptors than
this value. */
uint16_t tx_free_thresh;
+ uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
--
2.18.4
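A closing note on the descriptor layout introduced in txgbe_rxtx.h: struct txgbe_tx_desc is one 64-bit word plus two 32-bit words, i.e. exactly 16 bytes, and txgbe_dev_tx_queue_setup() sizes the ring memzone as sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX. A compile-time guard along these lines (illustrative, not part of the patch) would catch accidental padding:

#include <rte_common.h>

/* Would live inside the driver, where struct txgbe_tx_desc is visible. */
static inline void
txgbe_tx_desc_layout_check(void)
{
	RTE_BUILD_BUG_ON(sizeof(struct txgbe_tx_desc) != 16);
}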