[dpdk-dev] [PATCH v5 07/11] net/virtio: implement transmit path for packed queues
Tiwei Bie
tiwei.bie at intel.com
Thu Sep 13 11:15:19 CEST 2018
On Thu, Sep 06, 2018 at 07:19:43PM +0100, Jens Freimann wrote:
> This implements the transmit path for devices with
> support for packed virtqueues.
>
> Add the feature bit and the code to add buffers to the
> vring and mark descriptors as available.
>
> Signed-off-by: Jens Freimann <jfreimann at redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 8 +-
> drivers/net/virtio/virtio_ethdev.h | 2 +
> drivers/net/virtio/virtio_rxtx.c | 113 ++++++++++++++++++++++++++++-
> 3 files changed, 121 insertions(+), 2 deletions(-)
[...]
> +
> +uint16_t
> +virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct virtnet_tx *txvq = tx_queue;
> + struct virtqueue *vq = txvq->vq;
> + uint16_t i;
> + struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
> + uint16_t idx, prev;
> + struct vq_desc_extra *dxp;
> +
> + if (unlikely(nb_pkts < 1))
> + return nb_pkts;
> +
> + PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
> +
> + if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
> + virtio_xmit_cleanup_packed(vq);
> +
> + for (i = 0; i < nb_pkts; i++) {
> + struct rte_mbuf *txm = tx_pkts[i];
> + struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
> + uint16_t head_idx;
> + int wrap_counter;
> + int descs_used;
> +
> + if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
> + virtio_xmit_cleanup_packed(vq);
> +
> + if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to transmit");
> + break;
> + }
> + }
> +
> + txvq->stats.bytes += txm->pkt_len;
> +
We also need to update the stats by calling
virtio_update_packet_stats().

We also need to handle the offloads, see
virtqueue_xmit_offload().
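
A rough, untested sketch of both, assuming the packed path can
reuse the same helpers as virtio_xmit_pkts(), i.e.
virtqueue_xmit_offload(), virtio_update_packet_stats() and the
hw->has_tx_offload flag:

	struct virtio_net_hdr *hdr =
		(struct virtio_net_hdr *)&txr[idx].tx_hdr;

	/* Fill the virtio-net header (csum/TSO fields) that the
	 * header descriptor below points at.
	 */
	virtqueue_xmit_offload(hdr, txm, vq->hw->has_tx_offload);

	/* Updates the bytes/multicast/broadcast/size_bins counters
	 * too, instead of open-coding stats.bytes only.
	 */
	virtio_update_packet_stats(&txvq->stats, txm);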
> + vq->vq_free_cnt -= txm->nb_segs + 1;
> +
> + wrap_counter = vq->vq_ring.avail_wrap_counter;
> + idx = vq->vq_avail_idx;
> + head_idx = idx;
> +
> + dxp = &vq->vq_descx[idx];
> + if (dxp->cookie != NULL)
> + rte_pktmbuf_free(dxp->cookie);
> + dxp->cookie = txm;
> +
> + desc[idx].addr = txvq->virtio_net_hdr_mem +
> + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
> + desc[idx].len = vq->hw->vtnet_hdr_size;
> + desc[idx].flags = VRING_DESC_F_NEXT |
> + VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> + VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> + descs_used = 1;
> +
> + do {
> + idx = update_pq_avail_index(vq);
> + desc[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
> + desc[idx].len = txm->data_len;
> + desc[idx].flags = VRING_DESC_F_NEXT |
> + VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> + VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> + descs_used++;
> + } while ((txm = txm->next) != NULL);
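
For reference, update_pq_avail_index() comes from an earlier patch
in this series. My reading is that it advances vq_avail_idx and
flips avail_wrap_counter on wrap, roughly (a sketch, not the actual
definition):

	static inline uint16_t
	update_pq_avail_index(struct virtqueue *vq)
	{
		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx = 0;
			vq->vq_ring.avail_wrap_counter ^= 1;
		}
		return vq->vq_avail_idx;
	}

which would be why the per-descriptor flags above re-read
vq->vq_ring.avail_wrap_counter: a chain that crosses the ring
boundary has to use the toggled counter for the wrapped part.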
> +
> + desc[idx].flags &= ~VRING_DESC_F_NEXT;
> +
> + rte_smp_wmb();
> + prev = (idx > 0 ? idx : vq->vq_nentries) - 1;
> + desc[prev].index = head_idx; //FIXME
> + desc[head_idx].flags =
> + (VRING_DESC_F_AVAIL(wrap_counter) |
> + VRING_DESC_F_USED(!wrap_counter));
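
This deferred write of the head flags, after the wmb, is what
publishes the whole chain: in the packed ring a descriptor is
available to the device when its AVAIL bit matches the driver's
wrap counter and its USED bit does not. Assuming the macros follow
the spec layout (AVAIL is bit 7, USED is bit 15), e.g.:

	/* Hypothetical expansion, per the packed ring spec;
	 * not necessarily the definitions used in this series.
	 */
	#define VRING_DESC_F_AVAIL(wc)	((uint16_t)(wc) << 7)
	#define VRING_DESC_F_USED(wc)	((uint16_t)(wc) << 15)

	/* With wrap_counter == 1 the head is published as
	 * flags = AVAIL(1) | USED(0) = 0x0080: AVAIL equals the
	 * driver's wrap counter and USED differs, so the device
	 * sees the descriptor as available.
	 */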
> +
> + vq->vq_descx[head_idx].ndescs = descs_used;
> + idx = update_pq_avail_index(vq);
> + }
> +
> + txvq->stats.packets += i;
> + txvq->stats.errors += nb_pkts - i;
> +
> + return i;
> +}
> +
> int
> virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
> {
> @@ -736,7 +842,12 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
> if (hw->use_inorder_tx)
> vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
>
> - VIRTQUEUE_DUMP(vq);
> + if (vtpci_packed_queue(hw)) {
> + vq->vq_ring.avail_wrap_counter = 1;
> + }
> +
> + if (!vtpci_packed_queue(hw))
> + VIRTQUEUE_DUMP(vq);
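
Nit: the two vtpci_packed_queue() checks above could be folded
into a single if/else, e.g.:

	if (vtpci_packed_queue(hw))
		vq->vq_ring.avail_wrap_counter = 1;
	else
		VIRTQUEUE_DUMP(vq);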
>
> return 0;
> }
> --
> 2.17.1
>