[dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done
Tiwei Bie
tiwei.bie at intel.com
Wed Sep 18 04:34:33 CEST 2019
On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> When doing in-order xmit enqueue, packets are buffered and then flushed
> into the avail ring. Buffered packets can be dropped due to insufficient
> space. Moving the stats update to just after the successful avail ring
> update guarantees correctness.
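
Just to restate the ordering for clarity: the stats must only be
updated for packets that actually made it into the avail ring, so that
dropped buffered packets are never counted. A minimal sketch of the
intended flow (hypothetical helper name, not the actual code):

	/*
	 * Sketch only -- "flush_buffered_to_avail" is a made-up name.
	 * Updating stats after the flush succeeds guarantees packets
	 * dropped for lack of ring space are not counted.
	 */
	nb_ok = flush_buffered_to_avail(vq, pkts, nb_buffered);
	for (i = 0; i < nb_ok; i++)
		virtio_update_packet_stats(&txvq->stats, pkts[i]);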
>
> Signed-off-by: Marvin Liu <yong.liu at intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> 1 file changed, 44 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 27ead19fb..d3ca36831 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
> dxp->next = VQ_RING_DESC_CHAIN_END;
> }
>
> +static inline void
> +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> +{
> + uint32_t s = mbuf->pkt_len;
> + struct rte_ether_addr *ea;
> +
> + stats->bytes += s;
> +
> + if (s == 64) {
> + stats->size_bins[1]++;
> + } else if (s > 64 && s < 1024) {
> + uint32_t bin;
> +
> + /* count zeros, and offset into correct bin */
> + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> + stats->size_bins[bin]++;
> + } else {
> + if (s < 64)
> + stats->size_bins[0]++;
> + else if (s < 1519)
> + stats->size_bins[6]++;
> + else
> + stats->size_bins[7]++;
> + }
> +
> + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> + if (rte_is_multicast_ether_addr(ea)) {
> + if (rte_is_broadcast_ether_addr(ea))
> + stats->broadcast++;
> + else
> + stats->multicast++;
> + }
> +}
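
The size_bins[] arithmetic above is easy to misread, so here is a
quick standalone check of it -- my own test program, not part of the
patch:

	#include <stdio.h>

	/* 32 - clz(s) - 5 maps 65..1023 bytes onto size_bins[2..5]
	 * by power-of-two ranges; the remaining bins are handled by
	 * the explicit comparisons. */
	int main(void)
	{
		unsigned int sizes[] = { 60, 64, 100, 200, 512, 1000, 1400, 1600 };
		unsigned int i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
			unsigned int s = sizes[i], bin;

			if (s == 64)
				bin = 1;
			else if (s > 64 && s < 1024)
				bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
			else if (s < 64)
				bin = 0;
			else if (s < 1519)
				bin = 6;
			else
				bin = 7;
			printf("%4u bytes -> size_bins[%u]\n", s, bin);
		}
		return 0;
	}

It prints bins matching the RMON-style histogram: [0] <64, [1] =64,
[2] 65..127, [3] 128..255, [4] 256..511, [5] 512..1023,
[6] 1024..1518, [7] >=1519.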
> +
> +static inline void
> +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> +{
> + VIRTIO_DUMP_PACKET(m, m->data_len);
> +
> + virtio_update_packet_stats(&rxvq->stats, m);
> +}
> +
> static uint16_t
> virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> struct rte_mbuf **rx_pkts,
> @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
> }
>
> /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
> virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> {
> uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
> dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> dxp->cookie = (void *)cookies[i];
> dxp->ndescs = 1;
> + virtio_update_packet_stats(&txvq->stats, cookies[i]);
The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
should be removed.
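With this helper now counting each cookie it places in the avail ring,
keeping the caller's own per-packet update as well would count every
packet twice. Roughly (simplified, not the actual function body):

	/* Sketch only: the helper already updates the stats... */
	virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, nb_inorder_pkts);
	/*
	 * ...so the old per-packet loop in virtio_xmit_pkts_inorder(),
	 *
	 *	for (i = 0; i < nb_inorder_pkts; i++)
	 *		virtio_update_packet_stats(&txvq->stats,
	 *			inorder_pkts[i]);
	 *
	 * has to go away.
	 */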
>
> hdr = (struct virtio_net_hdr *)
> rte_pktmbuf_prepend(cookies[i], head_size);
> @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
> }
> }
>
> -static inline void
> -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> -{
> - uint32_t s = mbuf->pkt_len;
> - struct rte_ether_addr *ea;
> -
> - stats->bytes += s;
> -
> - if (s == 64) {
> - stats->size_bins[1]++;
> - } else if (s > 64 && s < 1024) {
> - uint32_t bin;
> -
> - /* count zeros, and offset into correct bin */
> - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> - stats->size_bins[bin]++;
> - } else {
> - if (s < 64)
> - stats->size_bins[0]++;
> - else if (s < 1519)
> - stats->size_bins[6]++;
> - else
> - stats->size_bins[7]++;
> - }
> -
> - ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> - if (rte_is_multicast_ether_addr(ea)) {
> - if (rte_is_broadcast_ether_addr(ea))
> - stats->broadcast++;
> - else
> - stats->multicast++;
> - }
> -}
> -
> -static inline void
> -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> -{
> - VIRTIO_DUMP_PACKET(m, m->data_len);
> -
> - virtio_update_packet_stats(&rxvq->stats, m);
> -}
> -
> /* Optionally fill offload information in structure */
> static inline int
> virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> --
> 2.17.1
>