[dpdk-dev] [PATCH v5 resend 04/12] vhost: rxtx: prepare work for multiple queue support

Yuanhan Liu yuanhan.liu at linux.intel.com
Tue Sep 22 04:54:09 CEST 2015


On Mon, Sep 21, 2015 at 12:04:16PM +0300, Michael S. Tsirkin wrote:
> On Mon, Sep 21, 2015 at 10:25:18AM +0800, Yuanhan Liu wrote:
> > On Sun, Sep 20, 2015 at 12:29:17PM +0300, Michael S. Tsirkin wrote:
> > > On Fri, Sep 18, 2015 at 11:10:53PM +0800, Yuanhan Liu wrote:
> > > > From: Changchun Ouyang <changchun.ouyang at intel.com>
> > > > 
> > > > Do not use VIRTIO_RXQ or VIRTIO_TXQ anymore; use the queue_id
> > > > instead, which will be set to a proper value for a specific queue
> > > > when we have multiple queue support enabled.
> > > > 
> > > > For now, queue_id is still set with VIRTIO_RXQ or VIRTIO_TXQ,
> > > > so it should not break anything.
> > > > 
> > > > Signed-off-by: Changchun Ouyang <changchun.ouyang at intel.com>
> > > > Signed-off-by: Yuanhan Liu <yuanhan.liu at linux.intel.com>
> > > 
> > > What I'm missing is the logic that tracks which tx vq
> > > a specific flow used, so traffic can be sent back on the correct
> > > rx vq.
> > > 
> > > Is this something added by one of the follow-up patches?
> > 
> > Yeah, and also, that logic is offloaded to the application (examples/
> > vhost/vhost-switch): patch 11 does the job.
> > 
> > 	--yliu
> 
> Couldn't find anything dealing with flows there ...
> Could you be more specific pls?

Sure.

For the following code snippet from new_device():

    2868         /* Find a suitable lcore to add the device. */
    2869         for (i = 0; i < rxq; i++) {
    2870                 device_num_min = num_devices;
    2871                 RTE_LCORE_FOREACH_SLAVE(lcore) {
    2872                         if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
    2873                                 device_num_min = lcore_info[lcore].lcore_ll->device_num;
    2874                                 core_add = lcore;
    2875                         }
    2876                 }

    ...

    2888                 ll_dev->work_qp_idx = i;
    2889                 rte_vhost_core_id_set(dev, i, core_add);
    2890                 add_data_ll_entry(&lcore_info[core_add].lcore_ll->ll_root_used, ll_dev);
    2891
    ...
    2895                 lcore_info[core_add].lcore_ll->device_num++;
    2896                 RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d for vq: %d\n",
    2897                         dev->device_fh, core_add, i);
    2898         }


It finds the least-loaded lcore and binds one vq pair to it, so each
queue pair of a device can be served by a different core. Here are the
log messages that might help you understand it better:

    VHOST_DATA: (0) Device has been added to data core 1 for vq: 0
    VHOST_DATA: (0) Device has been added to data core 2 for vq: 1
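
To make the effect of that binding concrete, here is a minimal sketch
(simplified; not the actual vhost-switch code) of what each data core
then does: it only walks the (device, qp) entries that new_device()
linked onto its own list, so a given queue pair is always serviced by
the same lcore:

    struct virtio_net_data_ll *dev_ll;

    /* Only the entries new_device() bound to this core are on
     * this list, one per (device, queue pair). */
    dev_ll = lcore_info[rte_lcore_id()].lcore_ll->ll_root_used;
    while (dev_ll != NULL) {
            uint16_t qp_idx = dev_ll->work_qp_idx;

            /* ... poll queue pair 'qp_idx' of this device ... */
            dev_ll = dev_ll->next;
    }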



For the following code snippet from switch_worker():

    1433           if (likely(!vdev->remove)) {
    1434                   /* Handle guest TX*/
    1435                   uint16_t qp_idx = dev_ll->work_qp_idx;
    1436                   tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ + qp_idx * VIRTIO_QNUM,
    1437                                   mbuf_pool, pkts_burst, MAX_PKT_BURST);
    1438                   /*
    1439                    * If this is the first received packet we need to learn
    1440                    * the MAC and setup VMDQ
    1441                    */
    1442                   if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
    1443                           if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
    1444                                   while (tx_count)
    1445                                           rte_pktmbuf_free(pkts_burst[--tx_count]);
    1446                           }
    1447                   }
    1448                   while (tx_count)
    1449                           virtio_tx_route(vdev, pkts_burst[--tx_count],
    1450                                   (uint16_t)dev->device_fh, qp_idx);
    1451           }
    

Line 1435 gets the right qp index for the current lcore, and
line 1436 then dequeues the packets from the corresponding TX
virtqueue of the guest device.
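
The qp-to-virtqueue mapping used on line 1436 is plain arithmetic; a
short sketch, assuming the standard DPDK vhost definitions
(VIRTIO_RXQ = 0, VIRTIO_TXQ = 1, VIRTIO_QNUM = 2):

    /* Queue pair 'qp_idx' owns two adjacent virtqueues: */
    uint16_t rx_q = VIRTIO_RXQ + qp_idx * VIRTIO_QNUM; /* even: 0, 2, 4, ... */
    uint16_t tx_q = VIRTIO_TXQ + qp_idx * VIRTIO_QNUM; /* odd:  1, 3, 5, ... */

That parity is also what the is_valid_virt_queue_idx() helper in this
patch checks with (is_tx ^ (virtq_idx & 0x1)).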

Lines 1448-1450 then deliver them one by one to the corresponding
TX queue by invoking the virtio_tx_route() function.
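
For the VM-to-VM case, that delivery boils down to an enqueue on the
RX virtqueue of the same queue pair index on the destination device.
A simplified sketch ('dst_dev' is a hypothetical name here; the real
virtio_tx_route() also handles MAC lookup and the physical port TX
path):

    /* Hand a burst received on queue pair 'qp_idx' back to the
     * RX virtqueue of the same pair on the destination device. */
    rte_vhost_enqueue_burst(dst_dev,
            VIRTIO_RXQ + qp_idx * VIRTIO_QNUM,
            pkts_burst, tx_count);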

And note that the vhost example code is a bit messy. I planned to
do some cleanups, but I'd like to do that only after this patch set
has been merged, to avoid adding unnecessary churn to this patch set
as well as to the patch review.


	--yliu
> 
> > > 
> > > 
> > > > ---
> > > >  lib/librte_vhost/vhost_rxtx.c | 46 ++++++++++++++++++++++++++++++-------------
> > > >  1 file changed, 32 insertions(+), 14 deletions(-)
> > > > 
> > > > diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
> > > > index b2b2bcc..a4ab6ca 100644
> > > > --- a/lib/librte_vhost/vhost_rxtx.c
> > > > +++ b/lib/librte_vhost/vhost_rxtx.c
> > > > @@ -42,6 +42,16 @@
> > > >  
> > > >  #define MAX_PKT_BURST 32
> > > >  
> > > > +static inline int __attribute__((always_inline))
> > > > +is_valid_virt_queue_idx(uint32_t virtq_idx, int is_tx, uint32_t max_qp_idx)
> > > > +{
> > > > +	if ((is_tx ^ (virtq_idx & 0x1)) ||
> > > > +	    (virtq_idx >= max_qp_idx * VIRTIO_QNUM))
> > > > +		return 0;
> > > > +
> > > > +	return 1;
> > > > +}
> > > > +
> > > >  /**
> > > >   * This function adds buffers to the virtio devices RX virtqueue. Buffers can
> > > >   * be received from the physical port or from another virtio device. A packet
> > > > @@ -68,12 +78,14 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> > > >  	uint8_t success = 0;
> > > >  
> > > >  	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
> > > > -	if (unlikely(queue_id != VIRTIO_RXQ)) {
> > > > -		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > > +	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
> > > > +		RTE_LOG(ERR, VHOST_DATA,
> > > > +			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > > +			__func__, dev->device_fh, queue_id);
> > > >  		return 0;
> > > >  	}
> > > >  
> > > > -	vq = dev->virtqueue[VIRTIO_RXQ];
> > > > +	vq = dev->virtqueue[queue_id];
> > > >  	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
> > > >  
> > > >  	/*
> > > > @@ -235,8 +247,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
> > > >  }
> > > >  
> > > >  static inline uint32_t __attribute__((always_inline))
> > > > -copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
> > > > -	uint16_t res_end_idx, struct rte_mbuf *pkt)
> > > > +copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
> > > > +			uint16_t res_base_idx, uint16_t res_end_idx,
> > > > +			struct rte_mbuf *pkt)
> > > >  {
> > > >  	uint32_t vec_idx = 0;
> > > >  	uint32_t entry_success = 0;
> > > > @@ -264,7 +277,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
> > > >  	 * Convert from gpa to vva
> > > >  	 * (guest physical addr -> vhost virtual addr)
> > > >  	 */
> > > > -	vq = dev->virtqueue[VIRTIO_RXQ];
> > > > +	vq = dev->virtqueue[queue_id];
> > > >  	vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
> > > >  	vb_hdr_addr = vb_addr;
> > > >  
> > > > @@ -464,11 +477,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> > > >  
> > > >  	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
> > > >  		dev->device_fh);
> > > > -	if (unlikely(queue_id != VIRTIO_RXQ)) {
> > > > -		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > > +	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
> > > > +		RTE_LOG(ERR, VHOST_DATA,
> > > > +			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > > +			__func__, dev->device_fh, queue_id);
> > > > +		return 0;
> > > >  	}
> > > >  
> > > > -	vq = dev->virtqueue[VIRTIO_RXQ];
> > > > +	vq = dev->virtqueue[queue_id];
> > > >  	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
> > > >  
> > > >  	if (count == 0)
> > > > @@ -509,8 +525,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
> > > >  							res_cur_idx);
> > > >  		} while (success == 0);
> > > >  
> > > > -		entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
> > > > -			res_cur_idx, pkts[pkt_idx]);
> > > > +		entry_success = copy_from_mbuf_to_vring(dev, queue_id,
> > > > +			res_base_idx, res_cur_idx, pkts[pkt_idx]);
> > > >  
> > > >  		rte_compiler_barrier();
> > > >  
> > > > @@ -559,12 +575,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
> > > >  	uint16_t free_entries, entry_success = 0;
> > > >  	uint16_t avail_idx;
> > > >  
> > > > -	if (unlikely(queue_id != VIRTIO_TXQ)) {
> > > > -		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
> > > > +	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
> > > > +		RTE_LOG(ERR, VHOST_DATA,
> > > > +			"%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
> > > > +			__func__, dev->device_fh, queue_id);
> > > >  		return 0;
> > > >  	}
> > > >  
> > > > -	vq = dev->virtqueue[VIRTIO_TXQ];
> > > > +	vq = dev->virtqueue[queue_id];
> > > >  	avail_idx =  *((volatile uint16_t *)&vq->avail->idx);
> > > >  
> > > >  	/* If there are no available buffers then return. */
> > > > -- 
> > > > 1.9.0

