[dpdk-dev] [PATCH v4 2/8] lib/librte_ether: define RX/TX lock mode

Ananyev, Konstantin konstantin.ananyev at intel.com
Tue Jun 7 11:58:36 CEST 2016


Hi Zhe & Wenzhuo,

Please find my comments below.
BTW, for clarification - is that patch for 16.11?
I believe it's too late to introduce such significant change in 16.07.
Thanks
Konstantin

> Define lock mode for RX/TX queue. Because when resetting
> the device we want the resetting thread to get the lock
> of the RX/TX queue to make sure the RX/TX is stopped.
> 
> Using next ABI macro for this ABI change as it has too
> much impact. 7 APIs and 1 global variable are impacted.
> 
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
> Signed-off-by: Zhe Tao <zhe.tao at intel.com>
> ---
>  lib/librte_ether/rte_ethdev.h | 62 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 62 insertions(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> index 74e895f..4efb5e9 100644
> --- a/lib/librte_ether/rte_ethdev.h
> +++ b/lib/librte_ether/rte_ethdev.h
> @@ -354,7 +354,12 @@ struct rte_eth_rxmode {
>  		jumbo_frame      : 1, /**< Jumbo Frame Receipt enable. */
>  		hw_strip_crc     : 1, /**< Enable CRC stripping by hardware. */
>  		enable_scatter   : 1, /**< Enable scatter packets rx handler */
> +#ifndef RTE_NEXT_ABI
>  		enable_lro       : 1; /**< Enable LRO */
> +#else
> +		enable_lro       : 1, /**< Enable LRO */
> +		lock_mode        : 1; /**< Using lock path */
> +#endif
>  };
> 
>  /**
> @@ -634,11 +639,68 @@ struct rte_eth_txmode {
>  		/**< If set, reject sending out tagged pkts */
>  		hw_vlan_reject_untagged : 1,
>  		/**< If set, reject sending out untagged pkts */
> +#ifndef RTE_NEXT_ABI
>  		hw_vlan_insert_pvid : 1;
>  		/**< If set, enable port based VLAN insertion */
> +#else
> +		hw_vlan_insert_pvid : 1,
> +		/**< If set, enable port based VLAN insertion */
> +		lock_mode : 1;
> +		/**< If set, using lock path */
> +#endif
>  };
> 
>  /**
> + * The macros for the RX/TX lock mode functions
> + */
> +#ifdef RTE_NEXT_ABI
> +#define RX_LOCK_FUNCTION(dev, func) \
> +	(dev->data->dev_conf.rxmode.lock_mode ? \
> +	func ## _lock : func)
> +
> +#define TX_LOCK_FUNCTION(dev, func) \
> +	(dev->data->dev_conf.txmode.lock_mode ? \
> +	func ## _lock : func)
> +#else
> +#define RX_LOCK_FUNCTION(dev, func) func
> +
> +#define TX_LOCK_FUNCTION(dev, func) func
> +#endif
> +
> +/* Add the lock RX/TX function for VF reset */
> +#define GENERATE_RX_LOCK(func, nic) \
> +uint16_t func ## _lock(void *rx_queue, \
> +		      struct rte_mbuf **rx_pkts, \
> +		      uint16_t nb_pkts) \
> +{					\
> +	struct nic ## _rx_queue *rxq = rx_queue; \
> +	uint16_t nb_rx = 0; \
> +						\
> +	if (rte_spinlock_trylock(&rxq->rx_lock)) { \
> +		nb_rx = func(rx_queue, rx_pkts, nb_pkts); \
> +		rte_spinlock_unlock(&rxq->rx_lock); \
> +	} \
> +	\
> +	return nb_rx; \
> +}
> +
> +#define GENERATE_TX_LOCK(func, nic) \
> +uint16_t func ## _lock(void *tx_queue, \
> +		      struct rte_mbuf **tx_pkts, \
> +		      uint16_t nb_pkts) \
> +{					\
> +	struct nic ## _tx_queue *txq = tx_queue; \
> +	uint16_t nb_tx = 0; \
> +						\
> +	if (rte_spinlock_trylock(&txq->tx_lock)) { \
> +		nb_tx = func(tx_queue, tx_pkts, nb_pkts); \
> +		rte_spinlock_unlock(&txq->tx_lock); \
> +	} \
> +	\
> +	return nb_tx; \
> +}

1. As I said in the off-line discussion, I think this locking could
(and, I think, had better be) implemented completely at the rte_ethdev layer,
so the actual PMD code would be unaffected.
Again, that would save us from introducing a _lock version of every RX/TX
function in each PMD.

2. Again, as discussed offline, I think it is better to have an explicit
rte_eth_(rx|tx)_burst_lock(sync?) API, instead of adding new fields into the
RX/TX config structures.
That would help to avoid any confusion, I think.

3.  I thought the plan was to introduce a locking in all appropriate control path
functions (dev_start/dev_stop etc.)
Without that locking version of RX/TX seems a bit useless.
Yes, I understand that you do use locking inside dev_reset, but I suppose
the plan was to have a generic solution, no?
Again, an interrupt can fire while the user invokes dev_start/stop or similar,
so we still need some synchronisation between them.

To be more specific, I thought about something like that:

static inline uint16_t
rte_eth_rx_burst_lock(uint8_t port_id, uint16_t queue_id,
                 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

        if (queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
                return 0;
        }
#endif

+ if (rte_spinlock_trylock(&dev->data->rx_queue_state[queue_id].lock) == 0)
+	return 0;
+  else if (dev->data->rx_queue_state[queue_id].state == RTE_ETH_QUEUE_STATE_STOPPED) {
+	rte_spinlock_unlock(&dev->data->rx_queue_state[queue_id].lock);
+	return 0;
+ }
      
 nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                        rx_pkts, nb_pkts);

+ rte_spinlock_unlock(&dev->data->rx_queue_state[queue_id].lock);

....

return nb_rx;
}

And inside queue_start:

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;
        int ret;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

     rte_spinlock_lock(&dev->data->rx_queue_state[rx_queue_id].lock);

        if (dev->data->rx_queue_state[rx_queue_id].state != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                ret = -EINVAL;
        } else
        	ret = dev->dev_ops->rx_queue_start(dev, rx_queue_id);

    rte_spinlock_unlock(&dev->data->rx_queue_state[rx_queue_id].lock);

   return ret;
}

Then again, we don't need to do explicit locking inside dev_reset().
Does it make sense to you guys?


> +
> +/**
>   * A structure used to configure an RX ring of an Ethernet port.
>   */
>  struct rte_eth_rxconf {
> --
> 2.1.4



More information about the dev mailing list