[dpdk-dev] [PATCH v2 3/3] ixgbe: Unify the rx_pkt_bulk callback initialization

Ananyev, Konstantin konstantin.ananyev at intel.com
Wed Mar 11 18:18:56 CET 2015



> -----Original Message-----
> From: Vlad Zolotarov [mailto:vladz at cloudius-systems.com]
> Sent: Wednesday, March 11, 2015 3:40 PM
> To: Ananyev, Konstantin; dev at dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 3/3] ixgbe: Unify the rx_pkt_bulk callback initialization
> 
> 
> 
> On 03/11/15 14:45, Ananyev, Konstantin wrote:
> >
> >> -----Original Message-----
> >> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Vlad Zolotarov
> >> Sent: Monday, March 09, 2015 4:29 PM
> >> To: dev at dpdk.org
> >> Subject: [dpdk-dev] [PATCH v2 3/3] ixgbe: Unify the rx_pkt_bulk callback initialization
> >>
> >>     - Set the callback in a single function that is called from
> >>       ixgbe_dev_rx_init() for the primary process and from eth_ixgbe_dev_init()
> >>       for secondary processes, instead of in multiple, hard-to-track places.
> >>     - Added ixgbe_hw.rx_bulk_alloc_allowed - see ixgbe_hw.rx_vec_allowed description below.
> >>     - Added ixgbe_hw.rx_vec_allowed: like with Bulk Allocation, Vector Rx is
> >>       enabled or disabled on a per-port level. All queues have to meet the appropriate
> >>       preconditions and if any of them doesn't - the feature has to be disabled.
> >>       Therefore ixgbe_hw.rx_vec_allowed will be updated during each queue's configuration
> >>       (rte_eth_rx_queue_setup()) and then used in rte_eth_dev_start() to configure the
> >>       appropriate callbacks. The same happens with ixgbe_hw.rx_bulk_alloc_allowed in a
> >>       Bulk Allocation context.
> >>     - Bugs fixed:
> >>        - The vector scattered packets callback was called regardless of the appropriate
> >>          preconditions:
> >>           - Vector Rx specific preconditions.
> >>           - Bulk Allocation preconditions.
> >>        - Vector Rx was enabled/disabled according to the last queue's settings and not
> >>          based on the settings of all queues (which may differ between queues).
> >>
> >> Signed-off-by: Vlad Zolotarov <vladz at cloudius-systems.com>
> >> ---
> >> New in v2:
> >>     - Fixed a broken compilation.
> >> ---
> >>   lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h |   2 +
> >>   lib/librte_pmd_ixgbe/ixgbe_ethdev.c     |  13 ++-
> >>   lib/librte_pmd_ixgbe/ixgbe_rxtx.c       | 183 +++++++++++++++++++++-----------
> >>   lib/librte_pmd_ixgbe/ixgbe_rxtx.h       |  22 +++-
> >>   4 files changed, 152 insertions(+), 68 deletions(-)
> >>
> >> diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
> >> index c67d462..9a66370 100644
> >> --- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
> >> +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
> >> @@ -3657,6 +3657,8 @@ struct ixgbe_hw {
> >>   	bool force_full_reset;
> >>   	bool allow_unsupported_sfp;
> >>   	bool wol_enabled;
> >> +	bool rx_bulk_alloc_allowed;
> >> +	bool rx_vec_allowed;
> >>   };
> >>
> >>   #define ixgbe_call_func(hw, func, params, error) \
> >> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> >> index 9bdc046..9d3de1a 100644
> >> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> >> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> >> @@ -760,8 +760,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> >>   			                   "Using default TX function.");
> >>   		}
> >>
> >> -		if (eth_dev->data->scattered_rx)
> >> -			eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
> >> +		set_rx_function(eth_dev);
> >> +
> >>   		return 0;
> >>   	}
> >>   	pci_dev = eth_dev->pci_dev;
> >> @@ -772,6 +772,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
> >>   	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
> >>   	hw->allow_unsupported_sfp = 1;
> >>
> >> +	/*
> >> +	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
> >> +	 * allocation or vector Rx preconditions we will reset it.
> >> +	 */
> >> +	hw->rx_bulk_alloc_allowed = true;
> >> +	hw->rx_vec_allowed = true;
> >> +
> >>   	/* Initialize the shared code (base driver) */
> >>   #ifdef RTE_NIC_BYPASS
> >>   	diag = ixgbe_bypass_init_shared_code(hw);
> >> @@ -1641,6 +1648,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
> >>
> >>   	/* Clear stored conf */
> >>   	dev->data->scattered_rx = 0;
> >> +	hw->rx_bulk_alloc_allowed = false;
> >> +	hw->rx_vec_allowed = false;
> > If dev_stop() sets it to 'false', who will reset it back to 'true' then?
> 
> Oops. I'm afraid I've messed things up a bit... ;) I spotted this
> issue when I checked the fix for the dev_stop()->dev_start() issue in
> the LRO patch. As a reminder, you noticed that I wasn't freeing the
> not-yet-completed RSC aggregations in ixgbe_rx_queue_release_mbufs(). So
> when I was verifying that fix I noticed the issue with the flags above
> and fixed it. But I put the fix in PATCH 3 of v7 of the LRO series
> instead of putting it here... ;) I think I'll have to respin both series
> again... Ufff.... :(
> 
> >
> >>   	/* Clear recorded link status */
> >>   	memset(&link, 0, sizeof(link));
> >> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> >> index ce9658e..a00f5c9 100644
> >> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> >> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> >> @@ -2074,12 +2074,12 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
> >>
> >>   /* Reset dynamic igb_rx_queue fields back to defaults */
> >>   static void
> >> -ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
> >> +ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct igb_rx_queue *rxq)
> >>   {
> >>   	static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
> >>   			.pkt_addr = 0}};
> >>   	unsigned i;
> >> -	uint16_t len;
> >> +	uint16_t len = rxq->nb_rx_desc;
> >>
> >>   	/*
> >>   	 * By default, the Rx queue setup function allocates enough memory for
> >> @@ -2091,14 +2091,9 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
> >>   	 * constraints here to see if we need to zero out memory after the end
> >>   	 * of the H/W descriptor ring.
> >>   	 */
> >> -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
> >> -	if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
> >> +	if (hw->rx_bulk_alloc_allowed)
> >>   		/* zero out extra memory */
> >> -		len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
> >> -	else
> >> -#endif
> >> -		/* do not zero out extra memory */
> >> -		len = rxq->nb_rx_desc;
> >> +		len += RTE_PMD_IXGBE_RX_MAX_BURST;
> >>
> >>   	/*
> >>   	 * Zero out HW ring memory. Zero out extra memory at the end of
> >> @@ -2140,7 +2135,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> >>   	const struct rte_memzone *rz;
> >>   	struct igb_rx_queue *rxq;
> >>   	struct ixgbe_hw     *hw;
> >> -	int use_def_burst_func = 1;
> >>   	uint16_t len;
> >>
> >>   	PMD_INIT_FUNC_TRACE();
> >> @@ -2221,15 +2215,27 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> >>   	rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
> >>
> >>   	/*
> >> +	 * Certain constraints must be met in order to use the bulk buffer
> >> +	 * allocation Rx burst function. If any of Rx queues doesn't meet them
> >> +	 * the feature should be disabled for the whole port.
> >> +	 */
> >> +	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
> >> +		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
> >> +				    "preconditions - canceling the feature for "
> >> +				    "the whole port[%d]",
> >> +			     rxq->queue_id, rxq->port_id);
> >> +		hw->rx_bulk_alloc_allowed = false;
> >> +	}
> >> +
> >> +	/*
> >>   	 * Allocate software ring. Allow for space at the end of the
> >>   	 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
> >>   	 * function does not access an invalid memory region.
> >>   	 */
> >> -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
> >> -	len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
> >> -#else
> >>   	len = nb_desc;
> >> -#endif
> >> +	if (hw->rx_bulk_alloc_allowed)
> >> +		len += RTE_PMD_IXGBE_RX_MAX_BURST;
> >> +
> > Looking at it: it is probably easier and cleaner to remove all these ifdef/else blocks
> > and always set len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST;
> 
> Sounds reasonable. Since we're talking about it - could you please explain to me
> why all this padding is needed in the first place? I must say I don't
> exactly understand why it is needed at all... ;)

As I remember: to speed things up, ixgbe_rx_scan_hw_ring() always scans HW descriptors in bulks of 8.
So even if we have 128 RXDs used by HW and rx_tail == 125, it would scan the rx_ring[125-132] entries.
So after the proper 128 RXDs that will be accessed by HW, we set up RTE_PMD_IXGBE_RX_MAX_BURST
extra RXDs. They are supposed to be zeroed by SW at startup and should never be overwritten by HW,
so their DD bit will always be 0.
The same goes for the corresponding sw_ring[] entries.
Now, while thinking about it, I couldn't remember why we need a whole RTE_PMD_IXGBE_RX_MAX_BURST
worth of fake entries and not just LOOK_AHEAD...
But anyway, that's the idea behind it.
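
To make the idea concrete, here is a rough sketch of that scan - not the real
ixgbe_rx_scan_hw_ring(), just a simplified illustration assuming LOOK_AHEAD == 8
and the usual ixgbe_rxtx.h / ixgbe_type.h definitions being available:

	/*
	 * Sketch only: counts completed descriptors in one bulk of 8,
	 * error handling and the outer burst loop are omitted.
	 */
	static inline int
	scan_one_bulk(struct igb_rx_queue *rxq)
	{
		volatile union ixgbe_adv_rx_desc *rxdp = &rxq->rx_ring[rxq->rx_tail];
		int nb_dd = 0;
		int j;

		/*
		 * Always look at a full bulk of 8 descriptors, even near the ring
		 * end: with 128 "real" RXDs and rx_tail == 125 this reads
		 * rx_ring[125..132]. Entries 128..132 are the SW-zeroed padding
		 * that HW never writes, so their DD bit stays 0 and they are
		 * never counted as completed.
		 */
		for (j = 0; j < 8; j++)
			if (rxdp[j].wb.upper.status_error &
			    rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
				nb_dd++;

		return nb_dd;
	}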

About always setting len = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST -
I probably spoke too early :(
On second thought - there could be a situation where the user requested IXGBE_MAX_RING_DESC RXDs
in total with 'plain' (no bulk) recv.
In that case our len might be bigger than the rx_ring memzone.
Of course we could always reserve (IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) entries for rx_ring...
But that's probably too many unrelated changes for this patch.
Safer to leave it as it is for now.
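
A quick worked example of the concern (the exact constants are from memory,
so treat them as assumptions):

	/*
	 * Hypothetical worst case, assuming IXGBE_MAX_RING_DESC == 4096 and
	 * RTE_PMD_IXGBE_RX_MAX_BURST == 32 (values as I recall them from
	 * ixgbe_rxtx.h):
	 *
	 *   nb_desc = 4096
	 *   len     = nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST = 4128
	 *
	 * but the rx_ring memzone was reserved for only IXGBE_MAX_RING_DESC
	 * (4096) descriptors, so unconditionally zeroing 'len' descriptors in
	 * ixgbe_reset_rx_queue() would write past the end of that memzone.
	 */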

> 
> >
> >
> >>   	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
> >>   					  sizeof(struct igb_rx_entry) * len,
> >>   					  RTE_CACHE_LINE_SIZE, socket_id);
> >> @@ -2240,42 +2246,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
> >>   	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
> >>   		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
> >>
> >> -	/*
> >> -	 * Certain constraints must be met in order to use the bulk buffer
> >> -	 * allocation Rx burst function.
> >> -	 */
> >> -	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
> >> +	if (!rte_is_power_of_2(nb_desc)) {
> >> +		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
> >> +				    "preconditions - canceling the feature for "
> >> +				    "the whole port[%d]",
> >> +			     rxq->queue_id, rxq->port_id);
> >> +		hw->rx_vec_allowed = false;
> >> +	} else
> >> +		ixgbe_rxq_vec_setup(rxq);
> >>
> >> -#ifdef RTE_IXGBE_INC_VECTOR
> >> -	ixgbe_rxq_vec_setup(rxq);
> >> -#endif
> >> -	/* Check if pre-conditions are satisfied, and no Scattered Rx */
> >> -	if (!use_def_burst_func && !dev->data->scattered_rx) {
> >> -#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
> >> -		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> >> -			     "satisfied. Rx Burst Bulk Alloc function will be "
> >> -			     "used on port=%d, queue=%d.",
> >> -			     rxq->port_id, rxq->queue_id);
> >> -		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
> >> -#ifdef RTE_IXGBE_INC_VECTOR
> >> -		if (!ixgbe_rx_vec_condition_check(dev) &&
> >> -		    (rte_is_power_of_2(nb_desc))) {
> >> -			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
> >> -				     "sure RX burst size no less than 32.");
> >> -			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
> >> -		}
> >> -#endif
> >> -#endif
> >> -	} else {
> >> -		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
> >> -			     "are not satisfied, Scattered Rx is requested, "
> >> -			     "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
> >> -			     "enabled (port=%d, queue=%d).",
> >> -			     rxq->port_id, rxq->queue_id);
> >> -	}
> >>   	dev->data->rx_queues[queue_idx] = rxq;
> >>
> >> -	ixgbe_reset_rx_queue(rxq);
> >> +	ixgbe_reset_rx_queue(hw, rxq);
> >>
> >>   	return 0;
> >>   }
> >> @@ -2329,6 +2311,7 @@ void
> >>   ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
> >>   {
> >>   	unsigned i;
> >> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> >>
> >>   	PMD_INIT_FUNC_TRACE();
> >>
> >> @@ -2344,7 +2327,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
> >>   		struct igb_rx_queue *rxq = dev->data->rx_queues[i];
> >>   		if (rxq != NULL) {
> >>   			ixgbe_rx_queue_release_mbufs(rxq);
> >> -			ixgbe_reset_rx_queue(rxq);
> >> +			ixgbe_reset_rx_queue(hw, rxq);
> >>   		}
> >>   	}
> >>   }
> >> @@ -3506,6 +3489,58 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
> >>   	return 0;
> >>   }
> >>
> >> +void set_rx_function(struct rte_eth_dev *dev)
> >> +{
> >> +	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> >> +
> >> +	if (ixgbe_rx_vec_condition_check(dev)) {
> >> +		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
> >> +				    "preconditions or RTE_IXGBE_INC_VECTOR is "
> >> +				    "not enabled",
> >> +			     dev->data->port_id);
> >> +		hw->rx_vec_allowed = false;
> >> +	}
> >> +
> >> +	/* Check if bulk alloc is allowed and no Scattered Rx */
> >> +	if (hw->rx_bulk_alloc_allowed && !dev->data->scattered_rx) {
> >> +		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> >> +				    "satisfied. Rx Burst Bulk Alloc function "
> >> +				    "will be used on port=%d.",
> >> +			     dev->data->port_id);
> >> +		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
> >> +
> >> +		if (hw->rx_vec_allowed) {
> >> +			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
> >> +					   "sure RX burst size no less "
> >> +					   "than 32.");
> >> +			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
> >> +		}
> >> +	} else {
> >> +		dev->rx_pkt_burst = ixgbe_recv_pkts;
> >> +
> >> +		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
> >> +				    "satisfied, or Scattered Rx is requested, "
> >> +				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
> >> +				    "is not enabled (port=%d).",
> >> +			     dev->data->port_id);
> >> +	}
> >> +
> >> +	if (dev->data->scattered_rx) {
> >> +		if (hw->rx_vec_allowed && hw->rx_bulk_alloc_allowed) {
> >> +			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
> >> +					    "callback (port=%d).",
> >> +				     dev->data->port_id);
> >> +			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
> >> +		} else {
> >> +			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
> >> +					    "Scattered Rx callback "
> >> +					    "(port=%d).",
> >> +				     dev->data->port_id);
> >> +			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
> >> +		}
> >> +	}
> >> +}
> > If scattered_rx != 0 it will set rx_pkt_burst twice.
> 
> I don't understand what you mean here. If scattered_rx != 0, then if the vector
> Rx preconditions are met, rx_pkt_burst will be set to the vectorized
> version of the callback, and if not - to the non-vectorized one. It will be
> exactly one assignment... What am I missing? ;)

I meant that for 'dev->data->scattered_rx == 1' we would first assign rx_pkt_burst here:

	if (hw->rx_bulk_alloc_allowed && !dev->data->scattered_rx) {
		...
	} else {
		dev->rx_pkt_burst = ixgbe_recv_pkts;
	}

And then reassign it again here:

	if (dev->data->scattered_rx) {
		...
	}
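
In other words, with the reordering I suggested (quoted below) each path would
assign it exactly once - roughly like this (just a sketch, logging omitted):

	/* Sketch only: scattered case handled first, so rx_pkt_burst is
	 * assigned exactly once per call. */
	void set_rx_function(struct rte_eth_dev *dev)
	{
		struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

		if (ixgbe_rx_vec_condition_check(dev))
			hw->rx_vec_allowed = false;

		if (dev->data->scattered_rx) {
			/* Vector scattered Rx needs both vec and bulk alloc preconditions. */
			if (hw->rx_vec_allowed && hw->rx_bulk_alloc_allowed)
				dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
			else
				dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
		} else if (hw->rx_bulk_alloc_allowed) {
			dev->rx_pkt_burst = hw->rx_vec_allowed ?
				ixgbe_recv_pkts_vec : ixgbe_recv_pkts_bulk_alloc;
		} else {
			dev->rx_pkt_burst = ixgbe_recv_pkts;
		}
	}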

> 
> > Nothing wrong, but looks a bit clumsy.
> > Might be a bit clearer to reorder it:
> >
> > If (scattered_rx) {
> >     ...
> > } else if (rx_bulk_alloc_allowed) {
> >     ...
> > } else {
> >     ...
> > }
> >
> >
> >> +
> >>   /*
> >>    * Initializes Receive Unit.
> >>    */
> >> @@ -3641,23 +3676,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
> >>   		buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
> >>   				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
> >>
> >> -		if (dev->data->dev_conf.rxmode.enable_scatter ||
> >> -		    /* It adds dual VLAN length for supporting dual VLAN */
> >> -		    (dev->data->dev_conf.rxmode.max_rx_pkt_len +
> >> -				2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
> >> -			if (!dev->data->scattered_rx)
> >> -				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
> >> +		/* It adds dual VLAN length for supporting dual VLAN */
> >> +		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
> >> +					    2 * IXGBE_VLAN_TAG_SIZE > buf_size)
> >>   			dev->data->scattered_rx = 1;
> >> -#ifdef RTE_IXGBE_INC_VECTOR
> >> -			if (rte_is_power_of_2(rxq->nb_rx_desc))
> >> -				dev->rx_pkt_burst =
> >> -					ixgbe_recv_scattered_pkts_vec;
> >> -			else
> >> -#endif
> >> -				dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
> >> -		}
> >>   	}
> >>
> >> +	if (dev->data->dev_conf.rxmode.enable_scatter)
> >> +		dev->data->scattered_rx = 1;
> >> +
> >> +	set_rx_function(dev);
> >> +
> >>   	/*
> >>   	 * Device configured with multiple RX queues.
> >>   	 */
> >> @@ -3933,7 +3962,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> >>   		rte_delay_us(RTE_IXGBE_WAIT_100_US);
> >>
> >>   		ixgbe_rx_queue_release_mbufs(rxq);
> >> -		ixgbe_reset_rx_queue(rxq);
> >> +		ixgbe_reset_rx_queue(hw, rxq);
> >>   	} else
> >>   		return -1;
> >>
> >> @@ -4289,3 +4318,31 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
> >>
> >>   	}
> >>   }
> >> +
> >> +/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
> >> +#ifndef RTE_IXGBE_INC_VECTOR
> > Instead of the ifndef above, can the functions below be defined with __attribute__((weak))?
> 
> Yes, they could. Do you think it would be more readable then? What I mean
> here is that DPDK is full of ifdefs because there are a million
> configuration macros in rte_config.h. So, IMHO, it would be better to
> explicitly indicate when these stubs are going to be used. However, I'm
> also against #ifdefs in general and actually like your proposal.
> Please comment.

My vote would be to remove the ifndef and make them 'weak'.
As you said - fewer ifdefs is better.
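
I.e. something along these lines (just a sketch based on the stubs in the diff
below; the real vector definitions would then override them when
RTE_IXGBE_INC_VECTOR is enabled, though it's worth double-checking how the weak
symbols behave with our static libraries):

	/* Weak stubs: linked in only when no strong (vector) definition exists. */
	int __attribute__((weak))
	ixgbe_rx_vec_condition_check(struct rte_eth_dev __rte_unused *dev)
	{
		return -1;
	}

	uint16_t __attribute__((weak))
	ixgbe_recv_pkts_vec(void __rte_unused *rx_queue,
			    struct rte_mbuf __rte_unused **rx_pkts,
			    uint16_t __rte_unused nb_pkts)
	{
		return 0;
	}

	uint16_t __attribute__((weak))
	ixgbe_recv_scattered_pkts_vec(void __rte_unused *rx_queue,
			struct rte_mbuf __rte_unused **rx_pkts,
			uint16_t __rte_unused nb_pkts)
	{
		return 0;
	}

	int __attribute__((weak))
	ixgbe_rxq_vec_setup(struct igb_rx_queue __rte_unused *rxq)
	{
		return -1;
	}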

> 
> >
> >> +int ixgbe_rx_vec_condition_check(
> >> +	struct rte_eth_dev __rte_unused *dev)
> >> +{
> >> +	return -1;
> >> +}
> >> +
> >> +uint16_t
> >> +ixgbe_recv_pkts_vec(void __rte_unused *rx_queue,
> >> +		    struct rte_mbuf __rte_unused **rx_pkts,
> >> +		    uint16_t __rte_unused nb_pkts)
> >> +{
> >> +	return 0;
> >> +}
> >> +
> >> +uint16_t ixgbe_recv_scattered_pkts_vec(void __rte_unused *rx_queue,
> >> +	struct rte_mbuf __rte_unused **rx_pkts, uint16_t __rte_unused nb_pkts)
> >> +{
> >> +	return 0;
> >> +}
> >> +
> >> +int ixgbe_rxq_vec_setup(struct igb_rx_queue __rte_unused *rxq)
> >> +{
> >> +	return -1;
> >> +}
> >> +#endif
> >> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> >> index 329007c..bbe5ff3 100644
> >> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> >> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
> >> @@ -255,16 +255,32 @@ struct ixgbe_txq_ops {
> >>    */
> >>   void set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
> >>
> >> -#ifdef RTE_IXGBE_INC_VECTOR
> >> +/**
> >> + * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
> >> + *
> >> + * Sets the callback based on the device parameters:
> >> + *  - ixgbe_hw.rx_bulk_alloc_allowed
> >> + *  - rte_eth_dev_data.scattered_rx
> >> + *  - rte_eth_dev_data.lro
> >> + *  - conditions checked in ixgbe_rx_vec_condition_check()
> >> + *
> >> + *  This means that the parameters above have to be configured prior to calling
> >> + *  to this function.
> >> + *
> >> + * @dev rte_eth_dev handle
> >> + */
> >> +void set_rx_function(struct rte_eth_dev *dev);
> >> +
> >>   uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> >>   		uint16_t nb_pkts);
> >>   uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
> >>   		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> >> +int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
> >> +int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
> >
> >> +#ifdef RTE_IXGBE_INC_VECTOR
> > Please add empty line(s) around the 'ifdef' - it makes it much easier to read.
> 
> NP. Here I just followed the existing code style of this section. ;) I
> agree with you though... ;)
> 
> >
> >>   uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
> >>   		uint16_t nb_pkts);
> >>   int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
> >> -int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
> >> -int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
> >>   #endif
> >>
> >>   #endif
> >> --
> >> 2.1.0


