[dpdk-dev] [PATCH 7/7] eventdev: fix ABI breakage due to event vector
Jerin Jacob
jerinjacobk at gmail.com
Mon Mar 8 19:44:36 CET 2021
On Sun, Feb 21, 2021 at 3:41 AM <pbhagavatula at marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula at marvell.com>
>
> Fix ABI breakage due to event vector configuration by moving
> the vector configuration into a new structure and having a separate
> function for enabling the vector config on a given ethernet device and
> queue pair.
> This vector config and function can be merged to queue config in
> v21.11.
>
> Fixes: 44c81670cf0a ("eventdev: introduce event vector Rx capability")
Hi @Ray Kinsella @Neil Horman @Thomas Monjalon @David Marchand
Is the ABI compatibility contract between releases? Right? i.e., it
is not between each individual patch. Right?
Summary:
1) The ideal way of adding this feature is to add elements to the
existing structure, as done
in ("eventdev: introduce event vector Rx capability") in this series.
2) Since this breaks the ABI, a new structure is introduced to fix it. I
think we can remove this
limitation in 21.11, as at that time we can change the ABI as required.
So, does this patch need to be squashed into ("eventdev: introduce event
vector Rx capability") to preserve
ABI compatibility between patches? Or is it OK to break ABI
compatibility in one patch of the series
and fix it in a later patch of the same series? (The latter is more readable,
as we can simply revert this patch in 21.11.)
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
> ---
> app/test-eventdev/test_pipeline_common.c | 16 +-
> lib/librte_eventdev/eventdev_pmd.h | 29 +++
> .../rte_event_eth_rx_adapter.c | 168 ++++++++++++------
> .../rte_event_eth_rx_adapter.h | 27 +++
> lib/librte_eventdev/version.map | 1 +
> 5 files changed, 184 insertions(+), 57 deletions(-)
>
> diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
> index 89f73be86..9aeefdd5f 100644
> --- a/app/test-eventdev/test_pipeline_common.c
> +++ b/app/test-eventdev/test_pipeline_common.c
> @@ -331,6 +331,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> uint16_t prod;
> struct rte_mempool *vector_pool = NULL;
> struct rte_event_eth_rx_adapter_queue_conf queue_conf;
> + struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
>
> memset(&queue_conf, 0,
> sizeof(struct rte_event_eth_rx_adapter_queue_conf));
> @@ -360,12 +361,8 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> }
> if (opt->ena_vector) {
> if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
> - queue_conf.vector_sz = opt->vector_size;
> - queue_conf.vector_timeout_ns =
> - opt->vector_tmo_nsec;
> queue_conf.rx_queue_flags |=
> RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
> - queue_conf.vector_mp = vector_pool;
> } else {
> evt_err("Rx adapter doesn't support event vector");
> return -EINVAL;
> @@ -385,6 +382,17 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
> return ret;
> }
>
> + if (opt->ena_vector) {
> + vec_conf.vector_sz = opt->vector_size;
> + vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
> + vec_conf.vector_mp = vector_pool;
> + if (rte_event_eth_rx_adapter_queue_event_vector_config(
> + prod, prod, -1, &vec_conf) < 0) {
> + evt_err("Failed to configure event vectorization for Rx adapter");
> + return -EINVAL;
> + }
> + }
> +
> if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
> uint32_t service_id = -1U;
>
> diff --git a/lib/librte_eventdev/eventdev_pmd.h b/lib/librte_eventdev/eventdev_pmd.h
> index 60bfaebc0..d79dfd612 100644
> --- a/lib/librte_eventdev/eventdev_pmd.h
> +++ b/lib/librte_eventdev/eventdev_pmd.h
> @@ -667,6 +667,32 @@ typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
> const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> struct rte_event_eth_rx_adapter_vector_limits *limits);
>
> +struct rte_event_eth_rx_adapter_event_vector_config;
> +/**
> + * Enable event vector on an given Rx queue of a ethernet devices belonging to
> + * the Rx adapter.
> + *
> + * @param dev
> + * Event device pointer
> + *
> + * @param eth_dev
> + * Ethernet device pointer
> + *
> + * @param rx_queue_id
> + * The Rx queue identifier
> + *
> + * @param config
> + * Pointer to the event vector configuration structure.
> + *
> + * @return
> + * - 0: Success.
> + * - <0: Error code returned by the driver function.
> + */
> +typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
> + const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
> + int32_t rx_queue_id,
> + const struct rte_event_eth_rx_adapter_event_vector_config *config);
> +
> typedef uint32_t rte_event_pmd_selftest_seqn_t;
> extern int rte_event_pmd_selftest_seqn_dynfield_offset;
>
> @@ -1092,6 +1118,9 @@ struct rte_eventdev_ops {
> eventdev_eth_rx_adapter_vector_limits_get_t
> eth_rx_adapter_vector_limits_get;
> /**< Get event vector limits for the Rx adapter */
> + eventdev_eth_rx_adapter_event_vector_config_t
> + eth_rx_adapter_event_vector_config;
> + /**< Configure Rx adapter with event vector */
>
> eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
> /**< Get timer adapter capabilities */
> diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> index a1990637f..c71990078 100644
> --- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> @@ -1882,25 +1882,6 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> } else
> qi_ev->flow_id = 0;
>
> - if (conf->rx_queue_flags &
> - RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> - queue_info->ena_vector = 1;
> - qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> - rxa_set_vector_data(queue_info, conf->vector_sz,
> - conf->vector_timeout_ns, conf->vector_mp,
> - rx_queue_id, dev_info->dev->data->port_id);
> - rx_adapter->ena_vector = 1;
> - rx_adapter->vector_tmo_ticks =
> - rx_adapter->vector_tmo_ticks
> - ? RTE_MIN(queue_info->vector_data
> - .vector_timeout_ticks,
> - rx_adapter->vector_tmo_ticks)
> - : queue_info->vector_data.vector_timeout_ticks;
> - rx_adapter->vector_tmo_ticks <<= 1;
> - TAILQ_INIT(&rx_adapter->vector_list);
> - rx_adapter->prev_expiry_ts = 0;
> - }
> -
> rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
> if (rxa_polled_queue(dev_info, rx_queue_id)) {
> rx_adapter->num_rx_polled += !pollq;
> @@ -1926,6 +1907,44 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> }
> }
>
> +static void
> +rxa_sw_event_vector_configure(
> + struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
> + int rx_queue_id,
> + const struct rte_event_eth_rx_adapter_event_vector_config *config)
> +{
> + struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
> + struct eth_rx_queue_info *queue_info;
> + struct rte_event *qi_ev;
> +
> + if (rx_queue_id == -1) {
> + uint16_t nb_rx_queues;
> + uint16_t i;
> +
> + nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> + for (i = 0; i < nb_rx_queues; i++)
> + rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
> + config);
> + return;
> + }
> +
> + queue_info = &dev_info->rx_queue[rx_queue_id];
> + qi_ev = (struct rte_event *)&queue_info->event;
> + queue_info->ena_vector = 1;
> + qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
> + rxa_set_vector_data(queue_info, config->vector_sz,
> + config->vector_timeout_ns, config->vector_mp,
> + rx_queue_id, dev_info->dev->data->port_id);
> + rx_adapter->ena_vector = 1;
> + rx_adapter->vector_tmo_ticks =
> + rx_adapter->vector_tmo_ticks ?
> + RTE_MIN(config->vector_timeout_ns << 1,
> + rx_adapter->vector_tmo_ticks) :
> + config->vector_timeout_ns << 1;
> + rx_adapter->prev_expiry_ts = 0;
> + TAILQ_INIT(&rx_adapter->vector_list);
> +}
> +
> static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> uint16_t eth_dev_id,
> int rx_queue_id,
> @@ -2239,7 +2258,6 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
> struct rte_event_eth_rx_adapter *rx_adapter;
> struct rte_eventdev *dev;
> struct eth_device_info *dev_info;
> - struct rte_event_eth_rx_adapter_vector_limits limits;
>
> RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> @@ -2276,39 +2294,6 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
> return -EINVAL;
> }
>
> - if (queue_conf->rx_queue_flags &
> - RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
> - ret = rte_event_eth_rx_adapter_vector_limits_get(
> - rx_adapter->eventdev_id, eth_dev_id, &limits);
> - if (ret < 0) {
> - RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
> - " eth port: %" PRIu16
> - " adapter id: %" PRIu8,
> - eth_dev_id, id);
> - return -EINVAL;
> - }
> - if (queue_conf->vector_sz < limits.min_sz ||
> - queue_conf->vector_sz > limits.max_sz ||
> - queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
> - queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
> - queue_conf->vector_mp == NULL) {
> - RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> - " eth port: %" PRIu16
> - " adapter id: %" PRIu8,
> - eth_dev_id, id);
> - return -EINVAL;
> - }
> - if (queue_conf->vector_mp->elt_size <
> - (sizeof(struct rte_event_vector) +
> - (sizeof(uintptr_t) * queue_conf->vector_sz))) {
> - RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> - " eth port: %" PRIu16
> - " adapter id: %" PRIu8,
> - eth_dev_id, id);
> - return -EINVAL;
> - }
> - }
> -
> if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
> (rx_queue_id != -1)) {
> RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
> @@ -2502,6 +2487,83 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
> return ret;
> }
>
> +int
> +rte_event_eth_rx_adapter_queue_event_vector_config(
> + uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> + struct rte_event_eth_rx_adapter_event_vector_config *config)
> +{
> + struct rte_event_eth_rx_adapter_vector_limits limits;
> + struct rte_event_eth_rx_adapter *rx_adapter;
> + struct rte_eventdev *dev;
> + uint32_t cap;
> + int ret;
> +
> + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
> + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
> +
> + rx_adapter = rxa_id_to_adapter(id);
> + if ((rx_adapter == NULL) || (config == NULL))
> + return -EINVAL;
> +
> + dev = &rte_eventdevs[rx_adapter->eventdev_id];
> + ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
> + eth_dev_id, &cap);
> + if (ret) {
> + RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> + "eth port %" PRIu16,
> + id, eth_dev_id);
> + return ret;
> + }
> +
> + if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
> + RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
> + " eth port: %" PRIu16 " adapter id: %" PRIu8,
> + eth_dev_id, id);
> + return -EINVAL;
> + }
> +
> + ret = rte_event_eth_rx_adapter_vector_limits_get(
> + rx_adapter->eventdev_id, eth_dev_id, &limits);
> + if (ret) {
> + RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
> + "eth port %" PRIu16,
> + rx_adapter->eventdev_id, eth_dev_id);
> + return ret;
> + }
> +
> + if (config->vector_sz < limits.min_sz ||
> + config->vector_sz > limits.max_sz ||
> + config->vector_timeout_ns < limits.min_timeout_ns ||
> + config->vector_timeout_ns > limits.max_timeout_ns ||
> + config->vector_mp == NULL) {
> + RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> + " eth port: %" PRIu16 " adapter id: %" PRIu8,
> + eth_dev_id, id);
> + return -EINVAL;
> + }
> + if (config->vector_mp->elt_size <
> + (sizeof(struct rte_event_vector) +
> + (sizeof(uintptr_t) * config->vector_sz))) {
> + RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
> + " eth port: %" PRIu16 " adapter id: %" PRIu8,
> + eth_dev_id, id);
> + return -EINVAL;
> + }
> +
> + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
> + RTE_FUNC_PTR_OR_ERR_RET(
> + *dev->dev_ops->eth_rx_adapter_event_vector_config,
> + -ENOTSUP);
> + ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
> + dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
> + } else {
> + rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
> + rx_queue_id, config);
> + }
> +
> + return ret;
> +}
> +
> int
> rte_event_eth_rx_adapter_vector_limits_get(
> uint8_t dev_id, uint16_t eth_port_id,
> diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> index 4bdb38f08..ceef6d565 100644
> --- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> @@ -171,6 +171,9 @@ struct rte_event_eth_rx_adapter_queue_conf {
> * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
> * enqueued event.
> */
> +};
> +
> +struct rte_event_eth_rx_adapter_event_vector_config {
> uint16_t vector_sz;
> /**<
> * Indicates the maximum number for mbufs to combine and form a vector.
> @@ -418,6 +421,30 @@ int rte_event_eth_rx_adapter_queue_add(uint8_t id,
> int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
> int32_t rx_queue_id);
>
> +/**
> + * Configure event vectorization for a given ethernet device queue, that has
> + * been added to a event eth Rx adapter.
> + *
> + * @param id
> + * The identifier of the ethernet Rx event adapter.
> + *
> + * @param eth_dev_id
> + * The identifier of the ethernet device.
> + *
> + * @param rx_queue_id
> + * Ethernet device receive queue index.
> + * If rx_queue_id is -1, then all Rx queues configured for the ethernet device
> + * are configured with event vectorization.
> + *
> + * @return
> + * - 0: Success, Receive queue configured correctly.
> + * - <0: Error code on failure.
> + */
> +__rte_experimental
> +int rte_event_eth_rx_adapter_queue_event_vector_config(
> + uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
> + struct rte_event_eth_rx_adapter_event_vector_config *config);
> +
> /**
> * Start ethernet Rx event adapter
> *
> diff --git a/lib/librte_eventdev/version.map b/lib/librte_eventdev/version.map
> index 34c1c830e..902df0ae3 100644
> --- a/lib/librte_eventdev/version.map
> +++ b/lib/librte_eventdev/version.map
> @@ -142,6 +142,7 @@ EXPERIMENTAL {
> #added in 21.05
> rte_event_vector_pool_create;
> rte_event_eth_rx_adapter_vector_limits_get;
> + rte_event_eth_rx_adapter_queue_event_vector_config;
> };
>
> INTERNAL {
> --
> 2.17.1
>
More information about the dev
mailing list