[dpdk-dev] [EXT] Re: [PATCH v4 1/8] eventdev: introduce event vector capability
Pavan Nikhilesh Bhagavatula
pbhagavatula at marvell.com
Mon Mar 22 10:10:55 CET 2021
>On 19/03/2021 20:57, pbhagavatula at marvell.com wrote:
>> From: Pavan Nikhilesh <pbhagavatula at marvell.com>
>>
>> Introduce the rte_event_vector data structure, which is capable of
>> holding multiple uintptr_t values of the same flow, thereby allowing
>> applications to vectorize their pipelines and reducing the complexity
>> of pipelining the events across multiple stages.
>> This approach also reduces the scheduling overhead on an event device.
>>
>> Add an event vector mempool create handler to create mempools based
>> on the best mempool ops available on a given platform.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
>> ---
>> doc/guides/prog_guide/eventdev.rst |  36 +++++++++-
>> lib/librte_eventdev/rte_eventdev.h | 112 ++++++++++++++++++++++++++++-
>> lib/librte_eventdev/version.map    |   3 +
>> 3 files changed, 148 insertions(+), 3 deletions(-)
>>
>
>[SNIP]
>
>>
>> diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
>> index ce1fc2ce0..5586a3f15 100644
>> --- a/lib/librte_eventdev/rte_eventdev.h
>> +++ b/lib/librte_eventdev/rte_eventdev.h
>> @@ -212,8 +212,10 @@ extern "C" {
>>
>> #include <rte_common.h>
>> #include <rte_config.h>
>> -#include <rte_memory.h>
>> #include <rte_errno.h>
>> +#include <rte_mbuf_pool_ops.h>
>> +#include <rte_memory.h>
>> +#include <rte_mempool.h>
>>
>> #include "rte_eventdev_trace_fp.h"
>>
>> @@ -913,6 +915,25 @@ rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
>> int
>> rte_event_dev_close(uint8_t dev_id);
>>
>> +/**
>> + * Event vector structure.
>> + */
>> +struct rte_event_vector {
>> +	uint64_t nb_elem : 16;
>> +	/**< Number of elements in this event vector. */
>> +	uint64_t rsvd : 48;
>> +	/**< Reserved for future use. */
>> +	uint64_t impl_opaque;
>> +	/**< Opaque value for implementation use. */
>> +	union {
>> +		struct rte_mbuf *mbufs[0];
>> +		void *ptrs[0];
>> +		uint64_t *u64s[0];
>> +	} __rte_aligned(16);
>> +	/**< Start of the vector array union. Depending upon the event
>> +	 * type the vector array can be an array of mbufs or pointers or
>> +	 * opaque u64 values.
>> +	 */
>> +};
>> +
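
For illustration (not part of the patch), a minimal sketch of how an
application might walk the elements of such a vector on the consumer
side; handle_pkt() here is a hypothetical per-packet handler that just
frees the packet:

#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Hypothetical per-packet handler; here it simply frees the packet. */
static void
handle_pkt(struct rte_mbuf *m)
{
	rte_pktmbuf_free(m);
}

static void
consume_vector(struct rte_event_vector *vec)
{
	uint16_t i;

	/* nb_elem gives the number of valid entries; an mbuf vector
	 * uses the mbufs[] member of the union.
	 */
	for (i = 0; i < vec->nb_elem; i++)
		handle_pkt(vec->mbufs[i]);
}
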
>> /* Scheduler type definitions */
>> #define RTE_SCHED_TYPE_ORDERED 0
>> /**< Ordered scheduling
>> @@ -986,6 +1007,21 @@ rte_event_dev_close(uint8_t dev_id);
>> */
>> #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
>> /**< The event generated from event eth Rx adapter */
>> +#define RTE_EVENT_TYPE_VECTOR 0x8
>> +/**< Indicates that event is a vector.
>> + * All vector event types should be a logical OR of EVENT_TYPE_VECTOR.
>> + * This simplifies the pipeline design, as we can split the processing
>> + * of vector events and normal events across event types.
>> + * Example:
>> + * if (ev.event_type & RTE_EVENT_TYPE_VECTOR) {
>> + * // Classify and handle vector event.
>> + * } else {
>> + * // Classify and handle event.
>> + * }
>> + */
>> +#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
>> +/**< The event vector generated from cpu for pipelining. */
>> +
>> #define RTE_EVENT_TYPE_MAX 0x10
>> /**< Maximum number of event types */
>>
>> @@ -1108,6 +1144,8 @@ struct rte_event {
>> 		/**< Opaque event pointer */
>> 		struct rte_mbuf *mbuf;
>> 		/**< mbuf pointer if dequeued event is associated with mbuf */
>> +		struct rte_event_vector *vec;
>> +		/**< Event vector pointer. */
>> 	};
>> };
>>
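
As a rough producer-side sketch (again not part of the patch), a CPU
stage that has filled a vector could push it into the pipeline roughly
as below; dev_id, port_id, queue_id, flow_id and n are placeholders,
and the device is assumed to be configured and started:

struct rte_event ev = {0};

vec->nb_elem = n;	/* number of valid entries filled in */
ev.op = RTE_EVENT_OP_NEW;
ev.event_type = RTE_EVENT_TYPE_CPU_VECTOR;
ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
ev.queue_id = queue_id;
ev.flow_id = flow_id;
ev.vec = vec;		/* the union member added above */

rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
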
>> @@ -2023,6 +2061,78 @@ rte_event_dev_xstats_reset(uint8_t dev_id,
>> */
>> int rte_event_dev_selftest(uint8_t dev_id);
>>
>> +/**
>> + * Get the memory required per event vector based on the number of
>> + * elements per vector.
>> + * This should be used to create the mempool that holds the event
>> + * vectors.
>> + *
>> + * @param name
>> + *   The name of the vector pool.
>> + * @param n
>> + *   The number of event vectors in the pool.
>> + * @param cache_size
>> + *   Size of the per-core object cache. See rte_mempool_create() for
>> + *   details.
>> + * @param nb_elem
>> + *   The number of elements that a single event vector should be able
>> + *   to hold.
>> + * @param socket_id
>> + *   The socket identifier where the memory should be allocated. The
>> + *   value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
>> + *   the reserved zone.
>> + *
>> + * @return
>> + *   The pointer to the newly allocated mempool, on success. NULL on
>> + *   error with rte_errno set appropriately. Possible rte_errno values
>> + *   include:
>> + *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config
>> + *     structure
>> + *   - E_RTE_SECONDARY - function was called from a secondary process
>> + *     instance
>> + *   - EINVAL - cache size provided is too large, or priv_size is not
>> + *     aligned
>> + *   - ENOSPC - the maximum number of memzones has already been
>> + *     allocated
>> + *   - EEXIST - a memzone with the same name already exists
>> + *   - ENOMEM - no appropriate memory area found in which to create
>> + *     memzone
>> + */
>> +__rte_experimental
>> +static inline struct rte_mempool *
>> +rte_event_vector_pool_create(const char *name, unsigned int n,
>> +			     unsigned int cache_size, uint16_t nb_elem,
>> +			     int socket_id)
>
>Handling inlined functions is tricky at best from an ABI stability PoV.
>
>Since this function is only used at initialization time, performance is
>not an issue here, so there is no need for it to be inline.
Makes sense, I will move it to .c in the next version.
Thanks,
Pavan.
>
>> +{
>> +	const char *mp_ops_name;
>> +	struct rte_mempool *mp;
>> +	unsigned int elt_sz;
>> +	int ret;
>> +
>> +	if (!nb_elem) {
>> +		RTE_LOG(ERR, EVENTDEV,
>> +			"Invalid number of elements=%d requested\n", nb_elem);
>> +		rte_errno = EINVAL;
>> +		return NULL;
>> +	}
>> +
>> +	elt_sz = sizeof(struct rte_event_vector) +
>> +		 (nb_elem * sizeof(uintptr_t));
>> +	mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0,
>> +				      socket_id, 0);
>> +	if (mp == NULL)
>> +		return NULL;
>> +
>> +	mp_ops_name = rte_mbuf_best_mempool_ops();
>> +	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
>> +	if (ret != 0) {
>> +		RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
>> +		goto err;
>> +	}
>> +
>> +	ret = rte_mempool_populate_default(mp);
>> +	if (ret < 0)
>> +		goto err;
>> +
>> +	return mp;
>> +err:
>> +	rte_mempool_free(mp);
>> +	rte_errno = -ret;
>> +	return NULL;
>> +}
>> +
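
A quick usage sketch for the helper (the pool name and sizes below are
arbitrary): create a pool of vectors, then grab one with the regular
mempool API before filling it:

struct rte_mempool *vp;
struct rte_event_vector *vec;

/* 16k vectors, 128-object per-lcore cache, up to 256 elements each. */
vp = rte_event_vector_pool_create("evt_vec_pool", 16384, 128, 256,
				  rte_socket_id());
if (vp == NULL)
	rte_panic("vector pool creation failed: %d\n", rte_errno);

if (rte_mempool_get(vp, (void **)&vec) != 0)
	return;	/* pool exhausted */
vec->nb_elem = 0;	/* fill mbufs[]/ptrs[] before enqueue */
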
>> #ifdef __cplusplus
>> }
>> #endif
>> diff --git a/lib/librte_eventdev/version.map b/lib/librte_eventdev/version.map
>> index 3e5c09cfd..a070ef56e 100644
>> --- a/lib/librte_eventdev/version.map
>> +++ b/lib/librte_eventdev/version.map
>> @@ -138,6 +138,9 @@ EXPERIMENTAL {
>> __rte_eventdev_trace_port_setup;
>> # added in 20.11
>> rte_event_pmd_pci_probe_named;
>> +
>> + # added in 21.05
>> + rte_event_vector_pool_create;
>> };
>>
>> INTERNAL {
>>