[dpdk-dev] [PATCH 2/4] eventdev: implement the northbound APIs

Jerin Jacob jerin.jacob at caviumnetworks.com
Mon Nov 21 20:31:33 CET 2016


On Tue, Nov 22, 2016 at 12:43:58AM +0530, Jerin Jacob wrote:
> On Mon, Nov 21, 2016 at 05:45:51PM +0000, Eads, Gage wrote:
> > Hi Jerin,
> > 
> > I did a quick review and overall this implementation looks good. I noticed just one issue in rte_event_queue_setup(): the check of nb_atomic_order_sequences is being applied to atomic-type queues, but that field applies to ordered-type queues.
> 
> Thanks Gage. I will fix that in v2.
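> 
> For the record, the v2 change could look like the below -- a sketch only,
> assuming the header grows an RTE_EVENT_QUEUE_CFG_ORDERED_ONLY flag and a
> helper modeled on the existing is_valid_atomic_queue_conf():
> 
> 	/* True only for queue configs that can hold ordered events */
> 	static inline int
> 	is_valid_ordered_queue_conf(struct rte_event_queue_conf *queue_conf)
> 	{
> 		if (queue_conf && (
> 			((queue_conf->event_queue_cfg &
> 				RTE_EVENT_QUEUE_CFG_TYPE_MASK)
> 				== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
> 			((queue_conf->event_queue_cfg &
> 				RTE_EVENT_QUEUE_CFG_TYPE_MASK)
> 				== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
> 			))
> 			return 1;
> 		else
> 			return 0;
> 	}
> 
> and the nb_atomic_order_sequences check in rte_event_queue_setup() would
> then be guarded by is_valid_ordered_queue_conf() instead of
> is_valid_atomic_queue_conf().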
> 
> > 
> > One open issue I noticed is the "typical workflow" description starting in rte_eventdev.h:204 conflicts with the centralized software PMD that Harry posted last week. Specifically, that PMD expects a single core to call the schedule function. We could extend the documentation to account for this alternative style of scheduler invocation, or discuss ways to make the software PMD work with the documented workflow. I prefer the former, but either way I think we ought to expose the scheduler's expected usage to the user -- perhaps through an RTE_EVENT_DEV_CAP flag?
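> > 
> > For reference, the documented workflow reads roughly like this (abridged
> > from memory, not the exact text at rte_eventdev.h:204):
> > 
> > 	while (1) {
> > 		rte_event_schedule(dev_id);	/* called on every worker core */
> > 		/* dequeue a burst of events on this core's port,
> > 		 * process them, enqueue to the next stage ...
> > 		 */
> > 	}
> > 
> > whereas the software PMD wants a single dedicated core spinning on
> > rte_event_schedule() while the worker cores only dequeue and enqueue.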
> 
> I prefer the former too; you can propose the documentation change required
> for the software PMD.
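> 
> For the RTE_EVENT_DEV_CAP idea, one capability bit would let the application
> discover at runtime whether it must dedicate a core to the schedule function.
> A sketch only -- the flag name and bit position below are invented:
> 
> 	/* rte_eventdev.h */
> 	#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 1)
> 	/**< Event scheduling happens as part of enqueue/dequeue on the
> 	 * worker cores; no core has to call rte_event_schedule().
> 	 */
> 
> An application would check this bit in dev_info.event_dev_cap after
> rte_event_dev_info_get() and spawn a dedicated scheduler core only when the
> bit is not set.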
> 
> On the same note, if the software PMD workflow needs a separate core (or
> cores) for the schedule function, can we hide that from the API specification
> and pass an argument to the SW PMD to define the scheduling core(s)?
> 
> Something like --vdev=eventsw0,schedule_cmask=0x2
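> 
> Parsing that argument in the SW PMD should be straightforward with the
> existing librte_kvargs helpers; a rough sketch, with the key name and the
> handler hypothetical:
> 
> 	static uint64_t sched_cmask;
> 
> 	static int
> 	parse_sched_cmask(const char *key __rte_unused, const char *value,
> 			void *opaque)
> 	{
> 		uint64_t *cmask = opaque;
> 
> 		*cmask = strtoull(value, NULL, 16);
> 		return 0;
> 	}
> 
> 	/* in the vdev probe path, params = "schedule_cmask=0x2" */
> 	static const char *valid_args[] = { "schedule_cmask", NULL };
> 	struct rte_kvargs *kvlist = rte_kvargs_parse(params, valid_args);
> 
> 	if (kvlist != NULL) {
> 		rte_kvargs_process(kvlist, "schedule_cmask",
> 				parse_sched_cmask, &sched_cmask);
> 		rte_kvargs_free(kvlist);
> 	}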

Just a thought,

Perhaps we could introduce a generic "service" cores concept in DPDK to hide
the requirement where an implementation needs a dedicated core to do certain
work. I guess it would be useful for other NPU integrations in DPDK as well.
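
A minimal sketch of what such an EAL-level hook could look like -- every name
below is hypothetical, only to illustrate the idea:

	/* A PMD registers work that needs a dedicated core; EAL runs the
	 * loop on a core taken from a reserved "service" coremask, so the
	 * application never has to manage that core itself.
	 */
	typedef int32_t (*rte_service_func_t)(void *args);

	int rte_eal_service_register(const char *name,
			rte_service_func_t func, void *args);

	/* e.g. the SW eventdev PMD, at probe time: */
	rte_eal_service_register("eventsw0_sched", sw_event_schedule, sw_dev);

That way a software PMD would not need a special-case workflow in the API
documentation at all.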

> 
> > 
> > Thanks,
> > Gage
> > 
> > >  -----Original Message-----
> > >  From: Jerin Jacob [mailto:jerin.jacob at caviumnetworks.com]
> > >  Sent: Thursday, November 17, 2016 11:45 PM
> > >  To: dev at dpdk.org
> > >  Cc: Richardson, Bruce <bruce.richardson at intel.com>; Van Haaren, Harry
> > >  <harry.van.haaren at intel.com>; hemant.agrawal at nxp.com; Eads, Gage
> > >  <gage.eads at intel.com>; Jerin Jacob <jerin.jacob at caviumnetworks.com>
> > >  Subject: [dpdk-dev] [PATCH 2/4] eventdev: implement the northbound APIs
> > >  
> > >  This patch set defines the southbound driver interface
> > >  and implements the common code required for the northbound
> > >  eventdev API interface.
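> > >  
> > >  The northbound call sequence an application is expected to follow is,
> > >  roughly (a sketch only, error handling omitted):
> > >  
> > >      rte_event_dev_configure(dev_id, &config);
> > >      rte_event_queue_setup(dev_id, queue_id, &queue_conf);
> > >      rte_event_port_setup(dev_id, port_id, &port_conf);
> > >      rte_event_port_link(dev_id, port_id, NULL, 0); /* link all queues */
> > >      rte_event_dev_start(dev_id);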
> > >  
> > >  Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
> > >  ---
> > >   config/common_base                           |    6 +
> > >   lib/Makefile                                 |    1 +
> > >   lib/librte_eal/common/include/rte_log.h      |    1 +
> > >   lib/librte_eventdev/Makefile                 |   57 ++
> > >   lib/librte_eventdev/rte_eventdev.c           | 1211 ++++++++++++++++++++++++++
> > >   lib/librte_eventdev/rte_eventdev_pmd.h       |  504 +++++++++++
> > >   lib/librte_eventdev/rte_eventdev_version.map |   39 +
> > >   mk/rte.app.mk                                |    1 +
> > >   8 files changed, 1820 insertions(+)
> > >   create mode 100644 lib/librte_eventdev/Makefile
> > >   create mode 100644 lib/librte_eventdev/rte_eventdev.c
> > >   create mode 100644 lib/librte_eventdev/rte_eventdev_pmd.h
> > >   create mode 100644 lib/librte_eventdev/rte_eventdev_version.map
> > >  
> > >  diff --git a/config/common_base b/config/common_base
> > >  index 4bff83a..7a8814e 100644
> > >  --- a/config/common_base
> > >  +++ b/config/common_base
> > >  @@ -411,6 +411,12 @@ CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n
> > >   CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
> > >  
> > >   #
> > >  +# Compile generic event device library
> > >  +#
> > >  +CONFIG_RTE_LIBRTE_EVENTDEV=y
> > >  +CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
> > >  +CONFIG_RTE_EVENT_MAX_DEVS=16
> > >  +CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
> > >   # Compile librte_ring
> > >   #
> > >   CONFIG_RTE_LIBRTE_RING=y
> > >  diff --git a/lib/Makefile b/lib/Makefile
> > >  index 990f23a..1a067bf 100644
> > >  --- a/lib/Makefile
> > >  +++ b/lib/Makefile
> > >  @@ -41,6 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
> > >   DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
> > >   DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
> > >   DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
> > >  +DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
> > >   DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
> > >   DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
> > >   DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
> > >  diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
> > >  index 29f7d19..9a07d92 100644
> > >  --- a/lib/librte_eal/common/include/rte_log.h
> > >  +++ b/lib/librte_eal/common/include/rte_log.h
> > >  @@ -79,6 +79,7 @@ extern struct rte_logs rte_logs;
> > >   #define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
> > >   #define RTE_LOGTYPE_MBUF    0x00010000 /**< Log related to mbuf. */
> > >   #define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */
> > >  +#define RTE_LOGTYPE_EVENTDEV 0x00040000 /**< Log related to eventdev. */
> > >  
> > >   /* these log types can be used in an application */
> > >   #define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */
> > >  diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
> > >  new file mode 100644
> > >  index 0000000..dac0663
> > >  --- /dev/null
> > >  +++ b/lib/librte_eventdev/Makefile
> > >  @@ -0,0 +1,57 @@
> > >  +#   BSD LICENSE
> > >  +#
> > >  +#   Copyright(c) 2016 Cavium networks. All rights reserved.
> > >  +#
> > >  +#   Redistribution and use in source and binary forms, with or without
> > >  +#   modification, are permitted provided that the following conditions
> > >  +#   are met:
> > >  +#
> > >  +#     * Redistributions of source code must retain the above copyright
> > >  +#       notice, this list of conditions and the following disclaimer.
> > >  +#     * Redistributions in binary form must reproduce the above copyright
> > >  +#       notice, this list of conditions and the following disclaimer in
> > >  +#       the documentation and/or other materials provided with the
> > >  +#       distribution.
> > >  +#     * Neither the name of Cavium networks nor the names of its
> > >  +#       contributors may be used to endorse or promote products derived
> > >  +#       from this software without specific prior written permission.
> > >  +#
> > >  +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> > >  +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> > >  +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> > >  +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> > >  +#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> > >  +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> > >  +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> > >  +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> > >  +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> > >  +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> > >  +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> > >  +
> > >  +include $(RTE_SDK)/mk/rte.vars.mk
> > >  +
> > >  +# library name
> > >  +LIB = librte_eventdev.a
> > >  +
> > >  +# library version
> > >  +LIBABIVER := 1
> > >  +
> > >  +# build flags
> > >  +CFLAGS += -O3
> > >  +CFLAGS += $(WERROR_FLAGS)
> > >  +
> > >  +# library source files
> > >  +SRCS-y += rte_eventdev.c
> > >  +
> > >  +# export include files
> > >  +SYMLINK-y-include += rte_eventdev.h
> > >  +SYMLINK-y-include += rte_eventdev_pmd.h
> > >  +
> > >  +# versioning export map
> > >  +EXPORT_MAP := rte_eventdev_version.map
> > >  +
> > >  +# library dependencies
> > >  +DEPDIRS-y += lib/librte_eal
> > >  +DEPDIRS-y += lib/librte_mbuf
> > >  +
> > >  +include $(RTE_SDK)/mk/rte.lib.mk
> > >  diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
> > >  new file mode 100644
> > >  index 0000000..17ce5c3
> > >  --- /dev/null
> > >  +++ b/lib/librte_eventdev/rte_eventdev.c
> > >  @@ -0,0 +1,1211 @@
> > >  +/*
> > >  + *   BSD LICENSE
> > >  + *
> > >  + *   Copyright(c) 2016 Cavium networks. All rights reserved.
> > >  + *
> > >  + *   Redistribution and use in source and binary forms, with or without
> > >  + *   modification, are permitted provided that the following conditions
> > >  + *   are met:
> > >  + *
> > >  + *     * Redistributions of source code must retain the above copyright
> > >  + *       notice, this list of conditions and the following disclaimer.
> > >  + *     * Redistributions in binary form must reproduce the above copyright
> > >  + *       notice, this list of conditions and the following disclaimer in
> > >  + *       the documentation and/or other materials provided with the
> > >  + *       distribution.
> > >  + *     * Neither the name of Cavium networks nor the names of its
> > >  + *       contributors may be used to endorse or promote products derived
> > >  + *       from this software without specific prior written permission.
> > >  + *
> > >  + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> > >  + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> > >  + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> > >  + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> > >  + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> > >  + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> > >  + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> > >  + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> > >  + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> > >  + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> > >  + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> > >  + */
> > >  +
> > >  +#include <ctype.h>
> > >  +#include <stdio.h>
> > >  +#include <stdlib.h>
> > >  +#include <string.h>
> > >  +#include <stdarg.h>
> > >  +#include <errno.h>
> > >  +#include <stdint.h>
> > >  +#include <inttypes.h>
> > >  +#include <sys/types.h>
> > >  +#include <sys/queue.h>
> > >  +
> > >  +#include <rte_byteorder.h>
> > >  +#include <rte_log.h>
> > >  +#include <rte_debug.h>
> > >  +#include <rte_dev.h>
> > >  +#include <rte_pci.h>
> > >  +#include <rte_memory.h>
> > >  +#include <rte_memcpy.h>
> > >  +#include <rte_memzone.h>
> > >  +#include <rte_eal.h>
> > >  +#include <rte_per_lcore.h>
> > >  +#include <rte_lcore.h>
> > >  +#include <rte_atomic.h>
> > >  +#include <rte_branch_prediction.h>
> > >  +#include <rte_common.h>
> > >  +#include <rte_malloc.h>
> > >  +#include <rte_errno.h>
> > >  +
> > >  +#include "rte_eventdev.h"
> > >  +#include "rte_eventdev_pmd.h"
> > >  +
> > >  +struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
> > >  +
> > >  +struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
> > >  +
> > >  +static struct rte_eventdev_global eventdev_globals = {
> > >  +	.nb_devs		= 0
> > >  +};
> > >  +
> > >  +struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
> > >  +
> > >  +/* Event dev north bound API implementation */
> > >  +
> > >  +uint8_t
> > >  +rte_event_dev_count(void)
> > >  +{
> > >  +	return rte_eventdev_globals->nb_devs;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_get_dev_id(const char *name)
> > >  +{
> > >  +	int i;
> > >  +
> > >  +	if (!name)
> > >  +		return -EINVAL;
> > >  +
> > >  +	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
> > >  +		if ((strcmp(rte_event_devices[i].data->name, name)
> > >  +				== 0) &&
> > >  +				(rte_event_devices[i].attached ==
> > >  +						RTE_EVENTDEV_ATTACHED))
> > >  +			return i;
> > >  +	return -ENODEV;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_socket_id(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	return dev->data->socket_id;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (dev_info == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
> > >  +	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
> > >  +
> > >  +	dev_info->pci_dev = dev->pci_dev;
> > >  +	if (dev->driver)
> > >  +		dev_info->driver_name = dev->driver->pci_drv.driver.name;
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +static inline int
> > >  +rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
> > >  +{
> > >  +	uint8_t old_nb_queues = dev->data->nb_queues;
> > >  +	void **queues;
> > >  +	uint8_t *queues_prio;
> > >  +	unsigned int i;
> > >  +
> > >  +	EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
> > >  +			 dev->data->dev_id);
> > >  +
> > >  +	/* First time configuration */
> > >  +	if (dev->data->queues == NULL && nb_queues != 0) {
> > >  +		dev->data->queues = rte_zmalloc_socket("eventdev->data->queues",
> > >  +				sizeof(dev->data->queues[0]) * nb_queues,
> > >  +				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->queues == NULL) {
> > >  +			dev->data->nb_queues = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for queue meta data,"
> > >  +					"nb_queues %u", nb_queues);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +		/* Allocate memory to store queue priority */
> > >  +		dev->data->queues_prio = rte_zmalloc_socket(
> > >  +				"eventdev->data->queues_prio",
> > >  +				sizeof(dev->data->queues_prio[0]) * nb_queues,
> > >  +				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->queues_prio == NULL) {
> > >  +			dev->data->nb_queues = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for queue priority,"
> > >  +					"nb_queues %u", nb_queues);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +	} else if (dev->data->queues != NULL && nb_queues != 0) {/* re-config */
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
> > >  +
> > >  +		queues = dev->data->queues;
> > >  +		for (i = nb_queues; i < old_nb_queues; i++)
> > >  +			(*dev->dev_ops->queue_release)(queues[i]);
> > >  +
> > >  +		queues = rte_realloc(queues, sizeof(queues[0]) * nb_queues,
> > >  +				RTE_CACHE_LINE_SIZE);
> > >  +		if (queues == NULL) {
> > >  +			EDEV_LOG_ERR("failed to realloc queue meta data,"
> > >  +						" nb_queues %u", nb_queues);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +		dev->data->queues = queues;
> > >  +
> > >  +		/* Re allocate memory to store queue priority */
> > >  +		queues_prio = dev->data->queues_prio;
> > >  +		queues_prio = rte_realloc(queues_prio,
> > >  +				sizeof(queues_prio[0]) * nb_queues,
> > >  +				RTE_CACHE_LINE_SIZE);
> > >  +		if (queues_prio == NULL) {
> > >  +			EDEV_LOG_ERR("failed to realloc queue priority,"
> > >  +						" nb_queues %u", nb_queues);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +		dev->data->queues_prio = queues_prio;
> > >  +
> > >  +		if (nb_queues > old_nb_queues) {
> > >  +			uint8_t new_qs = nb_queues - old_nb_queues;
> > >  +
> > >  +			memset(queues + old_nb_queues, 0,
> > >  +				sizeof(queues[0]) * new_qs);
> > >  +			memset(queues_prio + old_nb_queues, 0,
> > >  +				sizeof(queues_prio[0]) * new_qs);
> > >  +		}
> > >  +	} else if (dev->data->queues != NULL && nb_queues == 0) {
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
> > >  +
> > >  +		queues = dev->data->queues;
> > >  +		for (i = nb_queues; i < old_nb_queues; i++)
> > >  +			(*dev->dev_ops->queue_release)(queues[i]);
> > >  +	}
> > >  +
> > >  +	dev->data->nb_queues = nb_queues;
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +static inline int
> > >  +rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
> > >  +{
> > >  +	uint8_t old_nb_ports = dev->data->nb_ports;
> > >  +	void **ports;
> > >  +	uint16_t *links_map;
> > >  +	uint8_t *ports_dequeue_depth;
> > >  +	uint8_t *ports_enqueue_depth;
> > >  +	unsigned int i;
> > >  +
> > >  +	EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
> > >  +			 dev->data->dev_id);
> > >  +
> > >  +	/* First time configuration */
> > >  +	if (dev->data->ports == NULL && nb_ports != 0) {
> > >  +		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
> > >  +				sizeof(dev->data->ports[0]) * nb_ports,
> > >  +				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->ports == NULL) {
> > >  +			dev->data->nb_ports = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for port meta data,"
> > >  +					"nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Allocate memory to store ports dequeue depth */
> > >  +		dev->data->ports_dequeue_depth =
> > >  +			rte_zmalloc_socket("eventdev->ports_dequeue_depth",
> > >  +			sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
> > >  +			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->ports_dequeue_depth == NULL) {
> > >  +			dev->data->nb_ports = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for port deq meta,"
> > >  +					"nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Allocate memory to store ports enqueue depth */
> > >  +		dev->data->ports_enqueue_depth =
> > >  +			rte_zmalloc_socket("eventdev->ports_enqueue_depth",
> > >  +			sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
> > >  +			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->ports_enqueue_depth == NULL) {
> > >  +			dev->data->nb_ports = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for port enq meta,"
> > >  +					"nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Allocate memory to store queue to port link connection */
> > >  +		dev->data->links_map =
> > >  +			rte_zmalloc_socket("eventdev->links_map",
> > >  +			sizeof(dev->data->links_map[0]) * nb_ports *
> > >  +			RTE_EVENT_MAX_QUEUES_PER_DEV,
> > >  +			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
> > >  +		if (dev->data->links_map == NULL) {
> > >  +			dev->data->nb_ports = 0;
> > >  +			EDEV_LOG_ERR("failed to get memory for port_map area,"
> > >  +					"nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
> > >  +
> > >  +		ports = dev->data->ports;
> > >  +		ports_dequeue_depth = dev->data->ports_dequeue_depth;
> > >  +		ports_enqueue_depth = dev->data->ports_enqueue_depth;
> > >  +		links_map = dev->data->links_map;
> > >  +
> > >  +		for (i = nb_ports; i < old_nb_ports; i++)
> > >  +			(*dev->dev_ops->port_release)(ports[i]);
> > >  +
> > >  +		/* Realloc memory for ports */
> > >  +		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
> > >  +				RTE_CACHE_LINE_SIZE);
> > >  +		if (ports == NULL) {
> > >  +			EDEV_LOG_ERR("failed to realloc port meta data,"
> > >  +						" nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Realloc memory for ports_dequeue_depth */
> > >  +		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
> > >  +			sizeof(ports_dequeue_depth[0]) * nb_ports,
> > >  +			RTE_CACHE_LINE_SIZE);
> > >  +		if (ports_dequeue_depth == NULL) {
> > >  +			EDEV_LOG_ERR("failed to realloc port dequeue meta data,"
> > >  +						" nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Realloc memory for ports_enqueue_depth */
> > >  +		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
> > >  +			sizeof(ports_enqueue_depth[0]) * nb_ports,
> > >  +			RTE_CACHE_LINE_SIZE);
> > >  +		if (ports_enqueue_depth == NULL) {
> > >  +			EDEV_LOG_ERR("failed to realloc port enqueue meta data,"
> > >  +						" nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		/* Realloc memory to store queue to port link connection */
> > >  +		links_map = rte_realloc(links_map,
> > >  +			sizeof(dev->data->links_map[0]) * nb_ports *
> > >  +			RTE_EVENT_MAX_QUEUES_PER_DEV,
> > >  +			RTE_CACHE_LINE_SIZE);
> > >  +		if (links_map == NULL) {
> > >  +			dev->data->nb_ports = 0;
> > >  +			EDEV_LOG_ERR("failed to realloc mem for port_map area,"
> > >  +					"nb_ports %u", nb_ports);
> > >  +			return -(ENOMEM);
> > >  +		}
> > >  +
> > >  +		if (nb_ports > old_nb_ports) {
> > >  +			uint8_t new_ps = nb_ports - old_nb_ports;
> > >  +
> > >  +			memset(ports + old_nb_ports, 0,
> > >  +				sizeof(ports[0]) * new_ps);
> > >  +			memset(ports_dequeue_depth + old_nb_ports, 0,
> > >  +				sizeof(ports_dequeue_depth[0]) * new_ps);
> > >  +			memset(ports_enqueue_depth + old_nb_ports, 0,
> > >  +				sizeof(ports_enqueue_depth[0]) * new_ps);
> > >  +			memset(links_map +
> > >  +				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
> > >  +				0, sizeof(links_map[0]) * new_ps *
> > >  +				RTE_EVENT_MAX_QUEUES_PER_DEV);
> > >  +		}
> > >  +
> > >  +		dev->data->ports = ports;
> > >  +		dev->data->ports_dequeue_depth = ports_dequeue_depth;
> > >  +		dev->data->ports_enqueue_depth = ports_enqueue_depth;
> > >  +		dev->data->links_map = links_map;
> > >  +	} else if (dev->data->ports != NULL && nb_ports == 0) {
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
> > >  +
> > >  +		ports = dev->data->ports;
> > >  +		for (i = nb_ports; i < old_nb_ports; i++)
> > >  +			(*dev->dev_ops->port_release)(ports[i]);
> > >  +	}
> > >  +
> > >  +	dev->data->nb_ports = nb_ports;
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_configure(uint8_t dev_id, struct rte_event_dev_config *dev_conf)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	struct rte_event_dev_info info;
> > >  +	int diag;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> > >  +
> > >  +	if (dev->data->dev_started) {
> > >  +		EDEV_LOG_ERR(
> > >  +		    "device %d must be stopped to allow configuration", dev_id);
> > >  +		return -EBUSY;
> > >  +	}
> > >  +
> > >  +	if (dev_conf == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	(*dev->dev_ops->dev_infos_get)(dev, &info);
> > >  +
> > >  +	/* Check dequeue_wait_ns value is in limit */
> > >  +	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_WAIT)) {
> > >  +		if (dev_conf->dequeue_wait_ns < info.min_dequeue_wait_ns ||
> > >  +			dev_conf->dequeue_wait_ns > info.max_dequeue_wait_ns) {
> > >  +			EDEV_LOG_ERR("dev%d invalid dequeue_wait_ns=%d"
> > >  +			" min_dequeue_wait_ns=%d max_dequeue_wait_ns=%d",
> > >  +			dev_id, dev_conf->dequeue_wait_ns,
> > >  +			info.min_dequeue_wait_ns,
> > >  +			info.max_dequeue_wait_ns);
> > >  +			return -EINVAL;
> > >  +		}
> > >  +	}
> > >  +
> > >  +	/* Check nb_events_limit is in limit */
> > >  +	if (dev_conf->nb_events_limit > info.max_num_events) {
> > >  +		EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
> > >  +		dev_id, dev_conf->nb_events_limit, info.max_num_events);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_event_queues is in limit */
> > >  +	if (!dev_conf->nb_event_queues) {
> > >  +		EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero", dev_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +	if (dev_conf->nb_event_queues > info.max_event_queues) {
> > >  +		EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
> > >  +		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_event_ports is in limit */
> > >  +	if (!dev_conf->nb_event_ports) {
> > >  +		EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +	if (dev_conf->nb_event_ports > info.max_event_ports) {
> > >  +		EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
> > >  +		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_event_queue_flows is in limit */
> > >  +	if (!dev_conf->nb_event_queue_flows) {
> > >  +		EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
> > >  +		EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
> > >  +		dev_id, dev_conf->nb_event_queue_flows,
> > >  +		info.max_event_queue_flows);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_event_port_dequeue_depth is in limit */
> > >  +	if (!dev_conf->nb_event_port_dequeue_depth) {
> > >  +		EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero", dev_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +	if (dev_conf->nb_event_port_dequeue_depth >
> > >  +			 info.max_event_port_dequeue_depth) {
> > >  +		EDEV_LOG_ERR("dev%d nb_dequeue_depth=%d > max_dequeue_depth=%d",
> > >  +		dev_id, dev_conf->nb_event_port_dequeue_depth,
> > >  +		info.max_event_port_dequeue_depth);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_event_port_enqueue_depth is in limit */
> > >  +	if (!dev_conf->nb_event_port_enqueue_depth) {
> > >  +		EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero", dev_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +	if (dev_conf->nb_event_port_enqueue_depth >
> > >  +			 info.max_event_port_enqueue_depth) {
> > >  +		EDEV_LOG_ERR("dev%d nb_enqueue_depth=%d > max_enqueue_depth=%d",
> > >  +		dev_id, dev_conf->nb_event_port_enqueue_depth,
> > >  +		info.max_event_port_enqueue_depth);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Copy the dev_conf parameter into the dev structure */
> > >  +	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
> > >  +
> > >  +	/* Setup new number of queues and reconfigure device. */
> > >  +	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
> > >  +	if (diag != 0) {
> > >  +		EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
> > >  +				dev_id, diag);
> > >  +		return diag;
> > >  +	}
> > >  +
> > >  +	/* Setup new number of ports and reconfigure device. */
> > >  +	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
> > >  +	if (diag != 0) {
> > >  +		rte_event_dev_queue_config(dev, 0);
> > >  +		EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
> > >  +				dev_id, diag);
> > >  +		return diag;
> > >  +	}
> > >  +
> > >  +	/* Configure the device */
> > >  +	diag = (*dev->dev_ops->dev_configure)(dev);
> > >  +	if (diag != 0) {
> > >  +		EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
> > >  +		rte_event_dev_queue_config(dev, 0);
> > >  +		rte_event_dev_port_config(dev, 0);
> > >  +	}
> > >  +
> > >  +	dev->data->event_dev_cap = info.event_dev_cap;
> > >  +	return diag;
> > >  +}
> > >  +
> > >  +static inline int
> > >  +is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
> > >  +{
> > >  +	if (queue_id < dev->data->nb_queues && queue_id <
> > >  +				RTE_EVENT_MAX_QUEUES_PER_DEV)
> > >  +		return 1;
> > >  +	else
> > >  +		return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
> > >  +				 struct rte_event_queue_conf *queue_conf)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (queue_conf == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	if (!is_valid_queue(dev, queue_id)) {
> > >  +		EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
> > >  +	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
> > >  +	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +static inline int
> > >  +is_valid_atomic_queue_conf(struct rte_event_queue_conf *queue_conf)
> > >  +{
> > >  +	if (queue_conf && (
> > >  +		((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_TYPE_MASK)
> > >  +			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
> > >  +		((queue_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_TYPE_MASK)
> > >  +			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
> > >  +		))
> > >  +		return 1;
> > >  +	else
> > >  +		return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
> > >  +		      struct rte_event_queue_conf *queue_conf)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	struct rte_event_queue_conf def_conf;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (!is_valid_queue(dev, queue_id)) {
> > >  +		EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check nb_atomic_flows limit */
> > >  +	if (is_valid_atomic_queue_conf(queue_conf)) {
> > >  +		if (queue_conf->nb_atomic_flows == 0 ||
> > >  +		    queue_conf->nb_atomic_flows >
> > >  +			dev->data->dev_conf.nb_event_queue_flows) {
> > >  +			EDEV_LOG_ERR(
> > >  +		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
> > >  +			dev_id, queue_id, queue_conf->nb_atomic_flows,
> > >  +			dev->data->dev_conf.nb_event_queue_flows);
> > >  +			return -EINVAL;
> > >  +		}
> > >  +	}
> > >  +
> > >  +	/* Check nb_atomic_order_sequences limit */
> > >  +	if (is_valid_atomic_queue_conf(queue_conf)) {
> > >  +		if (queue_conf->nb_atomic_order_sequences == 0 ||
> > >  +		    queue_conf->nb_atomic_order_sequences >
> > >  +			dev->data->dev_conf.nb_event_queue_flows) {
> > >  +			EDEV_LOG_ERR(
> > >  +		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
> > >  +			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
> > >  +			dev->data->dev_conf.nb_event_queue_flows);
> > >  +			return -EINVAL;
> > >  +		}
> > >  +	}
> > >  +
> > >  +	if (dev->data->dev_started) {
> > >  +		EDEV_LOG_ERR(
> > >  +		    "device %d must be stopped to allow queue setup", dev_id);
> > >  +		return -EBUSY;
> > >  +	}
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
> > >  +
> > >  +	if (queue_conf == NULL) {
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
> > >  +					-ENOTSUP);
> > >  +		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
> > >  +		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
> > >  +		queue_conf = &def_conf;
> > >  +	}
> > >  +
> > >  +	dev->data->queues_prio[queue_id] = queue_conf->priority;
> > >  +	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
> > >  +}
> > >  +
> > >  +uint8_t
> > >  +rte_event_queue_count(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	return dev->data->nb_queues;
> > >  +}
> > >  +
> > >  +uint8_t
> > >  +rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
> > >  +		return dev->data->queues_prio[queue_id];
> > >  +	else
> > >  +		return RTE_EVENT_QUEUE_PRIORITY_NORMAL;
> > >  +}
> > >  +
> > >  +static inline int
> > >  +is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
> > >  +{
> > >  +	if (port_id < dev->data->nb_ports)
> > >  +		return 1;
> > >  +	else
> > >  +		return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
> > >  +				 struct rte_event_port_conf *port_conf)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (port_conf == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	if (!is_valid_port(dev, port_id)) {
> > >  +		EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
> > >  +	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
> > >  +	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
> > >  +		      struct rte_event_port_conf *port_conf)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	struct rte_event_port_conf def_conf;
> > >  +	int diag;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (!is_valid_port(dev, port_id)) {
> > >  +		EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check new_event_threshold limit */
> > >  +	if ((port_conf && !port_conf->new_event_threshold) ||
> > >  +			(port_conf && port_conf->new_event_threshold >
> > >  +				 dev->data->dev_conf.nb_events_limit)) {
> > >  +		EDEV_LOG_ERR(
> > >  +		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
> > >  +			dev_id, port_id, port_conf->new_event_threshold,
> > >  +			dev->data->dev_conf.nb_events_limit);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check dequeue_depth limit */
> > >  +	if ((port_conf && !port_conf->dequeue_depth) ||
> > >  +			(port_conf && port_conf->dequeue_depth >
> > >  +		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
> > >  +		EDEV_LOG_ERR(
> > >  +		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
> > >  +			dev_id, port_id, port_conf->dequeue_depth,
> > >  +			dev->data->dev_conf.nb_event_port_dequeue_depth);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	/* Check enqueue_depth limit */
> > >  +	if ((port_conf && !port_conf->enqueue_depth) ||
> > >  +			(port_conf && port_conf->enqueue_depth >
> > >  +		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
> > >  +		EDEV_LOG_ERR(
> > >  +		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
> > >  +			dev_id, port_id, port_conf->enqueue_depth,
> > >  +			dev->data->dev_conf.nb_event_port_enqueue_depth);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	if (dev->data->dev_started) {
> > >  +		EDEV_LOG_ERR(
> > >  +		    "device %d must be stopped to allow port setup", dev_id);
> > >  +		return -EBUSY;
> > >  +	}
> > >  +
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
> > >  +
> > >  +	if (port_conf == NULL) {
> > >  +		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
> > >  +					-ENOTSUP);
> > >  +		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
> > >  +		port_conf = &def_conf;
> > >  +	}
> > >  +
> > >  +	dev->data->ports_dequeue_depth[port_id] =
> > >  +			port_conf->dequeue_depth;
> > >  +	dev->data->ports_enqueue_depth[port_id] =
> > >  +			port_conf->enqueue_depth;
> > >  +
> > >  +	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
> > >  +
> > >  +	/* Unlink all the queues from this port (default state after setup) */
> > >  +	if (!diag)
> > >  +		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
> > >  +
> > >  +	if (diag < 0)
> > >  +		return diag;
> > >  +
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +uint8_t
> > >  +rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	return dev->data->ports_dequeue_depth[port_id];
> > >  +}
> > >  +
> > >  +uint8_t
> > >  +rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	return dev->data->ports_enqueue_depth[port_id];
> > >  +}
> > >  +
> > >  +uint8_t
> > >  +rte_event_port_count(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	return dev->data->nb_ports;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_port_link(uint8_t dev_id, uint8_t port_id,
> > >  +		    struct rte_event_queue_link link[], uint16_t nb_links)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	struct rte_event_queue_link all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
> > >  +	uint16_t *links_map;
> > >  +	int i, diag;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);
> > >  +
> > >  +	if (!is_valid_port(dev, port_id)) {
> > >  +		EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	if (link == NULL) {
> > >  +		for (i = 0; i < dev->data->nb_queues; i++) {
> > >  +			all_queues[i].queue_id = i;
> > >  +			all_queues[i].priority =
> > >  +				RTE_EVENT_QUEUE_SERVICE_PRIORITY_NORMAL;
> > >  +		}
> > >  +		link = all_queues;
> > >  +		nb_links = dev->data->nb_queues;
> > >  +	}
> > >  +
> > >  +	for (i = 0; i < nb_links; i++)
> > >  +		if (link[i].queue_id >= RTE_EVENT_MAX_QUEUES_PER_DEV)
> > >  +			return -EINVAL;
> > >  +
> > >  +	diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], link,
> > >  +						 nb_links);
> > >  +	if (diag < 0)
> > >  +		return diag;
> > >  +
> > >  +	links_map = dev->data->links_map;
> > >  +	/* Point links_map to this port specific area */
> > >  +	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
> > >  +	for (i = 0; i < diag; i++)
> > >  +		links_map[link[i].queue_id] = (uint8_t)link[i].priority;
> > >  +
> > >  +	return diag;
> > >  +}
> > >  +
> > >  +#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
> > >  +
> > >  +int
> > >  +rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
> > >  +		      uint8_t queues[], uint16_t nb_unlinks)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
> > >  +	int i, diag;
> > >  +	uint16_t *links_map;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);
> > >  +
> > >  +	if (!is_valid_port(dev, port_id)) {
> > >  +		EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	if (queues == NULL) {
> > >  +		for (i = 0; i < dev->data->nb_queues; i++)
> > >  +			all_queues[i] = i;
> > >  +		queues = all_queues;
> > >  +		nb_unlinks = dev->data->nb_queues;
> > >  +	}
> > >  +
> > >  +	for (i = 0; i < nb_unlinks; i++)
> > >  +		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
> > >  +			return -EINVAL;
> > >  +
> > >  +	diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
> > >  +					nb_unlinks);
> > >  +
> > >  +	if (diag < 0)
> > >  +		return diag;
> > >  +
> > >  +	links_map = dev->data->links_map;
> > >  +	/* Point links_map to this port specific area */
> > >  +	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
> > >  +	for (i = 0; i < diag; i++)
> > >  +		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
> > >  +
> > >  +	return diag;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
> > >  +			struct rte_event_queue_link link[])
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	uint16_t *links_map;
> > >  +	int i, count = 0;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	if (!is_valid_port(dev, port_id)) {
> > >  +		EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
> > >  +		return -EINVAL;
> > >  +	}
> > >  +
> > >  +	links_map = dev->data->links_map;
> > >  +	/* Point links_map to this port specific area */
> > >  +	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
> > >  +	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
> > >  +		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
> > >  +			link[count].queue_id = i;
> > >  +			link[count].priority = (uint8_t)links_map[i];
> > >  +			++count;
> > >  +		}
> > >  +	}
> > >  +	return count;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dequeue_wait_time(uint8_t dev_id, uint64_t ns, uint64_t *wait_ticks)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->wait_time, -ENOTSUP);
> > >  +
> > >  +	if (wait_ticks == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	(*dev->dev_ops->wait_time)(dev, ns, wait_ticks);
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_dump(uint8_t dev_id, FILE *f)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
> > >  +
> > >  +	(*dev->dev_ops->dump)(dev, f);
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_start(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	int diag;
> > >  +
> > >  +	EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
> > >  +
> > >  +	if (dev->data->dev_started != 0) {
> > >  +		EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
> > >  +			dev_id);
> > >  +		return 0;
> > >  +	}
> > >  +
> > >  +	diag = (*dev->dev_ops->dev_start)(dev);
> > >  +	if (diag == 0)
> > >  +		dev->data->dev_started = 1;
> > >  +	else
> > >  +		return diag;
> > >  +
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +void
> > >  +rte_event_dev_stop(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
> > >  +
> > >  +	if (dev->data->dev_started == 0) {
> > >  +		EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
> > >  +			dev_id);
> > >  +		return;
> > >  +	}
> > >  +
> > >  +	dev->data->dev_started = 0;
> > >  +	(*dev->dev_ops->dev_stop)(dev);
> > >  +}
> > >  +
> > >  +int
> > >  +rte_event_dev_close(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> > >  +
> > >  +	/* Device must be stopped before it can be closed */
> > >  +	if (dev->data->dev_started == 1) {
> > >  +		EDEV_LOG_ERR("Device %u must be stopped before closing",
> > >  +				dev_id);
> > >  +		return -EBUSY;
> > >  +	}
> > >  +
> > >  +	return (*dev->dev_ops->dev_close)(dev);
> > >  +}
> > >  +
> > >  +static inline int
> > >  +rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
> > >  +		int socket_id)
> > >  +{
> > >  +	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
> > >  +	const struct rte_memzone *mz;
> > >  +	int n;
> > >  +
> > >  +	/* Generate memzone name */
> > >  +	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
> > >  +			dev_id);
> > >  +	if (n >= (int)sizeof(mz_name))
> > >  +		return -EINVAL;
> > >  +
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> > >  +		mz = rte_memzone_reserve(mz_name,
> > >  +				sizeof(struct rte_eventdev_data),
> > >  +				socket_id, 0);
> > >  +	} else
> > >  +		mz = rte_memzone_lookup(mz_name);
> > >  +
> > >  +	if (mz == NULL)
> > >  +		return -ENOMEM;
> > >  +
> > >  +	*data = mz->addr;
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> > >  +		memset(*data, 0, sizeof(struct rte_eventdev_data));
> > >  +
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +static uint8_t
> > >  +rte_eventdev_find_free_device_index(void)
> > >  +{
> > >  +	uint8_t dev_id;
> > >  +
> > >  +	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
> > >  +		if (rte_eventdevs[dev_id].attached ==
> > >  +				RTE_EVENTDEV_DETACHED)
> > >  +			return dev_id;
> > >  +	}
> > >  +	return RTE_EVENT_MAX_DEVS;
> > >  +}
> > >  +
> > >  +struct rte_eventdev *
> > >  +rte_eventdev_pmd_allocate(const char *name, int socket_id)
> > >  +{
> > >  +	struct rte_eventdev *eventdev;
> > >  +	uint8_t dev_id;
> > >  +
> > >  +	if (rte_eventdev_pmd_get_named_dev(name) != NULL) {
> > >  +		EDEV_LOG_ERR("Event device with name %s already "
> > >  +				"allocated!", name);
> > >  +		return NULL;
> > >  +	}
> > >  +
> > >  +	dev_id = rte_eventdev_find_free_device_index();
> > >  +	if (dev_id == RTE_EVENT_MAX_DEVS) {
> > >  +		EDEV_LOG_ERR("Reached maximum number of event devices");
> > >  +		return NULL;
> > >  +	}
> > >  +
> > >  +	eventdev = &rte_eventdevs[dev_id];
> > >  +
> > >  +	if (eventdev->data == NULL) {
> > >  +		struct rte_eventdev_data *eventdev_data = NULL;
> > >  +
> > >  +		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
> > >  +				socket_id);
> > >  +
> > >  +		if (retval < 0 || eventdev_data == NULL)
> > >  +			return NULL;
> > >  +
> > >  +		eventdev->data = eventdev_data;
> > >  +
> > >  +		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
> > >  +				"%s", name);
> > >  +
> > >  +		eventdev->data->dev_id = dev_id;
> > >  +		eventdev->data->socket_id = socket_id;
> > >  +		eventdev->data->dev_started = 0;
> > >  +
> > >  +		eventdev->attached = RTE_EVENTDEV_ATTACHED;
> > >  +
> > >  +		eventdev_globals.nb_devs++;
> > >  +	}
> > >  +
> > >  +	return eventdev;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_eventdev_pmd_release(struct rte_eventdev *eventdev)
> > >  +{
> > >  +	int ret;
> > >  +
> > >  +	if (eventdev == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	ret = rte_event_dev_close(eventdev->data->dev_id);
> > >  +	if (ret < 0)
> > >  +		return ret;
> > >  +
> > >  +	eventdev->attached = RTE_EVENTDEV_DETACHED;
> > >  +	eventdev_globals.nb_devs--;
> > >  +	eventdev->data = NULL;
> > >  +
> > >  +	return 0;
> > >  +}
> > >  +
> > >  +struct rte_eventdev *
> > >  +rte_eventdev_pmd_vdev_init(const char *name, size_t dev_private_size,
> > >  +		int socket_id)
> > >  +{
> > >  +	struct rte_eventdev *eventdev;
> > >  +
> > >  +	/* Allocate device structure */
> > >  +	eventdev = rte_eventdev_pmd_allocate(name, socket_id);
> > >  +	if (eventdev == NULL)
> > >  +		return NULL;
> > >  +
> > >  +	/* Allocate private device structure */
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> > >  +		eventdev->data->dev_private =
> > >  +				rte_zmalloc_socket("eventdev device private",
> > >  +						dev_private_size,
> > >  +						RTE_CACHE_LINE_SIZE,
> > >  +						socket_id);
> > >  +
> > >  +		if (eventdev->data->dev_private == NULL)
> > >  +			rte_panic("Cannot allocate memzone for private device"
> > >  +					" data");
> > >  +	}
> > >  +
> > >  +	return eventdev;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_eventdev_pmd_pci_probe(struct rte_pci_driver *pci_drv,
> > >  +			struct rte_pci_device *pci_dev)
> > >  +{
> > >  +	struct rte_eventdev_driver *eventdrv;
> > >  +	struct rte_eventdev *eventdev;
> > >  +
> > >  +	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
> > >  +
> > >  +	int retval;
> > >  +
> > >  +	eventdrv = (struct rte_eventdev_driver *)pci_drv;
> > >  +	if (eventdrv == NULL)
> > >  +		return -ENODEV;
> > >  +
> > >  +	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
> > >  +			sizeof(eventdev_name));
> > >  +
> > >  +	eventdev = rte_eventdev_pmd_allocate(eventdev_name,
> > >  +			 pci_dev->device.numa_node);
> > >  +	if (eventdev == NULL)
> > >  +		return -ENOMEM;
> > >  +
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> > >  +		eventdev->data->dev_private =
> > >  +				rte_zmalloc_socket(
> > >  +						"eventdev private structure",
> > >  +						eventdrv->dev_private_size,
> > >  +						RTE_CACHE_LINE_SIZE,
> > >  +						rte_socket_id());
> > >  +
> > >  +		if (eventdev->data->dev_private == NULL)
> > >  +			rte_panic("Cannot allocate memzone for private "
> > >  +					"device data");
> > >  +	}
> > >  +
> > >  +	eventdev->pci_dev = pci_dev;
> > >  +	eventdev->driver = eventdrv;
> > >  +
> > >  +	/* Invoke PMD device initialization function */
> > >  +	retval = (*eventdrv->eventdev_init)(eventdev);
> > >  +	if (retval == 0)
> > >  +		return 0;
> > >  +
> > >  +	EDEV_LOG_ERR("driver %s: event_dev_init(vendor_id=0x%x device_id=0x%x)"
> > >  +			" failed", pci_drv->driver.name,
> > >  +			(unsigned int) pci_dev->id.vendor_id,
> > >  +			(unsigned int) pci_dev->id.device_id);
> > >  +
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> > >  +		rte_free(eventdev->data->dev_private);
> > >  +
> > >  +	eventdev->attached = RTE_EVENTDEV_DETACHED;
> > >  +	eventdev_globals.nb_devs--;
> > >  +
> > >  +	return -ENXIO;
> > >  +}
> > >  +
> > >  +int
> > >  +rte_eventdev_pmd_pci_remove(struct rte_pci_device *pci_dev)
> > >  +{
> > >  +	const struct rte_eventdev_driver *eventdrv;
> > >  +	struct rte_eventdev *eventdev;
> > >  +	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
> > >  +	int ret;
> > >  +
> > >  +	if (pci_dev == NULL)
> > >  +		return -EINVAL;
> > >  +
> > >  +	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
> > >  +			sizeof(eventdev_name));
> > >  +
> > >  +	eventdev = rte_eventdev_pmd_get_named_dev(eventdev_name);
> > >  +	if (eventdev == NULL)
> > >  +		return -ENODEV;
> > >  +
> > >  +	eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
> > >  +	if (eventdrv == NULL)
> > >  +		return -ENODEV;
> > >  +
> > >  +	/* Invoke PMD device uninit function */
> > >  +	if (*eventdrv->eventdev_uninit) {
> > >  +		ret = (*eventdrv->eventdev_uninit)(eventdev);
> > >  +		if (ret)
> > >  +			return ret;
> > >  +	}
> > >  +
> > >  +	/* Free event device */
> > >  +	rte_eventdev_pmd_release(eventdev);
> > >  +
> > >  +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> > >  +		rte_free(eventdev->data->dev_private);
> > >  +
> > >  +	eventdev->pci_dev = NULL;
> > >  +	eventdev->driver = NULL;
> > >  +
> > >  +	return 0;
> > >  +}
> > >  diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
> > >  new file mode 100644
> > >  index 0000000..e9d9b83
> > >  --- /dev/null
> > >  +++ b/lib/librte_eventdev/rte_eventdev_pmd.h
> > >  @@ -0,0 +1,504 @@
> > >  +/*
> > >  + *
> > >  + *   Copyright(c) 2016 Cavium networks. All rights reserved.
> > >  + *
> > >  + *   Redistribution and use in source and binary forms, with or without
> > >  + *   modification, are permitted provided that the following conditions
> > >  + *   are met:
> > >  + *
> > >  + *     * Redistributions of source code must retain the above copyright
> > >  + *       notice, this list of conditions and the following disclaimer.
> > >  + *     * Redistributions in binary form must reproduce the above copyright
> > >  + *       notice, this list of conditions and the following disclaimer in
> > >  + *       the documentation and/or other materials provided with the
> > >  + *       distribution.
> > >  + *     * Neither the name of Cavium networks nor the names of its
> > >  + *       contributors may be used to endorse or promote products derived
> > >  + *       from this software without specific prior written permission.
> > >  + *
> > >  + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> > >  + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> > >  + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> > >  + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> > >  + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> > >  + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> > >  + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> > >  + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> > >  + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> > >  + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> > >  + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> > >  + */
> > >  +
> > >  +#ifndef _RTE_EVENTDEV_PMD_H_
> > >  +#define _RTE_EVENTDEV_PMD_H_
> > >  +
> > >  +/** @file
> > >  + * RTE Event PMD APIs
> > >  + *
> > >  + * @note
> > >  + * These APIs are for use by event PMDs only; user applications should not
> > >  + * call them directly.
> > >  + */
> > >  +
> > >  +#ifdef __cplusplus
> > >  +extern "C" {
> > >  +#endif
> > >  +
> > >  +#include <string.h>
> > >  +
> > >  +#include <rte_dev.h>
> > >  +#include <rte_pci.h>
> > >  +#include <rte_malloc.h>
> > >  +#include <rte_log.h>
> > >  +#include <rte_common.h>
> > >  +
> > >  +#include "rte_eventdev.h"
> > >  +
> > >  +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> > >  +#define RTE_PMD_DEBUG_TRACE(...) \
> > >  +	rte_pmd_debug_trace(__func__, __VA_ARGS__)
> > >  +#else
> > >  +#define RTE_PMD_DEBUG_TRACE(...)
> > >  +#endif
> > >  +
> > >  +/* Logging Macros */
> > >  +#define EDEV_LOG_ERR(fmt, args...) \
> > >  +	RTE_LOG(ERR, EVENTDEV, "%s() line %u: " fmt "\n",  \
> > >  +			__func__, __LINE__, ## args)
> > >  +
> > >  +#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
> > >  +#define EDEV_LOG_DEBUG(fmt, args...) \
> > >  +	RTE_LOG(DEBUG, EVENTDEV, "%s() line %u: " fmt "\n",  \
> > >  +			__func__, __LINE__, ## args)
> > >  +#else
> > >  +#define EDEV_LOG_DEBUG(fmt, args...) (void)0
> > >  +#endif
> > >  +
> > >  +/* Macros to check for valid device */
> > >  +#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
> > >  +	if (!rte_eventdev_pmd_is_valid_dev((dev_id))) { \
> > >  +		EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
> > >  +		return retval; \
> > >  +	} \
> > >  +} while (0)
> > >  +
> > >  +#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
> > >  +	if (!rte_eventdev_pmd_is_valid_dev((dev_id))) { \
> > >  +		EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
> > >  +		return; \
> > >  +	} \
> > >  +} while (0)
> > >  +
> > >  +#define RTE_EVENTDEV_DETACHED  (0)
> > >  +#define RTE_EVENTDEV_ATTACHED  (1)
> > >  +
> > >  +/**
> > >  + * Initialisation function of an event driver invoked for each matching
> > >  + * event PCI device detected during the PCI probing phase.
> > >  + *
> > >  + * @param dev
> > >  + *   The dev pointer is the address of the *rte_eventdev* structure associated
> > >  + *   with the matching device and which has been [automatically] allocated in
> > >  + *   the *rte_event_devices* array.
> > >  + *
> > >  + * @return
> > >  + *   - 0: Success, the device is properly initialised by the driver.
> > >  + *        In particular, the driver MUST have set up the *dev_ops* pointer
> > >  + *        of the *dev* structure.
> > >  + *   - <0: Error code of the device initialisation failure.
> > >  + */
> > >  +typedef int (*eventdev_init_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * Finalisation function of a driver invoked for each matching
> > >  + * PCI device detected during the PCI closing phase.
> > >  + *
> > >  + * @param dev
> > >  + *   The dev pointer is the address of the *rte_eventdev* structure associated
> > >  + *   with the matching device and which has been [automatically] allocated in
> > >  + *   the *rte_event_devices* array.
> > >  + *
> > >  + * @return
> > >  + *   - 0: Success, the device is properly finalised by the driver.
> > >  + *        In particular, the driver MUST free the *dev_ops* pointer
> > >  + *        of the *dev* structure.
> > >  + *   - <0: Error code of the device finalisation failure.
> > >  + */
> > >  +typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * The structure associated with a PMD driver.
> > >  + *
> > >  + * Each driver acts as a PCI driver and is represented by a generic
> > >  + * *event_driver* structure that holds:
> > >  + *
> > >  + * - An *rte_pci_driver* structure (which must be the first field).
> > >  + *
> > >  + * - The *eventdev_init* function invoked for each matching PCI device.
> > >  + *
> > >  + * - The size of the private data to allocate for each matching device.
> > >  + */
> > >  +struct rte_eventdev_driver {
> > >  +	struct rte_pci_driver pci_drv;	/**< The PMD is also a PCI driver. */
> > >  +	unsigned int dev_private_size;	/**< Size of device private data. */
> > >  +
> > >  +	eventdev_init_t eventdev_init;	/**< Device init function. */
> > >  +	eventdev_uninit_t eventdev_uninit; /**< Device uninit function. */
> > >  +};
> > >  +
> > >  +/** Global structure used for maintaining state of allocated event devices */
> > >  +struct rte_eventdev_global {
> > >  +	uint8_t nb_devs;	/**< Number of devices found */
> > >  +	uint8_t max_devs;	/**< Max number of devices */
> > >  +};
> > >  +
> > >  +extern struct rte_eventdev_global *rte_eventdev_globals;
> > >  +/** Pointer to global event devices data structure. */
> > >  +extern struct rte_eventdev *rte_eventdevs;
> > >  +/** The pool of rte_eventdev structures. */
> > >  +
> > >  +/**
> > >  + * Get the rte_eventdev structure device pointer for the named device.
> > >  + *
> > >  + * @param name
> > >  + *   device name to select the device structure.
> > >  + *
> > >  + * @return
> > >  + *   - The rte_eventdev structure pointer for the given device ID.
> > >  + */
> > >  +static inline struct rte_eventdev *
> > >  +rte_eventdev_pmd_get_named_dev(const char *name)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +	unsigned int i;
> > >  +
> > >  +	if (name == NULL)
> > >  +		return NULL;
> > >  +
> > >  +	for (i = 0; i < rte_eventdev_globals->max_devs; i++) {
> > >  +		dev = &rte_eventdevs[i];
> > >  +		if ((dev->attached == RTE_EVENTDEV_ATTACHED) &&
> > >  +				(strcmp(dev->data->name, name) == 0))
> > >  +			return dev;
> > >  +	}
> > >  +
> > >  +	return NULL;
> > >  +}
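
Usage sketch, assuming illustrative names: a PMD detach path can locate
its device by name and hand the slot back through
rte_eventdev_pmd_release(), declared later in this header.

	static int
	my_eventdev_uninit(const char *name)
	{
		struct rte_eventdev *dev;

		dev = rte_eventdev_pmd_get_named_dev(name);
		if (dev == NULL)
			return -ENODEV;

		return rte_eventdev_pmd_release(dev);
	}
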
> > >  +
> > >  +/**
> > >  + * Validate that the event device index refers to a valid, attached device.
> > >  + *
> > >  + * @param dev_id
> > >  + *   Event device index.
> > >  + *
> > >  + * @return
> > >  + *   - 1 if the device index is valid and the device is attached, 0 otherwise.
> > >  + */
> > >  +static inline unsigned
> > >  +rte_eventdev_pmd_is_valid_dev(uint8_t dev_id)
> > >  +{
> > >  +	struct rte_eventdev *dev;
> > >  +
> > >  +	if (dev_id >= rte_eventdev_globals->nb_devs)
> > >  +		return 0;
> > >  +
> > >  +	dev = &rte_eventdevs[dev_id];
> > >  +	if (dev->attached != RTE_EVENTDEV_ATTACHED)
> > >  +		return 0;
> > >  +	else
> > >  +		return 1;
> > >  +}
> > >  +
> > >  +/**
> > >  + * Definitions of all functions exported by a driver through the
> > >  + * generic structure of type *event_dev_ops* supplied in the
> > >  + * *rte_eventdev* structure associated with a device.
> > >  + */
> > >  +
> > >  +/**
> > >  + * Get device information.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param dev_info
> > >  + *   Event device information structure
> > >  + */
> > >  +typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev,
> > >  +		struct rte_event_dev_info *dev_info);
> > >  +
> > >  +/**
> > >  + * Configure a device.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success
> > >  + */
> > >  +typedef int (*eventdev_configure_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * Start a configured device.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success
> > >  + */
> > >  +typedef int (*eventdev_start_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * Stop a configured device.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + */
> > >  +typedef void (*eventdev_stop_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * Close a configured device.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + *
> > >  + * @return
> > >  + * - 0 on success
> > >  + * - (-EAGAIN) if can't close as device is busy
> > >  + */
> > >  +typedef int (*eventdev_close_t)(struct rte_eventdev *dev);
> > >  +
> > >  +/**
> > >  + * Retrieve the default event queue configuration.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param queue_id
> > >  + *   Event queue index
> > >  + * @param[out] queue_conf
> > >  + *   Event queue configuration structure
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_queue_default_conf_get_t)(struct rte_eventdev *dev,
> > >  +		uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
> > >  +
> > >  +/**
> > >  + * Setup an event queue.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param queue_id
> > >  + *   Event queue index
> > >  + * @param queue_conf
> > >  + *   Event queue configuration structure
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success.
> > >  + */
> > >  +typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,
> > >  +		uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
> > >  +
> > >  +/**
> > >  + * Release memory resources allocated by the given event queue.
> > >  + *
> > >  + * @param queue
> > >  + *   Event queue pointer
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_queue_release_t)(void *queue);
> > >  +
> > >  +/**
> > >  + * Retrieve the default event port configuration.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param port_id
> > >  + *   Event port index
> > >  + * @param[out] port_conf
> > >  + *   Event port configuration structure
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_port_default_conf_get_t)(struct rte_eventdev *dev,
> > >  +		uint8_t port_id, struct rte_event_port_conf *port_conf);
> > >  +
> > >  +/**
> > >  + * Setup an event port.
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param port_id
> > >  + *   Event port index
> > >  + * @param port_conf
> > >  + *   Event port configuration structure
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success.
> > >  + */
> > >  +typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
> > >  +		uint8_t port_id, struct rte_event_port_conf *port_conf);
> > >  +
> > >  +/**
> > >  + * Release memory resources allocated by the given event port.
> > >  + *
> > >  + * @param port
> > >  + *   Event port pointer
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_port_release_t)(void *port);
> > >  +
> > >  +/**
> > >  + * Link multiple source event queues to destination event port.
> > >  + *
> > >  + * @param port
> > >  + *   Event port pointer
> > >  + * @param link
> > >  + *   An array of *nb_links* *rte_event_queue_link* structures
> > >  + * @param nb_links
> > >  + *   The number of links to establish
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success.
> > >  + *
> > >  + */
> > >  +typedef int (*eventdev_port_link_t)(void *port,
> > >  +		struct rte_event_queue_link link[], uint16_t nb_links);
> > >  +
> > >  +/**
> > >  + * Unlink multiple source event queues from destination event port.
> > >  + *
> > >  + * @param port
> > >  + *   Event port pointer
> > >  + * @param queues
> > >  + *   An array of *nb_unlinks* event queues to be unlinked from the event port.
> > >  + * @param nb_unlinks
> > >  + *   The number of queues to unlink from the event port
> > >  + *
> > >  + * @return
> > >  + *   Returns 0 on success.
> > >  + *
> > >  + */
> > >  +typedef int (*eventdev_port_unlink_t)(void *port,
> > >  +		uint8_t queues[], uint16_t nb_unlinks);
> > >  +
> > >  +/**
> > >  + * Convert nanoseconds to the *wait* value for rte_event_dequeue()
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param ns
> > >  + *   Wait time in nanosecond
> > >  + * @param[out] wait_ticks
> > >  + *   Value for the *wait* parameter in rte_event_dequeue() function
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_dequeue_wait_time_t)(struct rte_eventdev *dev,
> > >  +		uint64_t ns, uint64_t *wait_ticks);
> > >  +
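
A PMD whose wait counter ticks at a fixed rate could implement this as
plain scaling. Sketch only; the 100 MHz figure is an assumption, nothing
in the spec mandates it.

	#define MY_DEV_WAIT_HZ 100000000ULL /* assumed 100 MHz counter */

	static void
	my_dequeue_wait_time(struct rte_eventdev *dev, uint64_t ns,
			uint64_t *wait_ticks)
	{
		RTE_SET_USED(dev);
		/* may overflow for very large ns; acceptable in a sketch */
		*wait_ticks = ns * MY_DEV_WAIT_HZ / 1000000000ULL;
	}
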
> > >  +/**
> > >  + * Dump internal information
> > >  + *
> > >  + * @param dev
> > >  + *   Event device pointer
> > >  + * @param f
> > >  + *   A pointer to a file for output
> > >  + *
> > >  + */
> > >  +typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f);
> > >  +
> > >  +/** Event device operations function pointer table */
> > >  +struct rte_eventdev_ops {
> > >  +	eventdev_info_get_t dev_infos_get;	/**< Get device info. */
> > >  +	eventdev_configure_t dev_configure;	/**< Configure device. */
> > >  +	eventdev_start_t dev_start;		/**< Start device. */
> > >  +	eventdev_stop_t dev_stop;		/**< Stop device. */
> > >  +	eventdev_close_t dev_close;		/**< Close device. */
> > >  +
> > >  +	eventdev_queue_default_conf_get_t queue_def_conf;
> > >  +	/**< Get default queue configuration. */
> > >  +	eventdev_queue_setup_t queue_setup;
> > >  +	/**< Set up an event queue. */
> > >  +	eventdev_queue_release_t queue_release;
> > >  +	/**< Release an event queue. */
> > >  +
> > >  +	eventdev_port_default_conf_get_t port_def_conf;
> > >  +	/**< Get default port configuration. */
> > >  +	eventdev_port_setup_t port_setup;
> > >  +	/**< Set up an event port. */
> > >  +	eventdev_port_release_t port_release;
> > >  +	/**< Release an event port. */
> > >  +
> > >  +	eventdev_port_link_t port_link;
> > >  +	/**< Link event queues to an event port. */
> > >  +	eventdev_port_unlink_t port_unlink;
> > >  +	/**< Unlink event queues from an event port. */
> > >  +	eventdev_dequeue_wait_time_t wait_time;
> > >  +	/**< Convert nanoseconds to *wait* value for rte_event_dequeue(). */
> > >  +	eventdev_dump_t dump;
> > >  +	/**< Dump internal information. */
> > >  +};
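
A PMD then wires its handlers into one static table and points
dev->dev_ops at it from its init hook. Every my_* symbol below is
hypothetical.

	static const struct rte_eventdev_ops my_eventdev_ops = {
		.dev_infos_get = my_info_get,
		.dev_configure = my_configure,
		.dev_start = my_start,
		.dev_stop = my_stop,
		.dev_close = my_close,
		.queue_def_conf = my_queue_def_conf,
		.queue_setup = my_queue_setup,
		.queue_release = my_queue_release,
		.port_def_conf = my_port_def_conf,
		.port_setup = my_port_setup,
		.port_release = my_port_release,
		.port_link = my_port_link,
		.port_unlink = my_port_unlink,
		.wait_time = my_dequeue_wait_time,
		.dump = my_dump,
	};
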
> > >  +
> > >  +/**
> > >  + * Allocates a new eventdev slot for an event device and returns the pointer
> > >  + * to that slot for the driver to use.
> > >  + *
> > >  + * @param name
> > >  + *   Unique identifier name for each device
> > >  + * @param socket_id
> > >  + *   Socket to allocate resources on.
> > >  + * @return
> > >  + *   - Slot in the *rte_event_devices* array for a new device.
> > >  + */
> > >  +struct rte_eventdev *
> > >  +rte_eventdev_pmd_allocate(const char *name, int socket_id);
> > >  +
> > >  +/**
> > >  + * Release the specified eventdev device.
> > >  + *
> > >  + * @param eventdev
> > >  + * The *eventdev* pointer is the address of the *rte_eventdev* structure.
> > >  + * @return
> > >  + *   - 0 on success, negative on error
> > >  + */
> > >  +int
> > >  +rte_eventdev_pmd_release(struct rte_eventdev *eventdev);
> > >  +
> > >  +/**
> > >  + * Creates a new virtual event device and returns the pointer to that device.
> > >  + *
> > >  + * @param name
> > >  + *   PMD type name
> > >  + * @param dev_private_size
> > >  + *   Size of the event PMD's private data
> > >  + * @param socket_id
> > >  + *   Socket to allocate resources on.
> > >  + *
> > >  + * @return
> > >  + *   - Eventdev pointer if device is successfully created.
> > >  + *   - NULL if device cannot be created.
> > >  + */
> > >  +struct rte_eventdev *
> > >  +rte_eventdev_pmd_vdev_init(const char *name, size_t dev_private_size,
> > >  +		int socket_id);
> > >  +
> > >  +
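
This is the natural entry point for a software PMD. A probe sketch with
assumed names and private struct follows; device-argument parsing and
the vdev driver registration glue are omitted.

	static int
	my_sw_eventdev_probe(const char *name, const char *params)
	{
		struct rte_eventdev *dev;

		RTE_SET_USED(params); /* device args would be parsed here */
		dev = rte_eventdev_pmd_vdev_init(name,
				sizeof(struct my_eventdev_private),
				rte_socket_id());
		if (dev == NULL)
			return -ENOMEM;

		dev->dev_ops = &my_eventdev_ops;
		return 0;
	}
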
> > >  +/**
> > >  + * Wrapper for use by PCI drivers as a .probe function to attach to an
> > >  + * event interface.
> > >  + */
> > >  +int rte_eventdev_pmd_pci_probe(struct rte_pci_driver *pci_drv,
> > >  +			    struct rte_pci_device *pci_dev);
> > >  +
> > >  +/**
> > >  + * Wrapper for use by PCI drivers as a .remove function to detach an
> > >  + * event interface.
> > >  + */
> > >  +int rte_eventdev_pmd_pci_remove(struct rte_pci_device *pci_dev);
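
Putting the pieces together for a PCI PMD: the wrappers slot straight
into the rte_pci_driver embedded in rte_eventdev_driver. The id table
and init hook below are placeholders, not part of this patch.

	static struct rte_eventdev_driver my_pci_eventdev_pmd = {
		.pci_drv = {
			.id_table = my_pci_id_map,
			.probe = rte_eventdev_pmd_pci_probe,
			.remove = rte_eventdev_pmd_pci_remove,
		},
		.eventdev_init = my_eventdev_init,
		.dev_private_size = sizeof(struct my_eventdev_private),
	};
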
> > >  +
> > >  +#ifdef __cplusplus
> > >  +}
> > >  +#endif
> > >  +
> > >  +#endif /* _RTE_EVENTDEV_PMD_H_ */
> > >  diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
> > >  new file mode 100644
> > >  index 0000000..ef40aae
> > >  --- /dev/null
> > >  +++ b/lib/librte_eventdev/rte_eventdev_version.map
> > >  @@ -0,0 +1,39 @@
> > >  +DPDK_17.02 {
> > >  +	global:
> > >  +
> > >  +	rte_eventdevs;
> > >  +
> > >  +	rte_event_dev_count;
> > >  +	rte_event_dev_get_dev_id;
> > >  +	rte_event_dev_socket_id;
> > >  +	rte_event_dev_info_get;
> > >  +	rte_event_dev_configure;
> > >  +	rte_event_dev_start;
> > >  +	rte_event_dev_stop;
> > >  +	rte_event_dev_close;
> > >  +	rte_event_dev_dump;
> > >  +
> > >  +	rte_event_port_default_conf_get;
> > >  +	rte_event_port_setup;
> > >  +	rte_event_port_dequeue_depth;
> > >  +	rte_event_port_enqueue_depth;
> > >  +	rte_event_port_count;
> > >  +	rte_event_port_link;
> > >  +	rte_event_port_unlink;
> > >  +	rte_event_port_links_get;
> > >  +
> > >  +	rte_event_queue_default_conf_get;
> > >  +	rte_event_queue_setup;
> > >  +	rte_event_queue_count;
> > >  +	rte_event_queue_priority;
> > >  +
> > >  +	rte_event_dequeue_wait_time;
> > >  +
> > >  +	rte_eventdev_pmd_allocate;
> > >  +	rte_eventdev_pmd_release;
> > >  +	rte_eventdev_pmd_vdev_init;
> > >  +	rte_eventdev_pmd_pci_probe;
> > >  +	rte_eventdev_pmd_pci_remove;
> > >  +
> > >  +	local: *;
> > >  +};
> > >  diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> > >  index f75f0e2..716725a 100644
> > >  --- a/mk/rte.app.mk
> > >  +++ b/mk/rte.app.mk
> > >  @@ -93,6 +93,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF)           += -lrte_mbuf
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_NET)            += -lrte_net
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER)          += -lrte_ethdev
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV)      += -lrte_cryptodev
> > >  +_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV)       += -lrte_eventdev
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL)        += -lrte_mempool
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_RING)           += -lrte_ring
> > >   _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrte_eal
> > >  --
> > >  2.5.5
> > 

