[dpdk-dev] [PATCH 23/27] event/dlb: add eventdev_start

McDaniel, Timothy timothy.mcdaniel at intel.com
Fri Jun 12 23:24:30 CEST 2020


Add the eventdev start entry point. dlb_eventdev_start() reapplies any
previously configured (but not reconfigured) queues and ports, applies the
port->queue links that the PMD defers until start time, verifies that every
event port is set up and every event queue has at least one link, and then
starts the scheduling domain.

Change-Id: I9dcf8cf95c1001e09ab3e44e3fbd2ccbf0ddec72
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel at intel.com>
---
 drivers/event/dlb/dlb.c | 142 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 142 insertions(+)
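
For reviewers: the entry point added here is exercised by the standard
eventdev bring-up sequence, with the DLB-specific detail (noted in the code)
that port->queue links requested via rte_event_port_link() are only pushed to
hardware when the device is started. The sketch below uses only the public
eventdev API and is illustrative; the device id, queue/port counts, helper
name, and use of default configs are assumptions, not part of this patch.

/* Illustrative bring-up of one queue and one port on eventdev dev_id,
 * ending in rte_event_dev_start(), which lands in dlb_eventdev_start().
 * Error handling is collapsed to a single check per call for brevity.
 */
#include <rte_eventdev.h>

static int
setup_and_start(uint8_t dev_id)
{
	struct rte_event_dev_config dev_conf = {0};
	struct rte_event_queue_conf q_conf;
	struct rte_event_port_conf p_conf;
	struct rte_event_dev_info info;
	uint8_t queue_id = 0, port_id = 0;
	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;

	if (rte_event_dev_info_get(dev_id, &info))
		return -1;

	dev_conf.nb_event_queues = 1;
	dev_conf.nb_event_ports = 1;
	dev_conf.nb_events_limit = info.max_num_events;
	dev_conf.nb_event_queue_flows = info.max_event_queue_flows;
	dev_conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	dev_conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	dev_conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	if (rte_event_dev_configure(dev_id, &dev_conf))
		return -1;

	if (rte_event_queue_default_conf_get(dev_id, queue_id, &q_conf) ||
	    rte_event_queue_setup(dev_id, queue_id, &q_conf))
		return -1;

	if (rte_event_port_default_conf_get(dev_id, port_id, &p_conf) ||
	    rte_event_port_setup(dev_id, port_id, &p_conf))
		return -1;

	/* The DLB PMD records this link and applies it in dev_start. */
	if (rte_event_port_link(dev_id, port_id, &queue_id, &prio, 1) != 1)
		return -1;

	/* Reapplies prior config, applies deferred links, validates that all
	 * ports are set up and all queues are linked, then starts the domain.
	 */
	return rte_event_dev_start(dev_id);
}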

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 2d7f4479a..70bd2b4d6 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -1162,6 +1162,72 @@ dlb_eventdev_info_get(struct rte_eventdev *dev,
 	*dev_info = evdev_dlb_default_info;
 }
 
+static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev);
+
+static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev);
+
+static int
+dlb_eventdev_start(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_start_domain_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret, i;
+
+	rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+	if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
+		DLB_LOG_ERR("bad state %d for dev_start\n",
+			    (int)dlb->run_state);
+		rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+		return -EINVAL;
+	}
+	dlb->run_state = DLB_RUN_STATE_STARTING;
+	rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+	/* If the device was configured more than once, some event ports and/or
+	 * queues may need to be reconfigured.
+	 */
+	ret = dlb_eventdev_reapply_configuration(dev);
+	if (ret)
+		return ret;
+
+	/* The DLB PMD delays port links until the device is started. */
+	ret = dlb_eventdev_apply_port_links(dev);
+	if (ret)
+		return ret;
+
+	cfg.response = (uintptr_t)&response;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		if (!dlb->ev_ports[i].setup_done) {
+			DLB_LOG_ERR("dlb: port %d not setup", i);
+			return -ESTALE;
+		}
+	}
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].num_links == 0) {
+			DLB_LOG_ERR("dlb: queue %d is not linked", i);
+			return -ENOLINK;
+		}
+	}
+
+	ret = dlb_iface_sched_domain_start(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	dlb->run_state = DLB_RUN_STATE_STARTED;
+	DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
+
+	return 0;
+}
+
 static inline int
 dlb_check_enqueue_sw_credits(struct dlb_eventdev *dlb,
 			     struct dlb_eventdev_port *ev_port)
@@ -2058,6 +2124,40 @@ dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
 }
 
 static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int i;
+
+	/* Perform requested port->queue links */
+	for (i = 0; i < dlb->num_ports; i++) {
+		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+		int j;
+
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+			struct dlb_eventdev_queue *ev_queue;
+			uint8_t prio, queue_id;
+
+			if (!ev_port->link[j].valid)
+				continue;
+
+			prio = ev_port->link[j].priority;
+			queue_id = ev_port->link[j].queue_id;
+
+			if (dlb_validate_port_link(ev_port, queue_id, true, j))
+				return -EINVAL;
+
+			ev_queue = &dlb->ev_queues[queue_id];
+
+			if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
 dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
 		       const uint8_t queues[], const uint8_t priorities[],
 		       uint16_t nb_links)
@@ -2538,6 +2638,47 @@ dlb_eventdev_port_setup(struct rte_eventdev *dev,
 }
 
 static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int ret, i;
+
+	/* If an event queue or port was previously configured, but hasn't been
+	 * reconfigured, reapply its original configuration.
+	 */
+	for (i = 0; i < dlb->num_queues; i++) {
+		struct dlb_eventdev_queue *ev_queue;
+
+		ev_queue = &dlb->ev_queues[i];
+
+		if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
+			continue;
+
+		ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
+		if (ret < 0) {
+			DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+
+		if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
+			continue;
+
+		ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
+		if (ret < 0) {
+			DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
+				    i);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
 dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 			 uint8_t queues[], uint16_t nb_unlinks)
 {
@@ -2739,6 +2880,7 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
 		.dev_infos_get    = dlb_eventdev_info_get,
 		.dev_configure    = dlb_eventdev_configure,
+		.dev_start        = dlb_eventdev_start,
 		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
 		.queue_setup      = dlb_eventdev_queue_setup,
 		.queue_release    = dlb_eventdev_queue_release,
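
A note on the failure modes introduced above: dlb_eventdev_start() returns
-EINVAL if the device is not in the stopped state or a deferred link cannot
be applied, -ESTALE if an event port was never set up, and -ENOLINK if an
event queue has no linked port; rte_event_dev_start() propagates the PMD's
return value to the caller. The sketch below is illustrative only; the helper
name and messages are not part of this patch.

/* Illustrative mapping of dlb_eventdev_start() error codes to
 * application-level diagnostics.
 */
#include <errno.h>
#include <stdio.h>
#include <rte_eventdev.h>

static int
start_evdev_or_report(uint8_t dev_id)
{
	int ret = rte_event_dev_start(dev_id);

	if (ret == 0)
		return 0;

	if (ret == -ESTALE)
		fprintf(stderr, "evdev %d: an event port was not set up\n", dev_id);
	else if (ret == -ENOLINK)
		fprintf(stderr, "evdev %d: an event queue has no linked port\n", dev_id);
	else if (ret == -EINVAL)
		fprintf(stderr, "evdev %d: device not stopped, or a deferred link failed\n", dev_id);
	else
		fprintf(stderr, "evdev %d: start failed: %d\n", dev_id, ret);

	return ret;
}
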
-- 
2.13.6


