[dpdk-dev] [PATCH 18/27] event/dlb: add queue setup

McDaniel, Timothy timothy.mcdaniel at intel.com
Fri Jun 12 23:24:25 CEST 2020


Configure load-balanced queues in hardware at queue setup time,
including a best-effort reallocation of sequence number groups for
ordered queues. Directed queue configuration is deferred until link
time, when the queue's directed port ID is known.
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel at intel.com>
---
 drivers/event/dlb/dlb.c | 295 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 295 insertions(+)
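
For reference, here is a minimal sketch of how an application reaches this
code through the eventdev API. The device ID, queue ID, and counts below are
illustrative only and do not come from this patch:

    #include <rte_eventdev.h>

    /* Set up one ordered queue; assumes the device was already configured
     * with rte_event_dev_configure(). nb_atomic_order_sequences drives the
     * sequence number group (re)allocation added by this patch.
     */
    static int
    setup_ordered_queue(uint8_t dev_id, uint8_t queue_id)
    {
        struct rte_event_queue_conf conf = {
            .schedule_type = RTE_SCHED_TYPE_ORDERED,
            .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
            .nb_atomic_flows = 1024,
            .nb_atomic_order_sequences = 64,
        };

        return rte_event_queue_setup(dev_id, queue_id, &conf);
    }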

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index adaeafedc..2e8000047 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -223,6 +223,65 @@ set_num_dir_credits(const char *key __rte_unused,
 	return 0;
 }
 
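+/* Configure a load-balanced (LDB) queue in hardware. Returns the hardware
+ * queue ID on success, or a negative error code on failure.
+ */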
+static int32_t
+dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
+			struct dlb_queue *queue,
+			const struct rte_event_queue_conf *evq_conf)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_ldb_queue_args cfg;
+	struct dlb_cmd_response response = {0};
+	int32_t ret;
+	uint32_t qm_qid;
+	int sched_type = -1;
+
+	if (evq_conf == NULL)
+		return -EINVAL;
+
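+	/* An ALL_TYPES queue has no fixed schedule type; infer ordered vs.
+	 * parallel from whether the application requested sequence numbers.
+	 */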
+	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
+		if (evq_conf->nb_atomic_order_sequences != 0)
+			sched_type = RTE_SCHED_TYPE_ORDERED;
+		else
+			sched_type = RTE_SCHED_TYPE_PARALLEL;
+	} else {
+		sched_type = evq_conf->schedule_type;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
+	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
+	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
+
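+	/* Sequence numbers only apply to ordered queues; all other types get
+	 * a fixed QID inflight allotment instead.
+	 */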
+	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
+		cfg.num_sequence_numbers = 0;
+		cfg.num_qid_inflights = 2048;
+	}
+
+	ret = dlb_iface_ldb_queue_create(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return -EINVAL;
+	}
+
+	qm_qid = response.id;
+
+	/* Save off queue config for debug, resource lookups, and reconfig */
+	queue->num_qid_inflights = cfg.num_qid_inflights;
+	queue->num_atm_inflights = cfg.num_atomic_inflights;
+
+	queue->sched_type = sched_type;
+	queue->config_state = DLB_CONFIGURED;
+
+	DLB_LOG_DBG("Created LB event queue %u, nb_inflights=%u, nb_seq=%u, qid inflights=%u\n",
+		    qm_qid,
+		    cfg.num_atomic_inflights,
+		    cfg.num_sequence_numbers,
+		    cfg.num_qid_inflights);
+
+	return qm_qid;
+}
+
 /* VDEV-only notes:
  * This function first unmaps all memory mappings and closes the
  * domain's file descriptor, which causes the driver to reset the
@@ -444,6 +503,7 @@ dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
 }
 
 /* End HW specific */
+
 static void
 dlb_eventdev_info_get(struct rte_eventdev *dev,
 		      struct rte_event_dev_info *dev_info)
@@ -641,6 +701,240 @@ dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
 	queue_conf->priority = 0;
 }
 
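+/* Return the group's current sequence number allocation, or a negative
+ * error code.
+ */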
+static int32_t
+dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_allocation_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
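+/* Set the group's sequence number allocation. */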
+static int
+dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_set_sn_allocation_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	cfg.num = num;
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_set_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return ret;
+}
+
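+/* Return the number of the group's slots currently in use, or a negative
+ * error code.
+ */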
+static int32_t
+dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_occupancy_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_occupancy(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+/* Query the current sequence number allocations and, if they conflict with
+ * the requested LDB queue configuration, attempt to re-allocate sequence
+ * numbers. This is best-effort; if it fails, the subsequent load-balanced
+ * queue configuration will also fail and report the error.
+ */
+static void
+dlb_program_sn_allocation(struct dlb_eventdev *dlb,
+			  const struct rte_event_queue_conf *queue_conf)
+{
+	int grp_occupancy[DLB_NUM_SN_GROUPS];
+	int grp_alloc[DLB_NUM_SN_GROUPS];
+	int i, sequence_numbers;
+
+	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
+
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		int total_slots;
+
+		grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
+		if (grp_alloc[i] < 0)
+			return;
+
+		total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
+
+		grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
+		if (grp_occupancy[i] < 0)
+			return;
+
+		/* DLB has at least one available slot for the requested
+		 * sequence numbers, so no further configuration required.
+		 */
+		if (grp_alloc[i] == sequence_numbers &&
+		    grp_occupancy[i] < total_slots)
+			return;
+	}
+
+	/* None of the sequence number groups are configured for the requested
+	 * sequence numbers, so we have to reconfigure one of them. This is
+	 * only possible if a group is not in use.
+	 */
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		if (grp_occupancy[i] == 0)
+			break;
+	}
+
+	if (i == DLB_NUM_SN_GROUPS) {
+		DLB_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
+			    __func__, sequence_numbers);
+		return;
+	}
+
+	/* Attempt to configure group i with the requested number of sequence
+	 * numbers. Ignore the return value -- if this fails, the error will be
+	 * caught during subsequent queue configuration.
+	 */
+	dlb_set_sn_allocation(dlb, i, sequence_numbers);
+}
+
+static int
+dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
+			     struct dlb_eventdev_queue *ev_queue,
+			     const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int32_t qm_qid;
+
+	if (queue_conf->nb_atomic_order_sequences)
+		dlb_program_sn_allocation(dlb, queue_conf);
+
+	qm_qid = dlb_hw_create_ldb_queue(dlb,
+					 &ev_queue->qm_queue,
+					 queue_conf);
+	if (qm_qid < 0) {
+		DLB_LOG_ERR("Failed to create the load-balanced queue\n");
+
+		return qm_qid;
+	}
+
+	dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+	ev_queue->qm_queue.id = qm_qid;
+
+	return 0;
+}
+
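+/* Count the directed queues that have been set up so far. */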
+static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
+{
+	int i, num = 0;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].setup_done &&
+		    dlb->ev_queues[i].qm_queue.is_directed)
+			num++;
+	}
+
+	return num;
+}
+
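+/* Invalidate all port links to this queue and reset its link count. */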
+static void
+dlb_queue_link_teardown(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *ev_queue)
+{
+	struct dlb_eventdev_port *ev_port;
+	int i, j;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		ev_port = &dlb->ev_ports[i];
+
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+			if (!ev_port->link[j].valid ||
+			    ev_port->link[j].queue_id != ev_queue->id)
+				continue;
+
+			ev_port->link[j].valid = false;
+			ev_port->num_links--;
+		}
+	}
+
+	ev_queue->num_links = 0;
+}
+
+static int
+dlb_eventdev_queue_setup(struct rte_eventdev *dev,
+			 uint8_t ev_qid,
+			 const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_eventdev_queue *ev_queue;
+	int ret;
+
+	if (!queue_conf)
+		return -EINVAL;
+
+	if (ev_qid >= dlb->num_queues)
+		return -EINVAL;
+
+	ev_queue = &dlb->ev_queues[ev_qid];
+
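+	/* A single-link queue maps to a hardware directed queue; all other
+	 * queues are load-balanced.
+	 */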
+	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
+		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+	ev_queue->id = ev_qid;
+	ev_queue->conf = *queue_conf;
+
+	if (!ev_queue->qm_queue.is_directed) {
+		ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
+	} else {
+		/* The directed queue isn't set up until link time, at which
+		 * point we know its directed port ID. Directed queue setup
+		 * will only fail if this queue is already set up or there are
+		 * no directed queues left to configure.
+		 */
+		ret = 0;
+
+		ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
+
+		if (ev_queue->setup_done ||
+		    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
+			ret = -EINVAL;
+	}
+
+	/* Tear down pre-existing port->queue links */
+	if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
+		dlb_queue_link_teardown(dlb, ev_queue);
+
+	if (!ret)
+		ev_queue->setup_done = true;
+
+	return ret;
+}
+
 static int
 set_dev_id(const char *key __rte_unused,
 	   const char *value,
@@ -718,6 +1012,7 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 		.dev_infos_get    = dlb_eventdev_info_get,
 		.dev_configure    = dlb_eventdev_configure,
 		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
+		.queue_setup      = dlb_eventdev_queue_setup,
 		.port_def_conf    = dlb_eventdev_port_default_conf_get,
 	};
 
-- 
2.13.6
