[dpdk-dev] [PATCH v5 14/23] event/dlb2: add port link

Timothy McDaniel timothy.mcdaniel at intel.com
Fri Oct 30 10:43:31 CET 2020


Add the port link entry point. Directed queues are identified and
created at this stage; their setup is deferred until link time, at
which point the directed port ID is known. Directed queue setup fails
only if the queue is already set up or there are no directed queues
left to configure.
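
Below is a minimal application-level sketch of the link flow this
entry point serves. The device/port/queue IDs and the helper name are
hypothetical; note that the PMD accepts at most one linked queue per
directed port.

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_eventdev.h>

    /* Link two load-balanced event queues to event port 0 at normal
     * priority. rte_event_port_link() returns the number of links
     * actually established; on a partial failure that value also
     * indexes the offending queue and rte_errno holds the cause.
     */
    static int
    link_example(uint8_t dev_id)
    {
        const uint8_t queues[2] = {0, 1};
        const uint8_t priorities[2] = {
            RTE_EVENT_DEV_PRIORITY_NORMAL,
            RTE_EVENT_DEV_PRIORITY_NORMAL,
        };
        int nb_linked;

        nb_linked = rte_event_port_link(dev_id, 0, queues,
                                        priorities, 2);
        if (nb_linked != 2)
            printf("linked %d of 2 queues (rte_errno %d)\n",
                   nb_linked, rte_errno);

        return nb_linked;
    }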

Signed-off-by: Timothy McDaniel <timothy.mcdaniel at intel.com>
Reviewed-by: Gage Eads <gage.eads at intel.com>
---
 drivers/event/dlb2/dlb2.c                  | 302 ++++++++++++++
 drivers/event/dlb2/dlb2_iface.c            |   6 +
 drivers/event/dlb2/dlb2_iface.h            |   6 +
 drivers/event/dlb2/pf/base/dlb2_resource.c | 633 +++++++++++++++++++++++++++++
 drivers/event/dlb2/pf/dlb2_main.c          |  10 +
 drivers/event/dlb2/pf/dlb2_pf.c            |  50 +++
 6 files changed, 1007 insertions(+)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 9e8dcce..c6593c7 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1496,6 +1496,307 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
 	return 0;
 }
 
+static int16_t
+dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
+			    uint32_t qm_port_id,
+			    uint16_t qm_qid,
+			    uint8_t priority)
+{
+	struct dlb2_map_qid_args cfg;
+	int32_t ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	/* Build message */
+	cfg.port_id = qm_port_id;
+	cfg.qid = qm_qid;
+	cfg.priority = EV_TO_DLB2_PRIO(priority);
+
+	ret = dlb2_iface_map_qid(handle, &cfg);
+	if (ret < 0) {
+		DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
+			     ret, dlb2_error_strings[cfg.response.status]);
+		DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
+			     handle->domain_id, cfg.port_id,
+			     cfg.qid,
+			     cfg.priority);
+	} else {
+		DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
+			     qm_qid, qm_port_id);
+	}
+
+	return ret;
+}
+
+static int
+dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
+			  struct dlb2_eventdev_port *ev_port,
+			  struct dlb2_eventdev_queue *ev_queue,
+			  uint8_t priority)
+{
+	int first_avail = -1;
+	int ret, i;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (ev_port->link[i].valid) {
+			if (ev_port->link[i].queue_id == ev_queue->id &&
+			    ev_port->link[i].priority == priority) {
+				if (ev_port->link[i].mapped)
+					return 0; /* already mapped */
+				first_avail = i;
+			}
+		} else if (first_avail == -1)
+			first_avail = i;
+	}
+	if (first_avail == -1) {
+		DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
+			     ev_port->qm_port.id);
+		return -EINVAL;
+	}
+
+	ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
+					  ev_port->qm_port.id,
+					  ev_queue->qm_queue.id,
+					  priority);
+
+	if (!ret)
+		ev_port->link[first_avail].mapped = true;
+
+	return ret;
+}
+
+static int32_t
+dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
+			 struct dlb2_eventdev_queue *ev_queue,
+			 int32_t qm_port_id)
+{
+	struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+	struct dlb2_create_dir_queue_args cfg;
+	int32_t ret;
+
+	/* The directed port is always configured before its queue */
+	cfg.port_id = qm_port_id;
+
+	if (ev_queue->depth_threshold == 0) {
+		cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
+		ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
+	} else
+		cfg.depth_threshold = ev_queue->depth_threshold;
+
+	ret = dlb2_iface_dir_queue_create(handle, &cfg);
+	if (ret < 0) {
+		DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
+			     ret, dlb2_error_strings[cfg.response.status]);
+		return -EINVAL;
+	}
+
+	return cfg.response.id;
+}
+
+static int
+dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
+			      struct dlb2_eventdev_queue *ev_queue,
+			      struct dlb2_eventdev_port *ev_port)
+{
+	int32_t qm_qid;
+
+	qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
+
+	if (qm_qid < 0) {
+		DLB2_LOG_ERR("Failed to create the DIR queue\n");
+		return qm_qid;
+	}
+
+	dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+	ev_queue->qm_queue.id = qm_qid;
+
+	return 0;
+}
+
+static int
+dlb2_do_port_link(struct rte_eventdev *dev,
+		  struct dlb2_eventdev_queue *ev_queue,
+		  struct dlb2_eventdev_port *ev_port,
+		  uint8_t prio)
+{
+	struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+	int err;
+
+	/* Don't link until start time. */
+	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
+		return 0;
+
+	if (ev_queue->qm_queue.is_directed)
+		err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
+	else
+		err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
+
+	if (err) {
+		DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
+			     ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
+			     ev_queue->id, ev_port->id);
+
+		rte_errno = err;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
+			uint8_t queue_id,
+			bool link_exists,
+			int index)
+{
+	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
+	struct dlb2_eventdev_queue *ev_queue;
+	bool port_is_dir, queue_is_dir;
+
+	if (queue_id >= dlb2->num_queues) {
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	ev_queue = &dlb2->ev_queues[queue_id];
+
+	if (!ev_queue->setup_done &&
+	    ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	port_is_dir = ev_port->qm_port.is_directed;
+	queue_is_dir = ev_queue->qm_queue.is_directed;
+
+	if (port_is_dir != queue_is_dir) {
+		DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
+			     queue_is_dir ? "DIR" : "LDB", ev_queue->id,
+			     port_is_dir ? "DIR" : "LDB", ev_port->id);
+
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	/* Check if there is space for the requested link */
+	if (!link_exists && index == -1) {
+		DLB2_LOG_ERR("no space for new link\n");
+		rte_errno = -ENOSPC;
+		return -1;
+	}
+
+	/* Check if the directed port is already linked */
+	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
+	    !link_exists) {
+		DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
+			     ev_port->id);
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	/* Check if the directed queue is already linked */
+	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
+	    !link_exists) {
+		DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
+			     ev_queue->id);
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
+			const uint8_t queues[], const uint8_t priorities[],
+			uint16_t nb_links)
+
+{
+	struct dlb2_eventdev_port *ev_port = event_port;
+	struct dlb2_eventdev *dlb2;
+	int i, j;
+
+	RTE_SET_USED(dev);
+
+	if (ev_port == NULL) {
+		DLB2_LOG_ERR("dlb2: evport is NULL\n");
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (!ev_port->setup_done &&
+	    ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
+		DLB2_LOG_ERR("dlb2: evport not setup\n");
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
+	 * queues pointer.
+	 */
+	if (nb_links == 0) {
+		DLB2_LOG_DBG("dlb2: nb_links is 0\n");
+		return 0; /* Ignore and return success */
+	}
+
+	dlb2 = ev_port->dlb2;
+
+	DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
+		     nb_links,
+		     ev_port->qm_port.is_directed ? "DIR" : "LDB",
+		     ev_port->id);
+
+	for (i = 0; i < nb_links; i++) {
+		struct dlb2_eventdev_queue *ev_queue;
+		uint8_t queue_id, prio;
+		bool found = false;
+		int index = -1;
+
+		queue_id = queues[i];
+		prio = priorities[i];
+
+		/* Check if the link already exists. */
+		for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			if (ev_port->link[j].valid) {
+				if (ev_port->link[j].queue_id == queue_id) {
+					found = true;
+					index = j;
+					break;
+				}
+			} else if (index == -1) {
+				index = j;
+			}
+
+		/* no free slot and no existing link: cannot link */
+		if (index == -1)
+			break;
+
+		/* Check if already linked at the requested priority */
+		if (found && ev_port->link[j].priority == prio)
+			continue;
+
+		if (dlb2_validate_port_link(ev_port, queue_id, found, index))
+			break; /* return index of offending queue */
+
+		ev_queue = &dlb2->ev_queues[queue_id];
+
+		if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
+			break; /* return index of offending queue */
+
+		ev_queue->num_links++;
+
+		ev_port->link[index].queue_id = queue_id;
+		ev_port->link[index].priority = prio;
+		ev_port->link[index].valid = true;
+		/* Count only new links; an existing entry was a prio change */
+		if (!found)
+			ev_port->num_links++;
+	}
+	return i;
+}
+
 static void
 dlb2_entry_points_init(struct rte_eventdev *dev)
 {
@@ -1507,6 +1808,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
 		.queue_setup      = dlb2_eventdev_queue_setup,
 		.port_def_conf    = dlb2_eventdev_port_default_conf_get,
 		.port_setup       = dlb2_eventdev_port_setup,
+		.port_link        = dlb2_eventdev_port_link,
 		.dump             = dlb2_eventdev_dump,
 		.xstats_get       = dlb2_eventdev_xstats_get,
 		.xstats_get_names = dlb2_eventdev_xstats_get_names,
diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c
index 01818c7..7d1ba73 100644
--- a/drivers/event/dlb2/dlb2_iface.c
+++ b/drivers/event/dlb2/dlb2_iface.c
@@ -51,3 +51,9 @@ int (*dlb2_iface_ldb_port_create)(struct dlb2_hw_dev *handle,
 int (*dlb2_iface_dir_port_create)(struct dlb2_hw_dev *handle,
 				  struct dlb2_create_dir_port_args *cfg,
 				  enum dlb2_cq_poll_modes poll_mode);
+
+int (*dlb2_iface_dir_queue_create)(struct dlb2_hw_dev *handle,
+				   struct dlb2_create_dir_queue_args *cfg);
+
+int (*dlb2_iface_map_qid)(struct dlb2_hw_dev *handle,
+			  struct dlb2_map_qid_args *cfg);
diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h
index a6b923a..4eddd14 100644
--- a/drivers/event/dlb2/dlb2_iface.h
+++ b/drivers/event/dlb2/dlb2_iface.h
@@ -50,4 +50,10 @@ extern int (*dlb2_iface_ldb_port_create)(struct dlb2_hw_dev *handle,
 extern int (*dlb2_iface_dir_port_create)(struct dlb2_hw_dev *handle,
 					 struct dlb2_create_dir_port_args *cfg,
 					 enum dlb2_cq_poll_modes poll_mode);
+
+extern int (*dlb2_iface_dir_queue_create)(struct dlb2_hw_dev *handle,
+					struct dlb2_create_dir_queue_args *cfg);
+
+extern int (*dlb2_iface_map_qid)(struct dlb2_hw_dev *handle,
+				 struct dlb2_map_qid_args *cfg);
 #endif /* _DLB2_IFACE_H_ */
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index a633d3e..14ad262 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -4895,3 +4895,636 @@ int dlb2_hw_create_dir_port(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+static void dlb2_configure_dir_queue(struct dlb2_hw *hw,
+				     struct dlb2_hw_domain *domain,
+				     struct dlb2_dir_pq_pair *queue,
+				     struct dlb2_create_dir_queue_args *args,
+				     bool vdev_req,
+				     unsigned int vdev_id)
+{
+	union dlb2_sys_dir_vasqid_v r0 = { {0} };
+	union dlb2_sys_dir_qid_its r1 = { {0} };
+	union dlb2_lsp_qid_dir_depth_thrsh r2 = { {0} };
+	union dlb2_sys_dir_qid_v r5 = { {0} };
+
+	unsigned int offs;
+
+	/* QID write permissions are turned on when the domain is started */
+	r0.field.vasqid_v = 0;
+
+	offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES +
+		queue->id.phys_id;
+
+	DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
+
+	/* Don't timestamp QEs that pass through this queue */
+	r1.field.qid_its = 0;
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
+		    r1.val);
+
+	r2.field.thresh = args->depth_threshold;
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
+		    r2.val);
+
+	if (vdev_req) {
+		union dlb2_sys_vf_dir_vqid_v r3 = { {0} };
+		union dlb2_sys_vf_dir_vqid2qid r4 = { {0} };
+
+		offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id;
+
+		r3.field.vqid_v = 1;
+
+		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(offs), r3.val);
+
+		r4.field.qid = queue->id.phys_id;
+
+		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(offs), r4.val);
+	}
+
+	r5.field.qid_v = 1;
+
+	DLB2_CSR_WR(hw, DLB2_SYS_DIR_QID_V(queue->id.phys_id), r5.val);
+
+	queue->queue_configured = true;
+}
+
+static void
+dlb2_log_create_dir_queue_args(struct dlb2_hw *hw,
+			       u32 domain_id,
+			       struct dlb2_create_dir_queue_args *args,
+			       bool vdev_req,
+			       unsigned int vdev_id)
+{
+	DLB2_HW_DBG(hw, "DLB2 create directed queue arguments:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+	DLB2_HW_DBG(hw, "\tPort ID:   %d\n", args->port_id);
+}
+
+static int
+dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw,
+				  u32 domain_id,
+				  struct dlb2_create_dir_queue_args *args,
+				  struct dlb2_cmd_response *resp,
+				  bool vdev_req,
+				  unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+	if (domain == NULL) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+		return -EINVAL;
+	}
+
+	if (domain->started) {
+		resp->status = DLB2_ST_DOMAIN_STARTED;
+		return -EINVAL;
+	}
+
+	/*
+	 * If the user claims the port is already configured, validate the port
+	 * ID, its domain, and whether the port is configured.
+	 */
+	if (args->port_id != -1) {
+		struct dlb2_dir_pq_pair *port;
+
+		port = dlb2_get_domain_used_dir_pq(args->port_id,
+						   vdev_req,
+						   domain);
+
+		if (port == NULL || port->domain_id.phys_id !=
+				domain->id.phys_id || !port->port_configured) {
+			resp->status = DLB2_ST_INVALID_PORT_ID;
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * If the queue's port is not configured, validate that a free
+	 * port-queue pair is available.
+	 */
+	if (args->port_id == -1 &&
+	    dlb2_list_empty(&domain->avail_dir_pq_pairs)) {
+		resp->status = DLB2_ST_DIR_QUEUES_UNAVAILABLE;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * dlb2_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
+ * @hw:	Contains the current state of the DLB2 hardware.
+ * @domain_id: Domain ID
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ * @vdev_req: Request came from a virtual device.
+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
+ *
+ * Return: 0 on success, < 0 on error. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb2_hw_create_dir_queue(struct dlb2_hw *hw,
+			     u32 domain_id,
+			     struct dlb2_create_dir_queue_args *args,
+			     struct dlb2_cmd_response *resp,
+			     bool vdev_req,
+			     unsigned int vdev_id)
+{
+	struct dlb2_dir_pq_pair *queue;
+	struct dlb2_hw_domain *domain;
+	int ret;
+
+	dlb2_log_create_dir_queue_args(hw, domain_id, args, vdev_req, vdev_id);
+
+	/*
+	 * Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	ret = dlb2_verify_create_dir_queue_args(hw,
+						domain_id,
+						args,
+						resp,
+						vdev_req,
+						vdev_id);
+	if (ret)
+		return ret;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+	if (domain == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: domain not found\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (args->port_id != -1)
+		queue = dlb2_get_domain_used_dir_pq(args->port_id,
+						    vdev_req,
+						    domain);
+	else
+		queue = DLB2_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
+					   typeof(*queue));
+	if (queue == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: no available dir queues\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	dlb2_configure_dir_queue(hw, domain, queue, args, vdev_req, vdev_id);
+
+	/*
+	 * Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list (if it's not already there).
+	 */
+	if (args->port_id == -1) {
+		dlb2_list_del(&domain->avail_dir_pq_pairs,
+			      &queue->domain_list);
+
+		dlb2_list_add(&domain->used_dir_pq_pairs,
+			      &queue->domain_list);
+	}
+
+	resp->status = 0;
+
+	resp->id = (vdev_req) ? queue->id.virt_id : queue->id.phys_id;
+
+	return 0;
+}
+
+static bool
+dlb2_port_find_slot_with_pending_map_queue(struct dlb2_ldb_port *port,
+					   struct dlb2_ldb_queue *queue,
+					   int *slot)
+{
+	int i;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		struct dlb2_ldb_port_qid_map *map = &port->qid_map[i];
+
+		if (map->state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP &&
+		    map->pending_qid == queue->id.phys_id)
+			break;
+	}
+
+	*slot = i;
+
+	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
+}
+
+static void dlb2_ldb_port_change_qid_priority(struct dlb2_hw *hw,
+					      struct dlb2_ldb_port *port,
+					      int slot,
+					      struct dlb2_map_qid_args *args)
+{
+	union dlb2_lsp_cq2priov r0;
+
+	/* Read-modify-write the priority and valid bit register */
+	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id));
+
+	r0.field.v |= 1 << slot;
+	r0.field.prio |= (args->priority & 0x7) << slot * 3;
+
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port->id.phys_id), r0.val);
+
+	dlb2_flush_csr(hw);
+
+	port->qid_map[slot].priority = args->priority;
+}
+
+static int dlb2_verify_map_qid_slot_available(struct dlb2_ldb_port *port,
+					      struct dlb2_ldb_queue *queue,
+					      struct dlb2_cmd_response *resp)
+{
+	enum dlb2_qid_map_state state;
+	int i;
+
+	/* Unused slot available? */
+	if (port->num_mappings < DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
+		return 0;
+
+	/*
+	 * If the queue is already mapped (from the application's perspective),
+	 * this is simply a priority update.
+	 */
+	state = DLB2_QUEUE_MAPPED;
+	if (dlb2_port_find_slot_queue(port, state, queue, &i))
+		return 0;
+
+	state = DLB2_QUEUE_MAP_IN_PROG;
+	if (dlb2_port_find_slot_queue(port, state, queue, &i))
+		return 0;
+
+	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i))
+		return 0;
+
+	/*
+	 * If the slot contains an unmap in progress, it's considered
+	 * available.
+	 */
+	state = DLB2_QUEUE_UNMAP_IN_PROG;
+	if (dlb2_port_find_slot(port, state, &i))
+		return 0;
+
+	state = DLB2_QUEUE_UNMAPPED;
+	if (dlb2_port_find_slot(port, state, &i))
+		return 0;
+
+	resp->status = DLB2_ST_NO_QID_SLOTS_AVAILABLE;
+	return -EINVAL;
+}
+
+static struct dlb2_ldb_queue *
+dlb2_get_domain_ldb_queue(u32 id,
+			  bool vdev_req,
+			  struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_queue *queue;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
+		return NULL;
+
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
+		if ((!vdev_req && queue->id.phys_id == id) ||
+		    (vdev_req && queue->id.virt_id == id))
+			return queue;
+
+	return NULL;
+}
+
+static struct dlb2_ldb_port *
+dlb2_get_domain_used_ldb_port(u32 id,
+			      bool vdev_req,
+			      struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB2_MAX_NUM_LDB_PORTS)
+		return NULL;
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+			if ((!vdev_req && port->id.phys_id == id) ||
+			    (vdev_req && port->id.virt_id == id))
+				return port;
+
+		DLB2_DOM_LIST_FOR(domain->avail_ldb_ports[i], port, iter)
+			if ((!vdev_req && port->id.phys_id == id) ||
+			    (vdev_req && port->id.virt_id == id))
+				return port;
+	}
+
+	return NULL;
+}
+
+static int dlb2_verify_map_qid_args(struct dlb2_hw *hw,
+				    u32 domain_id,
+				    struct dlb2_map_qid_args *args,
+				    struct dlb2_cmd_response *resp,
+				    bool vdev_req,
+				    unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_port *port;
+	struct dlb2_ldb_queue *queue;
+	int id;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+	if (domain == NULL) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+		return -EINVAL;
+	}
+
+	id = args->port_id;
+
+	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
+
+	if (port == NULL || !port->configured) {
+		resp->status = DLB2_ST_INVALID_PORT_ID;
+		return -EINVAL;
+	}
+
+	if (args->priority >= DLB2_QID_PRIORITIES) {
+		resp->status = DLB2_ST_INVALID_PRIORITY;
+		return -EINVAL;
+	}
+
+	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
+
+	if (queue == NULL || !queue->configured) {
+		resp->status = DLB2_ST_INVALID_QID;
+		return -EINVAL;
+	}
+
+	if (queue->domain_id.phys_id != domain->id.phys_id) {
+		resp->status = DLB2_ST_INVALID_QID;
+		return -EINVAL;
+	}
+
+	if (port->domain_id.phys_id != domain->id.phys_id) {
+		resp->status = DLB2_ST_INVALID_PORT_ID;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dlb2_log_map_qid(struct dlb2_hw *hw,
+			     u32 domain_id,
+			     struct dlb2_map_qid_args *args,
+			     bool vdev_req,
+			     unsigned int vdev_id)
+{
+	DLB2_HW_DBG(hw, "DLB2 map QID arguments:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n",
+		    domain_id);
+	DLB2_HW_DBG(hw, "\tPort ID:   %d\n",
+		    args->port_id);
+	DLB2_HW_DBG(hw, "\tQueue ID:  %d\n",
+		    args->qid);
+	DLB2_HW_DBG(hw, "\tPriority:  %d\n",
+		    args->priority);
+}
+
+int dlb2_hw_map_qid(struct dlb2_hw *hw,
+		    u32 domain_id,
+		    struct dlb2_map_qid_args *args,
+		    struct dlb2_cmd_response *resp,
+		    bool vdev_req,
+		    unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_queue *queue;
+	enum dlb2_qid_map_state st;
+	struct dlb2_ldb_port *port;
+	int ret, i, id;
+	u8 prio;
+
+	dlb2_log_map_qid(hw, domain_id, args, vdev_req, vdev_id);
+
+	/*
+	 * Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	ret = dlb2_verify_map_qid_args(hw,
+				       domain_id,
+				       args,
+				       resp,
+				       vdev_req,
+				       vdev_id);
+	if (ret)
+		return ret;
+
+	prio = args->priority;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+	if (domain == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: domain not found\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	id = args->port_id;
+
+	port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain);
+	if (port == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: port not found\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain);
+	if (queue == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: queue not found\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/*
+	 * If there are any outstanding detach operations for this port,
+	 * attempt to complete them. This may be necessary to free up a QID
+	 * slot for this requested mapping.
+	 */
+	if (port->num_pending_removals)
+		dlb2_domain_finish_unmap_port(hw, domain, port);
+
+	ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
+	if (ret)
+		return ret;
+
+	/* Hardware requires disabling the CQ before mapping QIDs. */
+	if (port->enabled)
+		dlb2_ldb_port_cq_disable(hw, port);
+
+	/*
+	 * If this is only a priority change, don't perform the full QID->CQ
+	 * mapping procedure
+	 */
+	st = DLB2_QUEUE_MAPPED;
+	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
+		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB2_HW_ERR(hw,
+				    "[%s():%d] Internal error: port slot tracking failed\n",
+				    __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		if (prio != port->qid_map[i].priority) {
+			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
+			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
+		}
+
+		st = DLB2_QUEUE_MAPPED;
+		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
+		if (ret)
+			return ret;
+
+		goto map_qid_done;
+	}
+
+	st = DLB2_QUEUE_UNMAP_IN_PROG;
+	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
+		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB2_HW_ERR(hw,
+				    "[%s():%d] Internal error: port slot tracking failed\n",
+				    __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		if (prio != port->qid_map[i].priority) {
+			dlb2_ldb_port_change_qid_priority(hw, port, i, args);
+			DLB2_HW_DBG(hw, "DLB2 map: priority change\n");
+		}
+
+		st = DLB2_QUEUE_MAPPED;
+		ret = dlb2_port_slot_state_transition(hw, port, queue, i, st);
+		if (ret)
+			return ret;
+
+		goto map_qid_done;
+	}
+
+	/*
+	 * If this is a priority change on an in-progress mapping, don't
+	 * perform the full QID->CQ mapping procedure.
+	 */
+	st = DLB2_QUEUE_MAP_IN_PROG;
+	if (dlb2_port_find_slot_queue(port, st, queue, &i)) {
+		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB2_HW_ERR(hw,
+				    "[%s():%d] Internal error: port slot tracking failed\n",
+				    __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		port->qid_map[i].priority = prio;
+
+		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
+
+		goto map_qid_done;
+	}
+
+	/*
+	 * If this is a priority change on a pending mapping, update the
+	 * pending priority
+	 */
+	if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) {
+		if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB2_HW_ERR(hw,
+				    "[%s():%d] Internal error: port slot tracking failed\n",
+				    __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		port->qid_map[i].pending_priority = prio;
+
+		DLB2_HW_DBG(hw, "DLB2 map: priority change only\n");
+
+		goto map_qid_done;
+	}
+
+	/*
+	 * If all the CQ's slots are in use, then there's an unmap in progress
+	 * (guaranteed by dlb2_verify_map_qid_slot_available()), so add this
+	 * mapping to pending_map and return. When the removal is completed for
+	 * the slot's current occupant, this mapping will be performed.
+	 */
+	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &i)) {
+		if (dlb2_port_find_slot(port, DLB2_QUEUE_UNMAP_IN_PROG, &i)) {
+			enum dlb2_qid_map_state st;
+
+			if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+				DLB2_HW_ERR(hw,
+					    "[%s():%d] Internal error: port slot tracking failed\n",
+					    __func__, __LINE__);
+				return -EFAULT;
+			}
+
+			port->qid_map[i].pending_qid = queue->id.phys_id;
+			port->qid_map[i].pending_priority = prio;
+
+			st = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
+
+			ret = dlb2_port_slot_state_transition(hw, port, queue,
+							      i, st);
+			if (ret)
+				return ret;
+
+			DLB2_HW_DBG(hw, "DLB2 map: map pending removal\n");
+
+			goto map_qid_done;
+		}
+	}
+
+	/*
+	 * If the domain has started, a special "dynamic" CQ->queue mapping
+	 * procedure is required in order to safely update the CQ<->QID tables.
+	 * The "static" procedure cannot be used when traffic is flowing,
+	 * because the CQ<->QID tables cannot be updated atomically and the
+	 * scheduler won't see the new mapping unless the queue's if_status
+	 * changes, which isn't guaranteed.
+	 */
+	ret = dlb2_ldb_port_map_qid(hw, domain, port, queue, prio);
+
+	/* If ret is less than zero, it's due to an internal error */
+	if (ret < 0)
+		return ret;
+
+map_qid_done:
+	if (port->enabled)
+		dlb2_ldb_port_cq_enable(hw, port);
+
+	resp->status = 0;
+
+	return 0;
+}
diff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c
index 2a089f6..a010e25 100644
--- a/drivers/event/dlb2/pf/dlb2_main.c
+++ b/drivers/event/dlb2/pf/dlb2_main.c
@@ -651,3 +651,13 @@ dlb2_pf_create_dir_port(struct dlb2_hw *hw,
 				       NOT_VF_REQ,
 				       PF_ID_ZERO);
 }
+
+int
+dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
+			 u32 id,
+			 struct dlb2_create_dir_queue_args *args,
+			 struct dlb2_cmd_response *resp)
+{
+	return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
+					PF_ID_ZERO);
+}
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index 2a82b8f..37be4b1 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -406,6 +406,54 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
 	return ret;
 }
 
+static int
+dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
+			 struct dlb2_create_dir_queue_args *cfg)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dlb2_dev->dlb2_device, "Entering %s()\n", __func__);
+
+	ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
+				       handle->domain_id,
+				       cfg,
+				       &response);
+
+	cfg->response = response;
+
+	DLB2_INFO(dlb2_dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
+		struct dlb2_map_qid_args *cfg)
+{
+	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+	struct dlb2_cmd_response response = {0};
+	int ret;
+
+	DLB2_INFO(dlb2_dev->dlb2_device, "Entering %s()\n", __func__);
+
+	ret = dlb2_hw_map_qid(&dlb2_dev->hw,
+			      handle->domain_id,
+			      cfg,
+			      &response,
+			      false,
+			      0);
+
+	cfg->response = response;
+
+	DLB2_INFO(dlb2_dev->dlb2_device, "Exiting %s() with ret=%d\n",
+		  __func__, ret);
+
+	return ret;
+}
+
 static void
 dlb2_pf_iface_fn_ptrs_init(void)
 {
@@ -419,7 +467,9 @@ dlb2_pf_iface_fn_ptrs_init(void)
 	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
 	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
 	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
+	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
 	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
+	dlb2_iface_map_qid = dlb2_pf_map_qid;
 	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
 	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
 	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
-- 
2.6.4


