[dpdk-dev] [PATCH v2 2/2] kni: add support for core_id param in single threaded mode

Vladyslav Buslov vladyslav.buslov at harmonicinc.com
Sat Sep 10 15:50:16 CEST 2016


Allow binding the KNI kernel thread to a specific core in single-threaded mode
by setting the core_id and force_bind config parameters.
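
For reference, a minimal sketch of how an application could request this binding
through the existing rte_kni_conf fields (the port id, core number and mempool
below are illustrative and not part of this patch):

    /* sketch only; assumes <rte_kni.h>, a configured port_id and pktmbuf_pool */
    struct rte_kni_conf conf;
    struct rte_kni *kni;

    memset(&conf, 0, sizeof(conf));
    snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
    conf.group_id = port_id;
    conf.mbuf_size = 2048;     /* illustrative mbuf data size */
    conf.core_id = 3;          /* core for the kernel RX thread */
    conf.force_bind = 1;       /* actually bind the kthread to core_id */

    kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
    if (kni == NULL)
        rte_exit(EXIT_FAILURE, "Failed to create KNI device\n");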

Signed-off-by: Vladyslav Buslov <vladyslav.buslov at harmonicinc.com>
---

v2:
* Fixed formatting.
* Refactored kthread create/bind functionality into separate function.
* Moved thread mode print into kni_init.
* Added a short description to the KNI Programmer's Guide doc.
* Fixed the outdated mbuf processing description in the KNI Programmer's Guide doc.

 doc/guides/prog_guide/kernel_nic_interface.rst |  5 +-
 lib/librte_eal/linuxapp/kni/kni_misc.c         | 72 +++++++++++++++++---------
 2 files changed, 51 insertions(+), 26 deletions(-)

diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
index fac1960..0fdc307 100644
--- a/doc/guides/prog_guide/kernel_nic_interface.rst
+++ b/doc/guides/prog_guide/kernel_nic_interface.rst
@@ -102,6 +102,9 @@ Refer to rte_kni_common.h in the DPDK source code for more details.
 
 The physical addresses will be re-mapped into the kernel address space and stored in separate KNI contexts.
 
+The affinity of the kernel RX thread (both single and multi-threaded modes) is controlled by the force_bind and
+core_id config parameters.
+
 The KNI interfaces can be deleted by a DPDK application dynamically after being created.
 Furthermore, all those KNI interfaces not deleted will be deleted on the release operation
 of the miscellaneous device (when the DPDK application is closed).
@@ -128,7 +131,7 @@ Use Case: Ingress
 On the DPDK RX side, the mbuf is allocated by the PMD in the RX thread context.
 This thread will enqueue the mbuf in the rx_q FIFO.
 The KNI thread will poll all KNI active devices for the rx_q.
-If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx().
+If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx_ni().
 The dequeued mbuf must be freed, so the same pointer is sent back in the free_q FIFO.
 
 The RX thread, in the same main loop, polls this FIFO and frees the mbuf after dequeuing it.
diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/lib/librte_eal/linuxapp/kni/kni_misc.c
index 5e7cf21..c79f5a8 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/lib/librte_eal/linuxapp/kni/kni_misc.c
@@ -172,6 +172,11 @@ kni_init(void)
 		return -EINVAL;
 	}
 
+	if (multiple_kthread_on == 0)
+		KNI_PRINT("Single kernel thread for all KNI devices\n");
+	else
+		KNI_PRINT("Multiple kernel thread mode enabled\n");
+
 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
 	rc = register_pernet_subsys(&kni_net_ops);
 #else
@@ -240,12 +245,6 @@ kni_open(struct inode *inode, struct file *file)
 	if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
 		return -EBUSY;
 
-	/* Create kernel thread for single mode */
-	if (multiple_kthread_on == 0)
-		KNI_PRINT("Single kernel thread for all KNI devices\n");
-	else
-		KNI_PRINT("Multiple kernel thread mode enabled\n");
-
 	file->private_data = get_net(net);
 	KNI_PRINT("/dev/kni opened\n");
 
@@ -391,6 +390,32 @@ kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
 	return 0;
 }
 
+__printf(5, 6) static struct task_struct *
+kni_run_thread(int (*threadfn)(void *data),
+	void *data,
+	uint8_t force_bind,
+	unsigned core_id,
+	const char namefmt[], ...)
+{
+	struct task_struct *kni_thread = NULL;
+	char task_comm[TASK_COMM_LEN];
+	va_list args;
+
+	va_start(args, namefmt);
+	vsnprintf(task_comm, sizeof(task_comm), namefmt, args);
+	va_end(args);
+
+	kni_thread = kthread_create(threadfn, data, task_comm);
+	if (IS_ERR(kni_thread))
+		return NULL;
+
+	if (force_bind)
+		kthread_bind(kni_thread, core_id);
+	wake_up_process(kni_thread);
+
+	return kni_thread;
+}
+
 static int
 kni_ioctl_create(struct net *net,
 		unsigned int ioctl_num, unsigned long ioctl_param)
@@ -419,8 +444,7 @@ kni_ioctl_create(struct net *net,
 	/**
 	 * Check if the cpu core id is valid for binding.
 	 */
-	if (dev_info.force_bind &&
-				!cpu_online(dev_info.core_id)) {
+	if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
 		KNI_ERR("cpu %u is not online\n", dev_info.core_id);
 		return -EINVAL;
 	}
@@ -572,31 +596,29 @@ kni_ioctl_create(struct net *net,
 	 * and finally wake it up.
 	 */
 	if (multiple_kthread_on) {
-		kni->pthread = kthread_create(kni_thread_multiple,
-					      (void *)kni,
-					      "kni_%s", kni->name);
-		if (IS_ERR(kni->pthread)) {
+		kni->pthread = kni_run_thread(kni_thread_multiple,
+			(void *)kni,
+			dev_info.force_bind,
+			kni->core_id,
+			"kni_%s", kni->name);
+		if (kni->pthread == NULL) {
 			kni_dev_remove(kni);
 			return -ECANCELED;
 		}
-		if (dev_info.force_bind)
-			kthread_bind(kni->pthread, kni->core_id);
-		wake_up_process(kni->pthread);
 	} else {
 		mutex_lock(&knet->kni_kthread_lock);
 		if (knet->kni_kthread == NULL) {
-			knet->kni_kthread = kthread_create(kni_thread_single,
-									(void *)knet,
-									"kni_single");
-			if (IS_ERR(knet->kni_kthread)) {
-				kni_dev_remove(kni);
-				return -ECANCELED;
-			}
-			if (dev_info.force_bind)
-				kthread_bind(knet->kni_kthread, kni->core_id);
-			wake_up_process(knet->kni_kthread);
+			knet->kni_kthread = kni_run_thread(kni_thread_single,
+				(void *)knet,
+				dev_info.force_bind,
+				kni->core_id,
+				"kni_single");
 		}
 		mutex_unlock(&knet->kni_kthread_lock);
+		if (knet->kni_kthread == NULL) {
+			kni_dev_remove(kni);
+			return -ECANCELED;
+		}
 	}
 
 	down_write(&knet->kni_list_lock);
-- 
2.8.3


