[dpdk-dev] [PATCH v3 6/9] eal: register non-EAL threads as lcores

Ananyev, Konstantin konstantin.ananyev at intel.com
Mon Jun 22 17:49:26 CEST 2020


Hi David,

> diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
> index 86d32a3dd7..7db05428e7 100644
> --- a/lib/librte_eal/common/eal_common_lcore.c
> +++ b/lib/librte_eal/common/eal_common_lcore.c
> @@ -6,12 +6,13 @@
>  #include <limits.h>
>  #include <string.h>
> 
> -#include <rte_errno.h>
> -#include <rte_log.h>
> -#include <rte_eal.h>
> -#include <rte_lcore.h>
>  #include <rte_common.h>
>  #include <rte_debug.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_spinlock.h>
> 
>  #include "eal_private.h"
>  #include "eal_thread.h"
> @@ -220,3 +221,38 @@ rte_socket_id_by_idx(unsigned int idx)
>  	}
>  	return config->numa_nodes[idx];
>  }
> +
> +static rte_spinlock_t lcore_lock = RTE_SPINLOCK_INITIALIZER;
> +
> +unsigned int
> +eal_lcore_non_eal_allocate(void)
> +{
> +	struct rte_config *cfg = rte_eal_get_configuration();
> +	unsigned int lcore_id;
> +
> +	rte_spinlock_lock(&lcore_lock);

I think this will break the current DPDK multi-process (MP) mode.
The problem here is that rte_config (and lcore_role[]) lives in shared memory,
while the lock is local to the process.
The simplest way is probably to move lcore_lock into rte_config.
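
Something along these lines, I mean (untested sketch; it assumes struct rte_config
grows a hypothetical rte_spinlock_t lcore_lock field, initialized together with the
rest of the config):

	unsigned int
	eal_lcore_non_eal_allocate(void)
	{
		struct rte_config *cfg = rte_eal_get_configuration();
		unsigned int lcore_id;

		/* hypothetical field: the lock sits next to the data it protects */
		rte_spinlock_lock(&cfg->lcore_lock);
		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
			if (cfg->lcore_role[lcore_id] != ROLE_OFF)
				continue;
			cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
			cfg->lcore_count++;
			break;
		}
		if (lcore_id == RTE_MAX_LCORE)
			RTE_LOG(DEBUG, EAL, "No lcore available.\n");
		rte_spinlock_unlock(&cfg->lcore_lock);
		return lcore_id;
	}

That way primary and secondary processes serialize on the same lock as the
lcore_role[] array they are updating.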

> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
> +			continue;
> +		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
> +		cfg->lcore_count++;
> +		break;
> +	}
> +	if (lcore_id == RTE_MAX_LCORE)
> +		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
> +	rte_spinlock_unlock(&lcore_lock);
> +	return lcore_id;
> +}
> +
> +void
> +eal_lcore_non_eal_release(unsigned int lcore_id)
> +{
> +	struct rte_config *cfg = rte_eal_get_configuration();
> +
> +	rte_spinlock_lock(&lcore_lock);
> +	if (cfg->lcore_role[lcore_id] == ROLE_NON_EAL) {
> +		cfg->lcore_role[lcore_id] = ROLE_OFF;
> +		cfg->lcore_count--;
> +	}
> +	rte_spinlock_unlock(&lcore_lock);
> +}
> diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
> index a7ae0691bf..1cbddc4b5b 100644
> --- a/lib/librte_eal/common/eal_common_thread.c
> +++ b/lib/librte_eal/common/eal_common_thread.c
> @@ -236,3 +236,36 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
>  	pthread_join(*thread, NULL);
>  	return -ret;
>  }
> +
> +void
> +rte_thread_register(void)
> +{
> +	unsigned int lcore_id;
> +	rte_cpuset_t cpuset;
> +
> +	/* EAL init flushes all lcores, we can't register before. */
> +	assert(internal_config.init_complete == 1);
> +	if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
> +			&cpuset) != 0)
> +		CPU_ZERO(&cpuset);
> +	lcore_id = eal_lcore_non_eal_allocate();
> +	if (lcore_id >= RTE_MAX_LCORE)
> +		lcore_id = LCORE_ID_ANY;
> +	rte_thread_init(lcore_id, &cpuset);

So we are just setting the affinity to the value the thread already has, right?
Not a big deal, but it might be easier to allow rte_thread_init()
to accept cpuset == NULL (and simply not change the thread affinity in that case).
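
Something like this, for example (just a sketch; I am assuming rte_thread_init()
currently applies the given cpuset via rte_thread_set_affinity(), the NULL check
would skip that step):

	void
	rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
	{
		RTE_PER_LCORE(_lcore_id) = lcore_id;
		/* NULL means: keep whatever affinity the thread already has */
		if (cpuset != NULL)
			rte_thread_set_affinity(cpuset);
	}

Then the register side would become simply:

	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	rte_thread_init(lcore_id, NULL);

and the pthread_getaffinity_np() call above could go away.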

> +	if (lcore_id != LCORE_ID_ANY)
> +		RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
> +			lcore_id);
> +}
> +
> +void
> +rte_thread_unregister(void)
> +{
> +	unsigned int lcore_id = rte_lcore_id();
> +
> +	if (lcore_id != LCORE_ID_ANY)
> +		eal_lcore_non_eal_release(lcore_id);
> +	rte_thread_uninit();
> +	if (lcore_id != LCORE_ID_ANY)
> +		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
> +			lcore_id);
> +}
> diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
> index 0592fcd694..73238ff157 100644
> --- a/lib/librte_eal/common/eal_private.h
> +++ b/lib/librte_eal/common/eal_private.h
> @@ -396,6 +396,24 @@ uint64_t get_tsc_freq(void);
>   */
>  uint64_t get_tsc_freq_arch(void);
> 
> +/**
> + * Allocate a free lcore to associate to a non-EAL thread.
> + *
> + * @return
> + *   - the id of a lcore with role ROLE_NON_EAL on success.
> + *   - RTE_MAX_LCORE if none was available.
> + */
> +unsigned int eal_lcore_non_eal_allocate(void);
> +
> +/**
> + * Release the lcore used by a non-EAL thread.
> + * Counterpart of eal_lcore_non_eal_allocate().
> + *
> + * @param lcore_id
> + *   The lcore with role ROLE_NON_EAL to release.
> + */
> +void eal_lcore_non_eal_release(unsigned int lcore_id);
> +
>  /**
>   * Prepare physical memory mapping
>   * i.e. hugepages on Linux and
> diff --git a/lib/librte_eal/include/rte_lcore.h b/lib/librte_eal/include/rte_lcore.h
> index 3968c40693..ea86220394 100644
> --- a/lib/librte_eal/include/rte_lcore.h
> +++ b/lib/librte_eal/include/rte_lcore.h
> @@ -31,6 +31,7 @@ enum rte_lcore_role_t {
>  	ROLE_RTE,
>  	ROLE_OFF,
>  	ROLE_SERVICE,
> +	ROLE_NON_EAL,
>  };
> 
>  /**
> @@ -67,7 +68,8 @@ rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role);
>   *   to run threads with lcore IDs 0, 1, 2 and 3 on physical core 10..
>   *
>   * @return
> - *  Logical core ID (in EAL thread) or LCORE_ID_ANY (in non-EAL thread)
> + *  Logical core ID (in EAL thread or registered non-EAL thread) or
> + *  LCORE_ID_ANY (in unregistered non-EAL thread)
>   */
>  static inline unsigned
>  rte_lcore_id(void)
> @@ -279,6 +281,20 @@ int rte_thread_setname(pthread_t id, const char *name);
>  __rte_experimental
>  int rte_thread_getname(pthread_t id, char *name, size_t len);
> 
> +/**
> + * Register current non-EAL thread as a lcore.
> + */
> +__rte_experimental
> +void
> +rte_thread_register(void);
> +
> +/**
> + * Unregister current thread and release lcore if one was associated.
> + */
> +__rte_experimental
> +void
> +rte_thread_unregister(void);
> +
>  /**
>   * Create a control thread.
>   *
> diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
> index 5831eea4b0..39c41d445d 100644
> --- a/lib/librte_eal/rte_eal_version.map
> +++ b/lib/librte_eal/rte_eal_version.map
> @@ -396,6 +396,8 @@ EXPERIMENTAL {
> 
>  	# added in 20.08
>  	__rte_trace_mem_per_thread_free;
> +	rte_thread_register;
> +	rte_thread_unregister;
>  };
> 
>  INTERNAL {
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 652d19f9f1..9e0ee052b3 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -28,9 +28,9 @@
>   * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
>   * thread due to the internal per-lcore cache. Due to the lack of caching,
>   * rte_mempool_get() or rte_mempool_put() performance will suffer when called
> - * by non-EAL threads. Instead, non-EAL threads should call
> - * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
> - * created with rte_mempool_cache_create().
> + * by unregistered non-EAL threads. Instead, unregistered non-EAL threads
> + * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a
> + * user cache created with rte_mempool_cache_create().
>   */
> 
>  #include <stdio.h>
> @@ -1233,7 +1233,7 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
>  /**
>   * Create a user-owned mempool cache.
>   *
> - * This can be used by non-EAL threads to enable caching when they
> + * This can be used by unregistered non-EAL threads to enable caching when they
>   * interact with a mempool.
>   *
>   * @param size
> @@ -1264,7 +1264,8 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
>   * @param lcore_id
>   *   The logical core id.
>   * @return
> - *   A pointer to the mempool cache or NULL if disabled or non-EAL thread.
> + *   A pointer to the mempool cache or NULL if disabled or unregistered non-EAL
> + *   thread.
>   */
>  static __rte_always_inline struct rte_mempool_cache *
>  rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
> --
> 2.23.0


