[dpdk-dev] [PATCH v7 3/3] test/ticketlock: add ticket lock test case

Ananyev, Konstantin konstantin.ananyev at intel.com
Fri Mar 22 12:38:30 CET 2019



> 
> Add test cases for ticket lock, recursive ticket lock,
> and ticket lock performance.
> 
> Signed-off-by: Joyce Kong <joyce.kong at arm.com>
> Reviewed-by: Gavin Hu <gavin.hu at arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
> ---
>  MAINTAINERS                |   1 +
>  app/test/Makefile          |   1 +
>  app/test/autotest_data.py  |   6 +
>  app/test/meson.build       |   1 +
>  app/test/test_ticketlock.c | 311 +++++++++++++++++++++++++++++++++++++++++++++
>  5 files changed, 320 insertions(+)
>  create mode 100644 app/test/test_ticketlock.c
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 3521271..b1ed4cc 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -213,6 +213,7 @@ F: app/test/test_bitmap.c
>  Ticketlock
>  M: Joyce Kong <joyce.kong at arm.com>
>  F: lib/librte_eal/common/include/generic/rte_ticketlock.h
> +F: app/test/test_ticketlock.c
> 
>  ARM v7
>  M: Jan Viktorin <viktorin at rehivetech.com>
> diff --git a/app/test/Makefile b/app/test/Makefile
> index 89949c2..d6aa28b 100644
> --- a/app/test/Makefile
> +++ b/app/test/Makefile
> @@ -65,6 +65,7 @@ SRCS-y += test_barrier.c
>  SRCS-y += test_malloc.c
>  SRCS-y += test_cycles.c
>  SRCS-y += test_spinlock.c
> +SRCS-y += test_ticketlock.c
>  SRCS-y += test_memory.c
>  SRCS-y += test_memzone.c
>  SRCS-y += test_bitmap.c
> diff --git a/app/test/autotest_data.py b/app/test/autotest_data.py
> index 5f87bb9..db25274 100644
> --- a/app/test/autotest_data.py
> +++ b/app/test/autotest_data.py
> @@ -171,6 +171,12 @@
>          "Report":  None,
>      },
>      {
> +        "Name":    "Ticketlock autotest",
> +        "Command": "ticketlock_autotest",
> +        "Func":    default_autotest,
> +        "Report":  None,
> +    },
> +    {
>          "Name":    "Byte order autotest",
>          "Command": "byteorder_autotest",
>          "Func":    default_autotest,
> diff --git a/app/test/meson.build b/app/test/meson.build
> index 05e5dde..ddb4d09 100644
> --- a/app/test/meson.build
> +++ b/app/test/meson.build
> @@ -107,6 +107,7 @@ test_sources = files('commands.c',
>  	'test_timer.c',
>  	'test_timer_perf.c',
>  	'test_timer_racecond.c',
> +	'test_ticketlock.c',
>  	'test_version.c',
>  	'virtual_pmd.c'
>  )
> diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
> new file mode 100644
> index 0000000..67281ce
> --- /dev/null
> +++ b/app/test/test_ticketlock.c
> @@ -0,0 +1,311 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018-2019 Arm Limited
> + */
> +
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include <sys/queue.h>
> +#include <unistd.h>
> +
> +#include <rte_atomic.h>
> +#include <rte_common.h>
> +#include <rte_cycles.h>
> +#include <rte_eal.h>
> +#include <rte_launch.h>
> +#include <rte_lcore.h>
> +#include <rte_memory.h>
> +#include <rte_per_lcore.h>
> +#include <rte_ticketlock.h>
> +
> +#include "test.h"
> +
> +/*
> + * Ticketlock test
> + * =============
> + *
> + * - There is a global ticketlock and a table of ticketlocks (one per lcore).
> + *
> + * - The test function takes all of these locks and launches the
> + *   ``test_ticketlock_per_core()`` function on each core (except the master).
> + *
> + *   - The function takes the global lock, displays something, then releases
> + *     the global lock.
> + *   - The function takes the per-lcore lock, displays something, then releases
> + *     the per-lcore lock.
> + *
> + * - The main function unlocks the per-lcore locks sequentially and
> + *   waits between each lock. This triggers the display of a message
> + *   for each core, in the correct order. The autotest script checks that
> + *   this order is correct.
> + *
> + * - A load test is carried out, with all cores attempting to lock a single lock
> + *   multiple times
> + */
> +
> +static rte_ticketlock_t tl, tl_try;
> +static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
> +static rte_ticketlock_recursive_t tlr;
> +static unsigned int count;
> +
> +static rte_atomic32_t synchro;
> +
> +static int
> +test_ticketlock_per_core(__attribute__((unused)) void *arg)
> +{
> +	rte_ticketlock_lock(&tl);
> +	printf("Global lock taken on core %u\n", rte_lcore_id());
> +	rte_ticketlock_unlock(&tl);
> +
> +	rte_ticketlock_lock(&tl_tab[rte_lcore_id()]);
> +	printf("Hello from core %u !\n", rte_lcore_id());
> +	rte_ticketlock_unlock(&tl_tab[rte_lcore_id()]);
> +
> +	return 0;
> +}

I think that's probably not enough for functional testing.
We need something extra to ensure that it provides correct locking in an MT env.
Probably extend the perf test below to do both?
Something like that:

static uint64_t lcount __rte_cache_aligned;
static uint64_t lcore_count[RTE_MAX_LCORE] __rte_cache_aligned;

...

load_loop_fn(...)
{
   ...
   rte_ticketlock_lock(&lk);
   lcount++;
   rte_ticketlock_unlock(&lk);
   lcore_count[current_lcore]++;
}

Then in test_ticketlock_perf() make sure that the sum of all
lcore_count[] values equals the lcount value:
tcount = 0;
for (i = 0; i != RTE_DIM(lcore_count); i++)
   tcount += lcore_count[i];

if (tcount != lcount)
  <error>

Same thought for trylock.
Konstantin

> +
> +static int
> +test_ticketlock_recursive_per_core(__attribute__((unused)) void *arg)
> +{
> +	unsigned int id = rte_lcore_id();
> +
> +	rte_ticketlock_recursive_lock(&tlr);
> +	printf("Global recursive lock taken on core %u - count = %d\n",
> +	       id, tlr.count);
> +	rte_ticketlock_recursive_lock(&tlr);
> +	printf("Global recursive lock taken on core %u - count = %d\n",
> +	       id, tlr.count);
> +	rte_ticketlock_recursive_lock(&tlr);
> +	printf("Global recursive lock taken on core %u - count = %d\n",
> +	       id, tlr.count);
> +
> +	printf("Hello from within recursive locks from core %u !\n", id);
> +
> +	rte_ticketlock_recursive_unlock(&tlr);
> +	printf("Global recursive lock released on core %u - count = %d\n",
> +	       id, tlr.count);
> +	rte_ticketlock_recursive_unlock(&tlr);
> +	printf("Global recursive lock released on core %u - count = %d\n",
> +	       id, tlr.count);
> +	rte_ticketlock_recursive_unlock(&tlr);
> +	printf("Global recursive lock released on core %u - count = %d\n",
> +	       id, tlr.count);
> +
> +	return 0;
> +}
> +
> +static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
> +static uint64_t lock_count[RTE_MAX_LCORE] = {0};
> +
> +#define TIME_MS 100
> +
> +static int
> +load_loop_fn(void *func_param)
> +{
> +	uint64_t time_diff = 0, begin;
> +	uint64_t hz = rte_get_timer_hz();
> +	uint64_t lcount = 0;
> +	const int use_lock = *(int *)func_param;
> +	const unsigned int lcore = rte_lcore_id();
> +
> +	/* wait synchro for slaves */
> +	if (lcore != rte_get_master_lcore())
> +		while (rte_atomic32_read(&synchro) == 0)
> +			;
> +
> +	begin = rte_get_timer_cycles();
> +	while (time_diff < hz * TIME_MS / 1000) {
> +		if (use_lock)
> +			rte_ticketlock_lock(&lk);
> +		lcount++;
> +		if (use_lock)
> +			rte_ticketlock_unlock(&lk);
> +		/* delay to make lock duty cycle slightly realistic */

Probably better to do here the same as in the test spinlock patches:
- remove rte_delay_us()
- move
  time_diff = rte_get_timer_cycles() - begin;
  out of the loop and report aggregate cycles.

> +		rte_delay_us(1);
> +		time_diff = rte_get_timer_cycles() - begin;
> +	}
> +	lock_count[lcore] = lcount;
> +	return 0;
> +}
> +
> +static int
> +test_ticketlock_perf(void)
> +{
> +	unsigned int i;
> +	uint64_t total = 0;
> +	int lock = 0;
> +	const unsigned int lcore = rte_lcore_id();
> +
> +	printf("\nTest with no lock on single core...\n");
> +	load_loop_fn(&lock);
> +	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
> +	memset(lock_count, 0, sizeof(lock_count));
> +
> +	printf("\nTest with lock on single core...\n");
> +	lock = 1;
> +	load_loop_fn(&lock);
> +	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
> +	memset(lock_count, 0, sizeof(lock_count));
> +
> +	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
> +
> +	/* Clear synchro and start slaves */
> +	rte_atomic32_set(&synchro, 0);
> +	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
> +
> +	/* start synchro and launch test on master */
> +	rte_atomic32_set(&synchro, 1);
> +	load_loop_fn(&lock);
> +
> +	rte_eal_mp_wait_lcore();
> +
> +	RTE_LCORE_FOREACH(i) {
> +		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
> +		total += lock_count[i];
> +	}
> +
> +	printf("Total count = %"PRIu64"\n", total);
> +
> +	return 0;
> +}
> +
> +/*
> + * Use rte_ticketlock_trylock() to trylock a ticketlock object.
> + * If it cannot lock the object successfully, it returns
> + * immediately, and the variable "count" is increased by one
> + * each time. The value of "count" can be checked as the
> + * result later.
> + */
> +static int
> +test_ticketlock_try(__attribute__((unused)) void *arg)
> +{
> +	if (rte_ticketlock_trylock(&tl_try) == 0) {
> +		rte_ticketlock_lock(&tl);
> +		count++;
> +		rte_ticketlock_unlock(&tl);
> +	}
> +
> +	return 0;
> +}
> +
> +
> +/*
> + * Test rte_eal_get_lcore_state() in addition to ticketlocks
> + * as we have "waiting" then "running" lcores.
> + */
> +static int
> +test_ticketlock(void)
> +{
> +	int ret = 0;
> +	int i;
> +
> +	/* slave cores should be waiting: print it */
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		printf("lcore %d state: %d\n", i,
> +		       (int) rte_eal_get_lcore_state(i));
> +	}
> +
> +	rte_ticketlock_init(&tl);
> +	rte_ticketlock_init(&tl_try);
> +	rte_ticketlock_recursive_init(&tlr);
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		rte_ticketlock_init(&tl_tab[i]);
> +	}
> +
> +	rte_ticketlock_lock(&tl);
> +
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		rte_ticketlock_lock(&tl_tab[i]);
> +		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
> +	}
> +
> +	/* slave cores should be busy: print it */
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		printf("lcore %d state: %d\n", i,
> +		       (int) rte_eal_get_lcore_state(i));
> +	}
> +	rte_ticketlock_unlock(&tl);
> +
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		rte_ticketlock_unlock(&tl_tab[i]);
> +		rte_delay_ms(10);
> +	}
> +
> +	rte_eal_mp_wait_lcore();
> +
> +	rte_ticketlock_recursive_lock(&tlr);
> +
> +	/*
> +	 * Try to acquire a lock that we already own
> +	 */
> +	if (!rte_ticketlock_recursive_trylock(&tlr)) {
> +		printf("rte_ticketlock_recursive_trylock failed on a lock that "
> +		       "we already own\n");
> +		ret = -1;
> +	} else
> +		rte_ticketlock_recursive_unlock(&tlr);
> +
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
> +					NULL, i);
> +	}
> +	rte_ticketlock_recursive_unlock(&tlr);
> +	rte_eal_mp_wait_lcore();
> +
> +	/*
> +	 * Test if it could return immediately from try-locking a locked object.
> +	 * Here it will lock the ticketlock object first, then launch all the
> +	 * slave lcores to trylock the same ticketlock object.
> +	 * All the slave lcores should give up try-locking a locked object and
> +	 * return immediately, and then increase the "count", initialized to
> +	 * zero, by one each time.
> +	 * We can check if the "count" is finally equal to the number of all
> +	 * slave lcores to see if the behavior of try-locking a locked
> +	 * ticketlock object is correct.
> +	 */
> +	if (rte_ticketlock_trylock(&tl_try) == 0)
> +		return -1;
> +
> +	count = 0;
> +	RTE_LCORE_FOREACH_SLAVE(i) {
> +		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
> +	}
> +	rte_eal_mp_wait_lcore();
> +	rte_ticketlock_unlock(&tl_try);
> +	if (rte_ticketlock_is_locked(&tl)) {
> +		printf("ticketlock is locked but it should not be\n");
> +		return -1;
> +	}
> +	rte_ticketlock_lock(&tl);
> +	if (count != (rte_lcore_count() - 1))
> +		ret = -1;
> +
> +	rte_ticketlock_unlock(&tl);
> +
> +	/*
> +	 * Test if it can trylock recursively.
> +	 * Use rte_ticketlock_recursive_trylock() to check if it can lock
> +	 * a ticketlock object recursively. Here it will try to lock a
> +	 * ticketlock object twice.
> +	 */
> +	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
> +		printf("It failed to do the first ticketlock_recursive_trylock "
> +			   "but it should be able to\n");
> +		return -1;
> +	}
> +	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
> +		printf("It failed to do the second ticketlock_recursive_trylock "
> +			   "but it should be able to\n");
> +		return -1;
> +	}
> +	rte_ticketlock_recursive_unlock(&tlr);
> +	rte_ticketlock_recursive_unlock(&tlr);
> +
> +	if (test_ticketlock_perf() < 0)
> +		return -1;
> +
> +	return ret;
> +}
> +
> +REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock);
> --
> 2.7.4



More information about the dev mailing list