[dpdk-dev] [PATCH v6 3/7] eal: add power monitor for multiple events

Anatoly Burakov anatoly.burakov at intel.com
Mon Jul 5 17:21:58 CEST 2021


Use the RTM and WAITPKG instructions to perform a wait-for-writes
similar to what UMWAIT does, but without the limitation of having to
listen for just one event. This works because the optimized power state
entered via the TPAUSE instruction is exited on an RTM transaction
abort, so if we add the addresses we are interested in to the
transaction read-set, a write to any of those addresses will abort the
transaction and wake us up.
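
In rough pseudo-code, the core of the mechanism looks like the sketch
below (a simplified illustration only; the actual implementation is
rte_power_monitor_multi() added by this patch, which additionally handles
the per-lcore wakeup lock and the monitoring callbacks):

    if (rte_xbegin() == RTE_XBEGIN_STARTED) {
            /* reading each address adds it to the transaction read-set */
            for (i = 0; i < num; i++)
                    (void)*(const volatile uint8_t *)pmc[i].addr;

            /* enter the optimized power state until the TSC timestamp;
             * a write to any address in the read-set aborts the
             * transaction, which also exits the power state */
            rte_power_pause(tsc_timestamp);

            rte_xend();
    }
    /* on abort, rte_xbegin() returns a non-STARTED status and execution
     * resumes here, i.e. we have been woken up */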

Signed-off-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
Signed-off-by: Anatoly Burakov <anatoly.burakov at intel.com>
---

Notes:
    v4:
    - Fixed bugs in accessing the monitor condition
    - Abort on any monitor condition not having a defined callback
    
    v2:
    - Adapt to callback mechanism
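
    Caller-side usage sketch (not part of the patch, included here for
    review convenience). It assumes the callback-based
    struct rte_power_monitor_cond introduced earlier in this series
    (addr/size/fn/opaque fields, callback returning non-zero to abort
    the sleep); the callback signature, RTE_POWER_MONITOR_OPAQUE_SZ and
    the queue layout below are illustrative and may differ from the
    final API:

        #include <rte_common.h>
        #include <rte_cpuflags.h>
        #include <rte_cycles.h>
        #include <rte_power_intrinsics.h>

        #define NB_QUEUES 4

        /* skip sleeping as soon as a monitored tail pointer is non-zero;
         * the opaque data is not used by this particular callback */
        static int
        work_arrived(const uint64_t cur,
                const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
        {
            RTE_SET_USED(opaque);
            return cur != 0;
        }

        static void
        wait_on_queues(volatile uint64_t *tails, uint64_t tsc_timeout)
        {
            struct rte_power_monitor_cond pmc[NB_QUEUES];
            struct rte_cpu_intrinsics intr;
            uint32_t i;

            /* the API needs both WAITPKG and RTM - check before use */
            rte_cpu_get_intrinsics_support(&intr);
            if (!intr.power_monitor_multi)
                return; /* fall back to plain polling */

            for (i = 0; i < NB_QUEUES; i++) {
                /* opaque is left unset - unused by the callback above */
                pmc[i].addr = &tails[i];
                pmc[i].size = sizeof(uint64_t);
                pmc[i].fn = work_arrived;
            }

            /* sleep until one of the tails is written, a callback aborts,
             * the timestamp is reached, or the transaction aborts for
             * any other reason */
            rte_power_monitor_multi(pmc, NB_QUEUES,
                    rte_rdtsc() + tsc_timeout);
        }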

 doc/guides/rel_notes/release_21_08.rst        |  2 +
 lib/eal/arm/rte_power_intrinsics.c            | 11 +++
 lib/eal/include/generic/rte_cpuflags.h        |  2 +
 .../include/generic/rte_power_intrinsics.h    | 35 +++++++++
 lib/eal/ppc/rte_power_intrinsics.c            | 11 +++
 lib/eal/version.map                           |  3 +
 lib/eal/x86/rte_cpuflags.c                    |  2 +
 lib/eal/x86/rte_power_intrinsics.c            | 73 +++++++++++++++++++
 8 files changed, 139 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index c84ac280f5..9d1cfac395 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -55,6 +55,8 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* eal: added ``rte_power_monitor_multi`` to support waiting for multiple events.
+
 
 Removed Items
 -------------
diff --git a/lib/eal/arm/rte_power_intrinsics.c b/lib/eal/arm/rte_power_intrinsics.c
index e83f04072a..78f55b7203 100644
--- a/lib/eal/arm/rte_power_intrinsics.c
+++ b/lib/eal/arm/rte_power_intrinsics.c
@@ -38,3 +38,14 @@ rte_power_monitor_wakeup(const unsigned int lcore_id)
 
 	return -ENOTSUP;
 }
+
+int
+rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
+		const uint32_t num, const uint64_t tsc_timestamp)
+{
+	RTE_SET_USED(pmc);
+	RTE_SET_USED(num);
+	RTE_SET_USED(tsc_timestamp);
+
+	return -ENOTSUP;
+}
diff --git a/lib/eal/include/generic/rte_cpuflags.h b/lib/eal/include/generic/rte_cpuflags.h
index 28a5aecde8..d35551e931 100644
--- a/lib/eal/include/generic/rte_cpuflags.h
+++ b/lib/eal/include/generic/rte_cpuflags.h
@@ -24,6 +24,8 @@ struct rte_cpu_intrinsics {
 	/**< indicates support for rte_power_monitor function */
 	uint32_t power_pause : 1;
 	/**< indicates support for rte_power_pause function */
+	uint32_t power_monitor_multi : 1;
+	/**< indicates support for rte_power_monitor_multi function */
 };
 
 /**
diff --git a/lib/eal/include/generic/rte_power_intrinsics.h b/lib/eal/include/generic/rte_power_intrinsics.h
index c9aa52a86d..04e8c2ab37 100644
--- a/lib/eal/include/generic/rte_power_intrinsics.h
+++ b/lib/eal/include/generic/rte_power_intrinsics.h
@@ -128,4 +128,39 @@ int rte_power_monitor_wakeup(const unsigned int lcore_id);
 __rte_experimental
 int rte_power_pause(const uint64_t tsc_timestamp);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Monitor a set of addresses for changes. This will cause the CPU to enter an
+ * architecture-defined optimized power state until either one of the specified
+ * memory addresses is written to, a certain TSC timestamp is reached, or other
+ * reasons cause the CPU to wake up.
+ *
+ * Additionally, each monitoring condition provides a comparison callback. For
+ * every condition, the current value at the monitored address is read and
+ * passed to the callback together with user-supplied opaque data; if the
+ * callback returns non-zero, entering the optimized power state is aborted.
+ *
+ * @warning It is the responsibility of the user to check whether this function
+ *   is supported at runtime using the `rte_cpu_get_intrinsics_support()` API
+ *   call. Failing to do so may result in an illegal CPU instruction error.
+ *
+ * @param pmc
+ *   An array of monitoring condition structures.
+ * @param num
+ *   Length of the `pmc` array.
+ * @param tsc_timestamp
+ *   Maximum TSC timestamp to wait for. Note that the wait behavior is
+ *   architecture-dependent.
+ *
+ * @return
+ *   0 on success
+ *   -EINVAL on invalid parameters
+ *   -ENOTSUP if unsupported
+ */
+__rte_experimental
+int rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
+		const uint32_t num, const uint64_t tsc_timestamp);
+
 #endif /* _RTE_POWER_INTRINSIC_H_ */
diff --git a/lib/eal/ppc/rte_power_intrinsics.c b/lib/eal/ppc/rte_power_intrinsics.c
index 7fc9586da7..f00b58ade5 100644
--- a/lib/eal/ppc/rte_power_intrinsics.c
+++ b/lib/eal/ppc/rte_power_intrinsics.c
@@ -38,3 +38,14 @@ rte_power_monitor_wakeup(const unsigned int lcore_id)
 
 	return -ENOTSUP;
 }
+
+int
+rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
+		const uint32_t num, const uint64_t tsc_timestamp)
+{
+	RTE_SET_USED(pmc);
+	RTE_SET_USED(num);
+	RTE_SET_USED(tsc_timestamp);
+
+	return -ENOTSUP;
+}
diff --git a/lib/eal/version.map b/lib/eal/version.map
index fe5c3dac98..4ccd5475d6 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -423,6 +423,9 @@ EXPERIMENTAL {
 	rte_version_release; # WINDOWS_NO_EXPORT
 	rte_version_suffix; # WINDOWS_NO_EXPORT
 	rte_version_year; # WINDOWS_NO_EXPORT
+
+	# added in 21.08
+	rte_power_monitor_multi; # WINDOWS_NO_EXPORT
 };
 
 INTERNAL {
diff --git a/lib/eal/x86/rte_cpuflags.c b/lib/eal/x86/rte_cpuflags.c
index a96312ff7f..d339734a8c 100644
--- a/lib/eal/x86/rte_cpuflags.c
+++ b/lib/eal/x86/rte_cpuflags.c
@@ -189,5 +189,7 @@ rte_cpu_get_intrinsics_support(struct rte_cpu_intrinsics *intrinsics)
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
 		intrinsics->power_monitor = 1;
 		intrinsics->power_pause = 1;
+		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_RTM))
+			intrinsics->power_monitor_multi = 1;
 	}
 }
diff --git a/lib/eal/x86/rte_power_intrinsics.c b/lib/eal/x86/rte_power_intrinsics.c
index 66fea28897..f749da9b85 100644
--- a/lib/eal/x86/rte_power_intrinsics.c
+++ b/lib/eal/x86/rte_power_intrinsics.c
@@ -4,6 +4,7 @@
 
 #include <rte_common.h>
 #include <rte_lcore.h>
+#include <rte_rtm.h>
 #include <rte_spinlock.h>
 
 #include "rte_power_intrinsics.h"
@@ -28,6 +29,7 @@ __umwait_wakeup(volatile void *addr)
 }
 
 static bool wait_supported;
+static bool wait_multi_supported;
 
 static inline uint64_t
 __get_umwait_val(const volatile void *p, const uint8_t sz)
@@ -166,6 +168,8 @@ RTE_INIT(rte_power_intrinsics_init) {
 
 	if (i.power_monitor && i.power_pause)
 		wait_supported = 1;
+	if (i.power_monitor_multi)
+		wait_multi_supported = 1;
 }
 
 int
@@ -204,6 +208,9 @@ rte_power_monitor_wakeup(const unsigned int lcore_id)
 	 * In this case, since we've already woken up, the "wakeup" was
 	 * unneeded, and since T1 is still waiting on T2 releasing the lock, the
 	 * wakeup address is still valid so it's perfectly safe to write it.
+	 *
+	 * In the multi-monitor case, the act of locking will by itself trigger
+	 * the wakeup, so no additional writes are necessary.
 	 */
 	rte_spinlock_lock(&s->lock);
 	if (s->monitor_addr != NULL)
@@ -212,3 +219,69 @@ rte_power_monitor_wakeup(const unsigned int lcore_id)
 
 	return 0;
 }
+
+int
+rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
+		const uint32_t num, const uint64_t tsc_timestamp)
+{
+	const unsigned int lcore_id = rte_lcore_id();
+	struct power_wait_status *s = &wait_status[lcore_id];
+	uint32_t i, rc;
+
+	/* check if supported */
+	if (!wait_multi_supported)
+		return -ENOTSUP;
+
+	if (pmc == NULL || num == 0)
+		return -EINVAL;
+
+	/* we are already inside transaction region, return */
+	if (rte_xtest() != 0)
+		return 0;
+
+	/* start new transaction region */
+	rc = rte_xbegin();
+
+	/* transaction aborted, possibly due to a write to a wait address */
+	if (rc != RTE_XBEGIN_STARTED)
+		return 0;
+
+	/*
+	 * The mere act of reading the lock status here adds the lock to the
+	 * transaction read-set. This means that when another thread triggers
+	 * a wakeup, even if no wakeup address is set and thus nothing is
+	 * actually written, the act of locking our lock will by itself abort
+	 * the transaction and wake us up.
+	 */
+	rte_spinlock_is_locked(&s->lock);
+
+	/*
+	 * add all addresses we will wait on to the transaction read-set, and
+	 * check whether any of the wakeup conditions are already met.
+	 */
+	rc = 0;
+	for (i = 0; i < num; i++) {
+		const struct rte_power_monitor_cond *c = &pmc[i];
+
+		/* cannot be NULL */
+		if (c->fn == NULL) {
+			rc = -EINVAL;
+			break;
+		}
+
+		const uint64_t val = __get_umwait_val(c->addr, c->size);
+
+		/* abort if callback indicates that we need to stop */
+		if (c->fn(val, c->opaque) != 0)
+			break;
+	}
+
+	/* none of the conditions were met, sleep until timeout */
+	if (i == num)
+		rte_power_pause(tsc_timestamp);
+
+	/* end transaction region */
+	rte_xend();
+
+	return rc;
+}
-- 
2.25.1


