[dpdk-dev] [PATCH v4 1/7] app/flow-perf: start using more generic wrapper for cycles
Wisam Jaddo
wisamm at nvidia.com
Sun Mar 14 10:54:21 CET 2021
rdtsc() is x86-specific and may fail on other architectures,
so it's better to use a more generic API for cycle measurement.
Signed-off-by: Wisam Jaddo <wisamm at nvidia.com>
Acked-by: Alexander Kozyrev <akozyrev at nvidia.com>
---
app/test-flow-perf/main.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/app/test-flow-perf/main.c b/app/test-flow-perf/main.c
index 99d0463456..8b5a11c15e 100644
--- a/app/test-flow-perf/main.c
+++ b/app/test-flow-perf/main.c
@@ -969,7 +969,7 @@ meters_handler(int port_id, uint8_t core_id, uint8_t ops)
end_counter = (core_id + 1) * rules_count_per_core;
cpu_time_used = 0;
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
for (counter = start_counter; counter < end_counter; counter++) {
if (ops == METER_CREATE)
create_meter_rule(port_id, counter);
@@ -984,10 +984,10 @@ meters_handler(int port_id, uint8_t core_id, uint8_t ops)
if (!((counter + 1) % rules_batch)) {
rules_batch_idx = ((counter + 1) / rules_batch) - 1;
cpu_time_per_batch[rules_batch_idx] =
- ((double)(rte_rdtsc() - start_batch))
- / rte_get_tsc_hz();
+ ((double)(rte_get_timer_cycles() - start_batch))
+ / rte_get_timer_hz();
cpu_time_used += cpu_time_per_batch[rules_batch_idx];
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
}
}
@@ -1089,7 +1089,7 @@ destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)
if (flow_group > 0 && core_id == 0)
rules_count_per_core++;
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
for (i = 0; i < (uint32_t) rules_count_per_core; i++) {
if (flows_list[i] == 0)
break;
@@ -1107,12 +1107,12 @@ destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)
* for this batch.
*/
if (!((i + 1) % rules_batch)) {
- end_batch = rte_rdtsc();
+ end_batch = rte_get_timer_cycles();
delta = (double) (end_batch - start_batch);
rules_batch_idx = ((i + 1) / rules_batch) - 1;
- cpu_time_per_batch[rules_batch_idx] = delta / rte_get_tsc_hz();
+ cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
cpu_time_used += cpu_time_per_batch[rules_batch_idx];
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
}
}
@@ -1185,7 +1185,7 @@ insert_flows(int port_id, uint8_t core_id)
flows_list[flow_index++] = flow;
}
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
for (counter = start_counter; counter < end_counter; counter++) {
flow = generate_flow(port_id, flow_group,
flow_attrs, flow_items, flow_actions,
@@ -1211,12 +1211,12 @@ insert_flows(int port_id, uint8_t core_id)
* for this batch.
*/
if (!((counter + 1) % rules_batch)) {
- end_batch = rte_rdtsc();
+ end_batch = rte_get_timer_cycles();
delta = (double) (end_batch - start_batch);
rules_batch_idx = ((counter + 1) / rules_batch) - 1;
- cpu_time_per_batch[rules_batch_idx] = delta / rte_get_tsc_hz();
+ cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
cpu_time_used += cpu_time_per_batch[rules_batch_idx];
- start_batch = rte_rdtsc();
+ start_batch = rte_get_timer_cycles();
}
}
--
2.17.1
More information about the dev
mailing list