Re: [PATCH 3/6] KVM: selftests: Convert iterations to int in dirty_log_perf_test

2021-01-12 Thread Thomas Huth

On 12/01/2021 22.42, Ben Gardon wrote:

In order to add an iteration -1 to indicate that the memory population
phase has not yet completed, convert the iterations counters to ints.

No functional change intended.

Reviewed-by: Jacob Xu 

Signed-off-by: Ben Gardon 
---
  .../selftests/kvm/dirty_log_perf_test.c   | 26 +--
  1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c 
b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 15a9c45bdb5f..3875f22d7283 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -28,8 +28,8 @@ static uint64_t guest_percpu_mem_size = 
DEFAULT_PER_VCPU_MEM_SIZE;
  /* Host variables */
  static u64 dirty_log_manual_caps;
  static bool host_quit;
-static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+static int iteration;
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];


Wouldn't it be better to use signed 64-bit variables instead? I.e. "int64_t" ?

 Thomas



[PATCH 3/6] KVM: selftests: Convert iterations to int in dirty_log_perf_test

2021-01-12 Thread Ben Gardon
In order to add an iteration -1 to indicate that the memory population
phase has not yet completed, convert the iterations counters to ints.

No functional change intended.

Reviewed-by: Jacob Xu 

Signed-off-by: Ben Gardon 
---
 .../selftests/kvm/dirty_log_perf_test.c   | 26 +--
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c 
b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 15a9c45bdb5f..3875f22d7283 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -28,8 +28,8 @@ static uint64_t guest_percpu_mem_size = 
DEFAULT_PER_VCPU_MEM_SIZE;
 /* Host variables */
 static u64 dirty_log_manual_caps;
 static bool host_quit;
-static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+static int iteration;
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
 static void *vcpu_worker(void *data)
 {
@@ -48,7 +48,7 @@ static void *vcpu_worker(void *data)
run = vcpu_state(vm, vcpu_id);
 
while (!READ_ONCE(host_quit)) {
-   uint64_t current_iteration = READ_ONCE(iteration);
+   int current_iteration = READ_ONCE(iteration);
 
clock_gettime(CLOCK_MONOTONIC, );
ret = _vcpu_run(vm, vcpu_id);
@@ -61,17 +61,17 @@ static void *vcpu_worker(void *data)
 
pr_debug("Got sync event from vCPU %d\n", vcpu_id);
vcpu_last_completed_iteration[vcpu_id] = current_iteration;
-   pr_debug("vCPU %d updated last completed iteration to %lu\n",
+   pr_debug("vCPU %d updated last completed iteration to %d\n",
 vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
 
if (current_iteration) {
pages_count += vcpu_args->pages;
total = timespec_add(total, ts_diff);
-   pr_debug("vCPU %d iteration %lu dirty memory time: 
%ld.%.9lds\n",
+   pr_debug("vCPU %d iteration %d dirty memory time: 
%ld.%.9lds\n",
vcpu_id, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
} else {
-   pr_debug("vCPU %d iteration %lu populate memory time: 
%ld.%.9lds\n",
+   pr_debug("vCPU %d iteration %d populate memory time: 
%ld.%.9lds\n",
vcpu_id, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
}
@@ -81,7 +81,7 @@ static void *vcpu_worker(void *data)
}
 
avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
-   pr_debug("\nvCPU %d dirtied 0x%lx pages over %lu iterations in 
%ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
+   pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in 
%ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
 
@@ -144,7 +144,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
 
/* Allow the vCPU to populate memory */
-   pr_debug("Starting iteration %lu - Populating\n", iteration);
+   pr_debug("Starting iteration %d - Populating\n", iteration);
while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) != iteration)
;
 
@@ -168,7 +168,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clock_gettime(CLOCK_MONOTONIC, );
iteration++;
 
-   pr_debug("Starting iteration %lu\n", iteration);
+   pr_debug("Starting iteration %d\n", iteration);
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
   != iteration)
@@ -177,7 +177,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
ts_diff = timespec_elapsed(start);
vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
-   pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n",
+   pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
clock_gettime(CLOCK_MONOTONIC, );
@@ -186,7 +186,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
   ts_diff);
-   pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n",
+   pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
if (dirty_log_manual_caps) {