ping: Re: [PATCH] cpu-throttle: Fix vcpu missed throttle work

2023-09-19 Thread alloc young

Hi pbonzini:
please take some time to review this patch. It fixes
the autoconverge migration issue for heavy memory dirty
pages. Any comment will be welcome, Thx.


On 2023/9/18 11:29, alloc.yo...@outlook.com wrote:

From: alloc 

During migrations, a vcpu may run longer than 10ms and not exit
on time. If the vcpu runs over 20ms, then it'll miss a throttle
kick and will run the whole tick. When this happens and the vcpu
dirties pages fast, the migration will take a long time or even
not be able to auto converge. To fix this issue, take the overrun
vcpu time into account and adjust the whole sleep time.

Signed-off-by: yangchunguang 
---
  include/hw/core/cpu.h  |  5 
  softmmu/cpu-throttle.c | 58 +-
  2 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 92a4234439..0b3cc3e81e 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -430,6 +430,11 @@ struct CPUState {
   */
  bool throttle_thread_scheduled;
  
+/* Used to keep last cpu throttle tick

+ *
+ */
+int64_t throttle_last_tick;
+
  /*
   * Sleep throttle_us_per_full microseconds once dirty ring is full
   * if dirty page rate limit is enabled.
diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c
index d9bb30a223..bdec8dc954 100644
--- a/softmmu/cpu-throttle.c
+++ b/softmmu/cpu-throttle.c
@@ -36,22 +36,66 @@ static unsigned int throttle_percentage;
  #define CPU_THROTTLE_PCT_MIN 1
  #define CPU_THROTTLE_PCT_MAX 99
  #define CPU_THROTTLE_TIMESLICE_NS 1000
+#define CPU_THROTTLE_RUN_MIN_NS (CPU_THROTTLE_TIMESLICE_NS / 100)
  
  static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)

  {
  double pct;
  double throttle_ratio;
-int64_t sleeptime_ns, endtime_ns;
+int64_t sleeptime_ns, endtime_ns, now, overrun_ns;
  
  if (!cpu_throttle_get_percentage()) {

  return;
  }
  
+now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

  pct = (double)cpu_throttle_get_percentage() / 100;
  throttle_ratio = pct / (1 - pct);
-/* Add 1ns to fix double's rounding error (like 0.999...) */
-sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
-endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
+overrun_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) - 
cpu->throttle_last_tick;
+/* If vcpu runs longer than 20ms, then the vcpu will miss next throttle 
tick and
+*  will run almost the full tick frame. When this happens and vcpu runs 
fast dirty
+*  pages, migration may take long time or can't converge at all.
+*
+*  Example of guest run longer than 30ms when cpu throttle is 99%
+*
+*  guest run(x) throttle tick(*) guest sleep(+)
+*
+*+...+x xx+...++x...xx  vcpu
+*
+*  --*...--*--...-*--...*-- 
timeframe
+*
+*/
+if (overrun_ns > (CPU_THROTTLE_TIMESLICE_NS - CPU_THROTTLE_RUN_MIN_NS)) {
+int64_t timeframe = CPU_THROTTLE_TIMESLICE_NS / (1 - pct) + 1;
+int64_t new_ns = overrun_ns / (1 - pct) + 1;
+int frames;
+int64_t adj, remainder;
+
+frames = overrun_ns / CPU_THROTTLE_TIMESLICE_NS;
+sleeptime_ns = overrun_ns * throttle_ratio + 1;
+remainder = new_ns - frames * timeframe;
+if (remainder > 0) {
+int64_t left_ns = timeframe - remainder;
+int64_t left_run = (1 - pct) * left_ns;
+
+adj = left_run < CPU_THROTTLE_RUN_MIN_NS ? CPU_THROTTLE_RUN_MIN_NS 
- left_run : 0;
+sleeptime_ns += left_ns * pct;
+} else
+adj = CPU_THROTTLE_RUN_MIN_NS;
+
+/* Limit max vcpu sleep time to avoid guest hang,
+ * max sleep time is 10s when cpu throttle is 99%
+ */
+if (sleeptime_ns > 10 * timeframe) {
+adj = remainder + CPU_THROTTLE_RUN_MIN_NS;
+sleeptime_ns = 10 * timeframe;
+}
+sleeptime_ns -=  adj;
+} else
+/* Add 1ns to fix double's rounding error (like 0.999...) */
+sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 
1);
+
+endtime_ns = now + sleeptime_ns;
  while (sleeptime_ns > 0 && !cpu->stop) {
  if (sleeptime_ns > SCALE_MS) {
  qemu_cond_timedwait_iothread(cpu->halt_cond,
@@ -70,6 +114,7 @@ static void cpu_throttle_timer_tick(void *opaque)
  {
  CPUState *cpu;
  double pct;
+int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
  
  /* Stop the timer if needed */

  if (!cpu_throttle_get_percentage()) {
@@ -77,14 +122,13 @@ static void cpu_throttle_timer_tick(void *opaque)
  }
  CPU_FOREACH(cpu) {
  if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
+cpu->throttle_last_tick = now;
  async_run_on_cpu(cpu, cpu_throttle_thread,
   

[PATCH] cpu-throttle: Fix vcpu missed throttle work

2023-09-17 Thread alloc . young
From: alloc 

During migrations, a vcpu may run longer than 10ms and not exit
on time. If the vcpu runs over 20ms, then it'll miss a throttle
kick and will run the whole tick. When this happens and the vcpu
dirties pages fast, the migration will take a long time or even
not be able to auto converge. To fix this issue, take the overrun
vcpu time into account and adjust the whole sleep time.

Signed-off-by: yangchunguang 
---
 include/hw/core/cpu.h  |  5 
 softmmu/cpu-throttle.c | 58 +-
 2 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 92a4234439..0b3cc3e81e 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -430,6 +430,11 @@ struct CPUState {
  */
 bool throttle_thread_scheduled;
 
+/* Used to keep last cpu throttle tick
+ *
+ */
+int64_t throttle_last_tick;
+
 /*
  * Sleep throttle_us_per_full microseconds once dirty ring is full
  * if dirty page rate limit is enabled.
diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c
index d9bb30a223..bdec8dc954 100644
--- a/softmmu/cpu-throttle.c
+++ b/softmmu/cpu-throttle.c
@@ -36,22 +36,66 @@ static unsigned int throttle_percentage;
 #define CPU_THROTTLE_PCT_MIN 1
 #define CPU_THROTTLE_PCT_MAX 99
 #define CPU_THROTTLE_TIMESLICE_NS 1000
+#define CPU_THROTTLE_RUN_MIN_NS (CPU_THROTTLE_TIMESLICE_NS / 100)
 
 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
 {
 double pct;
 double throttle_ratio;
-int64_t sleeptime_ns, endtime_ns;
+int64_t sleeptime_ns, endtime_ns, now, overrun_ns;
 
 if (!cpu_throttle_get_percentage()) {
 return;
 }
 
+now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 pct = (double)cpu_throttle_get_percentage() / 100;
 throttle_ratio = pct / (1 - pct);
-/* Add 1ns to fix double's rounding error (like 0.999...) */
-sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
-endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
+overrun_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) - 
cpu->throttle_last_tick;
+/* If vcpu runs longer than 20ms, then the vcpu will miss next throttle 
tick and
+*  will run almost the full tick frame. When this happens and vcpu runs 
fast dirty
+*  pages, migration may take long time or can't converge at all.
+*
+*  Example of guest run longer than 30ms when cpu throttle is 99%
+*
+*  guest run(x) throttle tick(*) guest sleep(+)
+*
+*+...+x xx+...++x...xx  vcpu
+*
+*  --*...--*--...-*--...*-- 
timeframe
+*
+*/
+if (overrun_ns > (CPU_THROTTLE_TIMESLICE_NS - CPU_THROTTLE_RUN_MIN_NS)) {
+int64_t timeframe = CPU_THROTTLE_TIMESLICE_NS / (1 - pct) + 1;
+int64_t new_ns = overrun_ns / (1 - pct) + 1;
+int frames;
+int64_t adj, remainder;
+
+frames = overrun_ns / CPU_THROTTLE_TIMESLICE_NS;
+sleeptime_ns = overrun_ns * throttle_ratio + 1;
+remainder = new_ns - frames * timeframe;
+if (remainder > 0) {
+int64_t left_ns = timeframe - remainder;
+int64_t left_run = (1 - pct) * left_ns;
+
+adj = left_run < CPU_THROTTLE_RUN_MIN_NS ? CPU_THROTTLE_RUN_MIN_NS 
- left_run : 0;
+sleeptime_ns += left_ns * pct;
+} else
+adj = CPU_THROTTLE_RUN_MIN_NS;
+
+/* Limit max vcpu sleep time to avoid guest hang,
+ * max sleep time is 10s when cpu throttle is 99%
+ */
+if (sleeptime_ns > 10 * timeframe) {
+adj = remainder + CPU_THROTTLE_RUN_MIN_NS;
+sleeptime_ns = 10 * timeframe;
+}
+sleeptime_ns -=  adj;
+} else
+/* Add 1ns to fix double's rounding error (like 0.999...) */
+sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 
1);
+
+endtime_ns = now + sleeptime_ns;
 while (sleeptime_ns > 0 && !cpu->stop) {
 if (sleeptime_ns > SCALE_MS) {
 qemu_cond_timedwait_iothread(cpu->halt_cond,
@@ -70,6 +114,7 @@ static void cpu_throttle_timer_tick(void *opaque)
 {
 CPUState *cpu;
 double pct;
+int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
 
 /* Stop the timer if needed */
 if (!cpu_throttle_get_percentage()) {
@@ -77,14 +122,13 @@ static void cpu_throttle_timer_tick(void *opaque)
 }
 CPU_FOREACH(cpu) {
 if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
+cpu->throttle_last_tick = now;
 async_run_on_cpu(cpu, cpu_throttle_thread,
  RUN_ON_CPU_NULL);
 }
 }
-
 pct = (double)cpu_throttle_get_percentage() / 100;
-timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
-   CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
+

Re: [PATCH] softmmu/dirtylimit: Fix usleep early return on signal

2023-09-07 Thread alloc young




On 2023/9/4 21:27, Yong Huang wrote:



On Fri, Sep 1, 2023 at 10:19 AM > wrote:


From: alloc mailto:alloc.yo...@outlook.com>>

Timeout functions like usleep can return early on signal, which reduces
more dirty pages than expected. In dirtylimit case, dirtyrate meter
thread needs to kick all vcpus out to sync. The callchain:

vcpu_calculate_dirtyrate
     global_dirty_log_sync
         memory_global_dirty_log_sync
             kvm_log_sync_global
                 kvm_dirty_ring_flush
                     kvm_cpu_synchronize_kick_all < send vcpu signal

For long time sleep, use qemu_cond_timedwait_iothread to handle cpu stop
event.


The Dirty Limit algorithm seeks to keep the vCPU dirty page rate within
the set limit; since it focuses more emphasis on processing time and
precision, I feel that improvement should strive for the same result.
Could you please provide the final test results showing the impact of
that improvement?


The kvm_cpu_sync in dirty ring flush has to wait for all vcpus to exit to run 
the sync action, while the vcpu sleep may block this. Before this patch, 
the kick could cause an early vcpu return and add more dirty pages. It seems
the kvm_cpu_sync conflicts with vcpu sleep, so why not measure the dirty rate 
when the dirty ring is full?




Signed-off-by: alloc mailto:alloc.yo...@outlook.com>>
---
  softmmu/dirtylimit.c | 19 +--
  1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index fa959d7743..ee938c636d 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -411,13 +411,28 @@ void dirtylimit_set_all(uint64_t quota,

  void dirtylimit_vcpu_execute(CPUState *cpu)
  {
+    int64_t sleep_us, endtime_us;
+
+    dirtylimit_state_lock();
      if (dirtylimit_in_service() &&
          dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled &&
          cpu->throttle_us_per_full) {
          trace_dirtylimit_vcpu_execute(cpu->cpu_index,
                  cpu->throttle_us_per_full);
-        usleep(cpu->throttle_us_per_full);
-    }
+        sleep_us = cpu->throttle_us_per_full;
+        dirtylimit_state_unlock();
+        endtime_us = qemu_clock_get_us(QEMU_CLOCK_REALTIME) + sleep_us;
+        while (sleep_us > 0 && !cpu->stop) {
+            if (sleep_us > SCALE_US) {
+                qemu_mutex_lock_iothread();
+                qemu_cond_timedwait_iothread(cpu->halt_cond,
sleep_us / SCALE_US);
+                qemu_mutex_unlock_iothread();
+            } else
+                g_usleep(sleep_us);
+            sleep_us = endtime_us -
qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+        }
+    } else
+        dirtylimit_state_unlock();
  }

  static void dirtylimit_init(void)
-- 
2.39.3




--
Best regards




Re: [PATCH] softmmu/dirtylimit: Fix usleep early return on signal

2023-09-04 Thread alloc young




On 2023/9/4 21:27, Yong Huang wrote:



On Fri, Sep 1, 2023 at 10:19 AM > wrote:


From: alloc mailto:alloc.yo...@outlook.com>>

Timeout functions like usleep can return early on signal, which reduces
more dirty pages than expected. In dirtylimit case, dirtyrate meter
thread needs to kick all vcpus out to sync. The callchain:

vcpu_calculate_dirtyrate
     global_dirty_log_sync
         memory_global_dirty_log_sync
             kvm_log_sync_global
                 kvm_dirty_ring_flush
                     kvm_cpu_synchronize_kick_all < send vcpu signal

For long time sleep, use qemu_cond_timedwait_iothread to handle cpu stop
event.


The Dirty Limit algorithm seeks to keep the vCPU dirty page rate within
the set limit; since it focuses more emphasis on processing time and
precision, I feel that improvement should strive for the same result.
Could you please provide the final test results showing the impact of
that improvement?
Use the command line below with the initrd built in tests/migration; the guest 
runs stress at 5GiB/s. With dirty limit on and default 
parameters, migration can't finish within 1 hour. The default vcpu dirty 
limit is set to 1 MB/s; however, the mig_src.log shows a copy rate of 
128MiB/s. With this patch, migration finishes in 39s.


/usr/libexec/qemu-kvm -display none -vga none -name 
mig_src,debug-threads=on -monitor stdio -accel kvm,dirty-ring-size=4096 
-cpu host -kernel /boot/vmlinuz-5.14.0-70.22.1.el9_0.x86_64 -initrd 
/root/initrd-stress.img -append noapic edd=off printk.time=1 
noreplace-smp cgroup_disable=memory pci=noearly console=ttyS0 debug 
ramsize=1 ncpus=1 -chardev file,id=charserial0,path=/var/log/mig_src.log 
-serial chardev:charserial0 -m 1536 -smp 1





Signed-off-by: alloc mailto:alloc.yo...@outlook.com>>
---
  softmmu/dirtylimit.c | 19 +--
  1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index fa959d7743..ee938c636d 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -411,13 +411,28 @@ void dirtylimit_set_all(uint64_t quota,

  void dirtylimit_vcpu_execute(CPUState *cpu)
  {
+    int64_t sleep_us, endtime_us;
+
+    dirtylimit_state_lock();
      if (dirtylimit_in_service() &&
          dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled &&
          cpu->throttle_us_per_full) {
          trace_dirtylimit_vcpu_execute(cpu->cpu_index,
                  cpu->throttle_us_per_full);
-        usleep(cpu->throttle_us_per_full);
-    }
+        sleep_us = cpu->throttle_us_per_full;
+        dirtylimit_state_unlock();
+        endtime_us = qemu_clock_get_us(QEMU_CLOCK_REALTIME) + sleep_us;
+        while (sleep_us > 0 && !cpu->stop) {
+            if (sleep_us > SCALE_US) {
+                qemu_mutex_lock_iothread();
+                qemu_cond_timedwait_iothread(cpu->halt_cond,
sleep_us / SCALE_US);
+                qemu_mutex_unlock_iothread();
+            } else
+                g_usleep(sleep_us);
+            sleep_us = endtime_us -
qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+        }
+    } else
+        dirtylimit_state_unlock();
  }

  static void dirtylimit_init(void)
-- 
2.39.3




--
Best regards




[PATCH] softmmu/dirtylimit: Fix usleep early return on signal

2023-08-31 Thread alloc . young
From: alloc 

Timeout functions like usleep can return early on a signal, which results in
more dirty pages than expected. In the dirtylimit case, the dirtyrate meter
thread needs to kick all vcpus out to sync. The callchain:

vcpu_calculate_dirtyrate
global_dirty_log_sync
memory_global_dirty_log_sync
kvm_log_sync_global
kvm_dirty_ring_flush
kvm_cpu_synchronize_kick_all < send vcpu signal

For long time sleep, use qemu_cond_timedwait_iothread to handle cpu stop
event.

Signed-off-by: alloc 
---
 softmmu/dirtylimit.c | 19 +--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index fa959d7743..ee938c636d 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -411,13 +411,28 @@ void dirtylimit_set_all(uint64_t quota,
 
 void dirtylimit_vcpu_execute(CPUState *cpu)
 {
+int64_t sleep_us, endtime_us;
+
+dirtylimit_state_lock();
 if (dirtylimit_in_service() &&
 dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled &&
 cpu->throttle_us_per_full) {
 trace_dirtylimit_vcpu_execute(cpu->cpu_index,
 cpu->throttle_us_per_full);
-usleep(cpu->throttle_us_per_full);
-}
+sleep_us = cpu->throttle_us_per_full;
+dirtylimit_state_unlock();
+endtime_us = qemu_clock_get_us(QEMU_CLOCK_REALTIME) + sleep_us;
+while (sleep_us > 0 && !cpu->stop) {
+if (sleep_us > SCALE_US) {
+qemu_mutex_lock_iothread();
+qemu_cond_timedwait_iothread(cpu->halt_cond, sleep_us / 
SCALE_US);
+qemu_mutex_unlock_iothread();
+} else
+g_usleep(sleep_us);
+sleep_us = endtime_us - qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+}
+} else
+dirtylimit_state_unlock();
 }
 
 static void dirtylimit_init(void)
-- 
2.39.3




[PATCH] accel/kvm: Fix dirty reaper thread crash

2023-08-28 Thread alloc . young
From: alloc 

kvm_dirty_ring_reaper_init is called much earlier than vcpu creation,
so it's possible the reaper crashes before the vcpus mmap kvm_dirty_gfns.
Add a machine done notifier to ensure the dirty reaper runs after the vcpus
are initialized.

Signed-off-by: alloc 
---
 accel/kvm/kvm-all.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index d07f1ecbd3..5ae7e27a72 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -51,6 +51,7 @@
 
 #include "hw/boards.h"
 #include "sysemu/stats.h"
+#include "sysemu/sysemu.h"
 
 /* This check must be after config-host.h is included */
 #ifdef CONFIG_EVENTFD
@@ -133,6 +134,8 @@ static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
 
 static QemuMutex kml_slots_lock;
 
+static Notifier dirty_ring_reaper_machine_done;
+
 #define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
 #define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
 
@@ -1454,8 +1457,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
 return NULL;
 }
 
-static void kvm_dirty_ring_reaper_init(KVMState *s)
+static void kvm_dirty_ring_reaper_init(Notifier *n, void *unused)
 {
+KVMState *s = kvm_state;
 struct KVMDirtyRingReaper *r = &s->reaper;
 
 qemu_thread_create(>reaper_thr, "kvm-reaper",
@@ -2742,7 +2746,8 @@ static int kvm_init(MachineState *ms)
 }
 
 if (s->kvm_dirty_ring_size) {
-kvm_dirty_ring_reaper_init(s);
+dirty_ring_reaper_machine_done.notify = kvm_dirty_ring_reaper_init;
+qemu_add_machine_init_done_notifier(&dirty_ring_reaper_machine_done);
 }
 
 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
-- 
2.39.3




[PATCH v2 0/2] softmmu/dirtylimit: Fix memory leak issue

2023-08-24 Thread alloc . young
From: alloc 

Changes in v2:
- Split into two patches, one fixing memory leak issue and another
  converting free to g_free.
- Fix typos

alloc (1):
  softmmu/dirtylimit: Convert free to g_free

alloc.young (1):
  softmmu: Fix dirtylimit memory leak

 softmmu/dirtylimit.c | 26 --
 1 file changed, 12 insertions(+), 14 deletions(-)

-- 
2.39.3




[PATCH v2 2/2] softmmu/dirtylimit: Convert free to g_free

2023-08-24 Thread alloc . young
From: alloc 

Convert free to g_free to match g_new and g_malloc functions.

Signed-off-by: alloc 
---
 softmmu/dirtylimit.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index e3ff53b8fc..fa959d7743 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -100,7 +100,7 @@ static void vcpu_dirty_rate_stat_collect(void)
 stat.rates[i].dirty_rate;
 }
 
-free(stat.rates);
+g_free(stat.rates);
 }
 
 static void *vcpu_dirty_rate_stat_thread(void *opaque)
@@ -171,10 +171,10 @@ void vcpu_dirty_rate_stat_initialize(void)
 
 void vcpu_dirty_rate_stat_finalize(void)
 {
-free(vcpu_dirty_rate_stat->stat.rates);
+g_free(vcpu_dirty_rate_stat->stat.rates);
 vcpu_dirty_rate_stat->stat.rates = NULL;
 
-free(vcpu_dirty_rate_stat);
+g_free(vcpu_dirty_rate_stat);
 vcpu_dirty_rate_stat = NULL;
 }
 
@@ -220,10 +220,10 @@ void dirtylimit_state_initialize(void)
 
 void dirtylimit_state_finalize(void)
 {
-free(dirtylimit_state->states);
+g_free(dirtylimit_state->states);
 dirtylimit_state->states = NULL;
 
-free(dirtylimit_state);
+g_free(dirtylimit_state);
 dirtylimit_state = NULL;
 
 trace_dirtylimit_state_finalize();
-- 
2.39.3




[PATCH v2 1/2] softmmu: Fix dirtylimit memory leak

2023-08-24 Thread alloc . young
From: "alloc.young" 

Fix memory leak in hmp_info_vcpu_dirty_limit; use g_autoptr
to handle memory deallocation.

Signed-off-by: alloc.young 
---
 softmmu/dirtylimit.c | 16 +++-
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 3c275ee55b..e3ff53b8fc 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -653,7 +653,8 @@ struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error 
**errp)
 
 void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict)
 {
-DirtyLimitInfoList *limit, *head, *info = NULL;
+DirtyLimitInfoList *info;
+g_autoptr(DirtyLimitInfoList) head = NULL;
 Error *err = NULL;
 
 if (!dirtylimit_in_service()) {
@@ -661,20 +662,17 @@ void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict 
*qdict)
 return;
 }
 
-info = qmp_query_vcpu_dirty_limit();
+head = qmp_query_vcpu_dirty_limit();
 if (err) {
 hmp_handle_error(mon, err);
 return;
 }
 
-head = info;
-for (limit = head; limit != NULL; limit = limit->next) {
+for (info = head; info != NULL; info = info->next) {
 monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s),"
 " current rate %"PRIi64 " (MB/s)\n",
-limit->value->cpu_index,
-limit->value->limit_rate,
-limit->value->current_rate);
+info->value->cpu_index,
+info->value->limit_rate,
+info->value->current_rate);
 }
-
-g_free(info);
 }
-- 
2.39.3




Re: [PATCH] softmmu: Fix dirtylimit memory leak

2023-08-24 Thread alloc young



On 2023/8/24 13:16, Michael Tokarev wrote:

23.08.2023 10:47, alloc.yo...@outlook.com wrote:

From: "alloc.young" 

Fix memory leak in hmp_info_vcpu_dirty_limit,use g_autoptr
handle memory deallocation, alse use g_free to match g_malloc
&& g_new functions.


"..use g_autoptr TO handle.." ("to" is missing).
"alse" - I guess should be "Also".


I'll fix these typos in next patch.


I think it is better to split this into two parts, one fixing
the memleak and another converting to g_free().


Ok, I'll make another patch, Thx.




/mjt




[PATCH] softmmu: Fix dirtylimit memory leak

2023-08-23 Thread alloc . young
From: "alloc.young" 

Fix memory leak in hmp_info_vcpu_dirty_limit; use g_autoptr
to handle memory deallocation.

Signed-off-by: alloc.young 
Reviewed-by: Yong Huang 
---

v1->v2
drop g_free, just focus on fix memory leak issue

---
 softmmu/dirtylimit.c | 16 +++-
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 3c275ee55b..e3ff53b8fc 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -653,7 +653,8 @@ struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error 
**errp)
 
 void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict)
 {
-DirtyLimitInfoList *limit, *head, *info = NULL;
+DirtyLimitInfoList *info;
+g_autoptr(DirtyLimitInfoList) head = NULL;
 Error *err = NULL;
 
 if (!dirtylimit_in_service()) {
@@ -661,20 +662,17 @@ void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict 
*qdict)
 return;
 }
 
-info = qmp_query_vcpu_dirty_limit();
+head = qmp_query_vcpu_dirty_limit();
 if (err) {
 hmp_handle_error(mon, err);
 return;
 }
 
-head = info;
-for (limit = head; limit != NULL; limit = limit->next) {
+for (info = head; info != NULL; info = info->next) {
 monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s),"
 " current rate %"PRIi64 " (MB/s)\n",
-limit->value->cpu_index,
-limit->value->limit_rate,
-limit->value->current_rate);
+info->value->cpu_index,
+info->value->limit_rate,
+info->value->current_rate);
 }
-
-g_free(info);
 }
-- 
2.39.3




[PATCH] softmmu: Fix dirtylimit memory leak

2023-08-23 Thread alloc . young
From: "alloc.young" 

Fix memory leak in hmp_info_vcpu_dirty_limit,use g_autoptr
handle memory deallocation, alse use g_free to match g_malloc
&& g_new functions.

Signed-off-by: alloc.young 
---
 softmmu/dirtylimit.c | 26 --
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 3c275ee55b..fa959d7743 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -100,7 +100,7 @@ static void vcpu_dirty_rate_stat_collect(void)
 stat.rates[i].dirty_rate;
 }
 
-free(stat.rates);
+g_free(stat.rates);
 }
 
 static void *vcpu_dirty_rate_stat_thread(void *opaque)
@@ -171,10 +171,10 @@ void vcpu_dirty_rate_stat_initialize(void)
 
 void vcpu_dirty_rate_stat_finalize(void)
 {
-free(vcpu_dirty_rate_stat->stat.rates);
+g_free(vcpu_dirty_rate_stat->stat.rates);
 vcpu_dirty_rate_stat->stat.rates = NULL;
 
-free(vcpu_dirty_rate_stat);
+g_free(vcpu_dirty_rate_stat);
 vcpu_dirty_rate_stat = NULL;
 }
 
@@ -220,10 +220,10 @@ void dirtylimit_state_initialize(void)
 
 void dirtylimit_state_finalize(void)
 {
-free(dirtylimit_state->states);
+g_free(dirtylimit_state->states);
 dirtylimit_state->states = NULL;
 
-free(dirtylimit_state);
+g_free(dirtylimit_state);
 dirtylimit_state = NULL;
 
 trace_dirtylimit_state_finalize();
@@ -653,7 +653,8 @@ struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error 
**errp)
 
 void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict)
 {
-DirtyLimitInfoList *limit, *head, *info = NULL;
+DirtyLimitInfoList *info;
+g_autoptr(DirtyLimitInfoList) head = NULL;
 Error *err = NULL;
 
 if (!dirtylimit_in_service()) {
@@ -661,20 +662,17 @@ void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict 
*qdict)
 return;
 }
 
-info = qmp_query_vcpu_dirty_limit();
+head = qmp_query_vcpu_dirty_limit();
 if (err) {
 hmp_handle_error(mon, err);
 return;
 }
 
-head = info;
-for (limit = head; limit != NULL; limit = limit->next) {
+for (info = head; info != NULL; info = info->next) {
 monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s),"
 " current rate %"PRIi64 " (MB/s)\n",
-limit->value->cpu_index,
-limit->value->limit_rate,
-limit->value->current_rate);
+info->value->cpu_index,
+info->value->limit_rate,
+info->value->current_rate);
 }
-
-g_free(info);
 }
-- 
2.39.3