From: Hyman Huang(黄勇) <huang...@chinatelecom.cn>

Export the dirty limit throttle time and the estimated dirty ring full
time, through which we can observe whether the dirty limit takes effect
during live migration.
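For example, once the dirty-limit capability is enabled and throttling is
in service, the new statistics appear in "info migrate" roughly as follows
(illustrative output only; the values below are made up):

    (qemu) info migrate
    ...
    dirty-limit throttle time: 412 us
    dirty-limit ring full time: 4576 us
    ...

The same information is exposed through QMP query-migrate as the optional
MigrationInfo members dirty-limit-throttle-time-per-full and
dirty-limit-ring-full-time.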
Signed-off-by: Hyman Huang(黄勇) <huang...@chinatelecom.cn>
---
 include/sysemu/dirtylimit.h |  2 ++
 migration/migration.c       | 10 ++++++++++
 monitor/hmp-cmds.c          | 10 ++++++++++
 qapi/migration.json         | 15 ++++++++++++++-
 softmmu/dirtylimit.c        | 39 +++++++++++++++++++++++++++++++++++++++
 5 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h
index 8d2c1f3..f15e01d 100644
--- a/include/sysemu/dirtylimit.h
+++ b/include/sysemu/dirtylimit.h
@@ -34,4 +34,6 @@ void dirtylimit_set_vcpu(int cpu_index,
 void dirtylimit_set_all(uint64_t quota,
                         bool enable);
 void dirtylimit_vcpu_execute(CPUState *cpu);
+int64_t dirtylimit_throttle_time_per_full(void);
+int64_t dirtylimit_us_ring_full(void);
 #endif
diff --git a/migration/migration.c b/migration/migration.c
index 127d0fe..3f92389 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -62,6 +62,7 @@
 #include "yank_functions.h"
 #include "sysemu/qtest.h"
 #include "sysemu/kvm.h"
+#include "sysemu/dirtylimit.h"
 
 #define MAX_THROTTLE  (128 << 20)      /* Migration transfer speed throttling */
 
@@ -1114,6 +1115,15 @@
         info->ram->remaining = ram_bytes_remaining();
         info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
     }
+
+    if (migrate_dirty_limit() && dirtylimit_in_service()) {
+        info->has_dirty_limit_throttle_time_per_full = true;
+        info->dirty_limit_throttle_time_per_full =
+                            dirtylimit_throttle_time_per_full();
+
+        info->has_dirty_limit_ring_full_time = true;
+        info->dirty_limit_ring_full_time = dirtylimit_us_ring_full();
+    }
 }
 
 static void populate_disk_info(MigrationInfo *info)
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index 9ad6ee5..c3aaba3 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -339,6 +339,16 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
                        info->cpu_throttle_percentage);
     }
 
+    if (info->has_dirty_limit_throttle_time_per_full) {
+        monitor_printf(mon, "dirty-limit throttle time: %" PRIi64 " us\n",
+                       info->dirty_limit_throttle_time_per_full);
+    }
+
+    if (info->has_dirty_limit_ring_full_time) {
+        monitor_printf(mon, "dirty-limit ring full time: %" PRIi64 " us\n",
+                       info->dirty_limit_ring_full_time);
+    }
+
     if (info->has_postcopy_blocktime) {
         monitor_printf(mon, "postcopy blocktime: %u\n",
                        info->postcopy_blocktime);
diff --git a/qapi/migration.json b/qapi/migration.json
index 6055fdc..ae7d22d 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -242,6 +242,17 @@
 #                   Present and non-empty when migration is blocked.
 #                   (since 6.0)
 #
+# @dirty-limit-throttle-time-per-full: Maximum throttle time (in microseconds)
+#                                      of virtual CPUs for each dirty ring full
+#                                      round, used to observe whether dirty-limit
+#                                      takes effect during live migration. (since 7.3)
+#
+# @dirty-limit-ring-full-time: Estimated average dirty ring full time (in
+#                              microseconds) for each dirty ring full round. The
+#                              value equals the dirty ring memory size divided by
+#                              the average dirty page rate of the virtual CPUs,
+#                              which can be used to indirectly observe the
+#                              average memory load of the virtual CPUs. (since 7.3)
+#
 # Since: 0.14
 ##
 { 'struct': 'MigrationInfo',
@@ -259,7 +270,9 @@
            '*postcopy-blocktime' : 'uint32',
            '*postcopy-vcpu-blocktime': ['uint32'],
            '*compression': 'CompressionStats',
-           '*socket-address': ['SocketAddress'] } }
+           '*socket-address': ['SocketAddress'],
+           '*dirty-limit-throttle-time-per-full': 'int64',
+           '*dirty-limit-ring-full-time': 'int64'} }
 
 ##
 # @query-migrate:
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index b63032c..06de099 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -569,6 +569,45 @@ static struct DirtyLimitInfo *dirtylimit_query_vcpu(int cpu_index)
     return info;
 }
 
+/* Return the max throttle time among all virtual CPUs */
+int64_t dirtylimit_throttle_time_per_full(void)
+{
+    CPUState *cpu;
+    int64_t max = 0;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->throttle_us_per_full > max) {
+            max = cpu->throttle_us_per_full;
+        }
+    }
+
+    return max;
+}
+
+/*
+ * Estimate the average dirty ring full time of the virtual CPUs.
+ * Return -1 if the guest does not dirty memory.
+ */
+int64_t dirtylimit_us_ring_full(void)
+{
+    CPUState *cpu;
+    uint64_t curr_rate = 0;
+    int nvcpus = 0;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->running) {
+            nvcpus++;
+            curr_rate += vcpu_dirty_rate_get(cpu->cpu_index);
+        }
+    }
+
+    if (!curr_rate || !nvcpus) {
+        return -1;
+    }
+
+    return dirtylimit_dirty_ring_full_time(curr_rate / nvcpus);
+}
+
 static struct DirtyLimitInfoList *dirtylimit_query_all(void)
 {
     int i, index;
-- 
1.8.3.1