The code initializes an unsigned int to UINT_MAX by writing "-1". As a consequence, the following comparison (data->period < tc.wPeriodMin) is in fact always false, even though at a first look the "-1" makes it seem always true. Since alarm timer initializations are never nested, it is simpler to initialize the field to zero and unconditionally store the result of timeGetDevCaps into data->period.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 vl.c |    6 ++----
 1 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/vl.c b/vl.c
index d8328c7..6b1e1a7 100644
--- a/vl.c
+++ b/vl.c
@@ -626,7 +626,7 @@ static struct qemu_alarm_timer *alarm_timer;
 struct qemu_alarm_win32 {
     MMRESULT timerId;
     unsigned int period;
-} alarm_win32_data = {0, -1};
+} alarm_win32_data = {0, 0};
 
 static int win32_start_timer(struct qemu_alarm_timer *t);
 static void win32_stop_timer(struct qemu_alarm_timer *t);
@@ -1360,9 +1360,7 @@ static int win32_start_timer(struct qemu_alarm_timer *t)
 
     memset(&tc, 0, sizeof(tc));
     timeGetDevCaps(&tc, sizeof(tc));
-    if (data->period < tc.wPeriodMin)
-        data->period = tc.wPeriodMin;
-
+    data->period = tc.wPeriodMin;
     timeBeginPeriod(data->period);
 
     flags = TIME_CALLBACK_FUNCTION;
-- 
1.6.6