On 16/01/2019 01:24, Hans van Kranenburg wrote:
> Hi,
> 
> On 1/14/19 1:44 PM, Juergen Gross wrote:
>> Commit f94c8d11699759 ("sched/clock, x86/tsc: Rework the x86 'unstable'
>> sched_clock() interface") broke Xen guest time handling across
>> migration:
>>
>> [  187.249951] Freezing user space processes ... (elapsed 0.001 seconds) done.
>> [  187.251137] OOM killer disabled.
>> [  187.251137] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done.
>> [  187.252299] suspending xenstore...
>> [  187.266987] xen:grant_table: Grant tables using version 1 layout
>> [18446743811.706476] OOM killer enabled.
>> [18446743811.706478] Restarting tasks ... done.
>> [18446743811.720505] Setting capacity to 16777216
>>
>> Fix that by setting xen_sched_clock_offset at resume time to ensure a
>> monotonic clock value.
>>
>> [...]
> 
> With v3 of the patch, I still see the time jump happen in a single log
> line, but only when using PVH.
> 
> [   49.486453] Freezing user space processes ... (elapsed 0.002 seconds) done.
> [   49.488743] OOM killer disabled.
> [   49.488764] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done.
> [   49.491117] suspending xenstore...
> [2000731.388722] xen:events: Xen HVM callback vector for event delivery is enabled
> [   49.491750] xen:grant_table: Grant tables using version 1 layout
> [   49.810722] OOM killer enabled.
> [   49.810744] Restarting tasks ... done.
> [   49.856263] Setting capacity to 6291456
> [   50.006002] Setting capacity to 10485760
> 
> If I start as PV, it never seems to happen.
> 
> Up to you to decide how important this is. :)
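
For reference, the core idea of the v3 fix: remember the sched_clock()
value at suspend, and at resume re-base xen_sched_clock_offset against
the new host's Xen clock so sched_clock() continues monotonically. A
simplified sketch of that pattern (the hook names here are made up and
this is not the literal patch; xen_clocksource_read() is the existing
Xen clocksource accessor):

static u64 xen_sched_clock_offset;  /* subtracted from the raw Xen clock */
static u64 xen_clock_value_saved;   /* sched_clock() value at suspend */

static u64 xen_sched_clock(void)
{
        return xen_clocksource_read() - xen_sched_clock_offset;
}

/* Illustrative suspend hook: remember where sched_clock() stopped. */
static void sched_clock_save(void)
{
        xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
}

/* Illustrative resume hook: the new host's Xen clock is unrelated to
 * the old one, so recompute the offset to make sched_clock() continue
 * from the saved value instead of jumping. */
static void sched_clock_restore(void)
{
        xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
}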

We could do something like below. Boris?
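
The jump shows up in just that one line because xen_hvm_post_suspend()
re-registers the callback vector in a window where the sched_clock
offset hasn't been re-based for the new host yet, and PV guests don't
use the callback vector at all, which is why you never see it there.
Rather than reordering the resume path, the patch below simply doesn't
print the message when re-registering at resume.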


Juergen
---
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
index e666b614cf6d..088f3a6b4be9 100644
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -13,6 +13,6 @@ void xen_hvm_post_suspend(int suspend_cancelled)
                xen_hvm_init_shared_info();
                xen_vcpu_restore();
        }
-       xen_callback_vector();
+       xen_callback_vector(true);
        xen_unplug_emulated_devices();
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0e60bd918695..ba293fda3265 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -55,7 +55,7 @@ void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);

-void xen_callback_vector(void);
+void xen_callback_vector(bool silent);
 void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);

diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..8d8d50bea215 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1637,7 +1637,7 @@ EXPORT_SYMBOL_GPL(xen_set_callback_via);
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
  * vcpu and we don't need PCI support or APIC interactions. */
-void xen_callback_vector(void)
+void xen_callback_vector(bool silent)
 {
        int rc;
        uint64_t callback_via;
@@ -1650,13 +1650,14 @@ void xen_callback_vector(void)
                        xen_have_vector_callback = 0;
                        return;
                }
-               pr_info("Xen HVM callback vector for event delivery is enabled\n");
+               if (!silent)
+                       pr_info("Xen HVM callback vector for event delivery is enabled\n");
                alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
                                xen_hvm_callback_vector);
        }
 }
 #else
-void xen_callback_vector(void) {}
+void xen_callback_vector(bool silent) {}
 #endif

 #undef MODULE_PARAM_PREFIX
@@ -1692,7 +1693,7 @@ void __init xen_init_IRQ(void)
                        pci_xen_initial_domain();
        }
        if (xen_feature(XENFEAT_hvm_callback_vector))
-               xen_callback_vector();
+               xen_callback_vector(false);

        if (xen_hvm_domain()) {
                native_init_IRQ();
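
With that, the message is still logged once at boot via xen_init_IRQ(),
while the resume path stays quiet.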
