Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in at 2020-07-14 07:44:00
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new.3060 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Tue Jul 14 07:44:00 2020 rev:287 rq:820049 version:4.13.1_04

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2020-06-11 14:47:29.105868961 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xen.changes        2020-07-14 07:44:35.838938763 +0200
@@ -1,0 +2,49 @@
+Tue Jun 30 18:03:40 UTC 2020 - [email protected]
+
+- Add libxc.migrate_tracking.patch to track live migrations
+  unconditionally in logfiles, especially in libvirt.
+  This will track how long a domU was suspended during transit.
+
+-------------------------------------------------------------------
+Mon Jun 29 11:28:27 MDT 2020 - [email protected]
+
+- bsc#1173376 - VUL-0: CVE-2020-15566: xen: XSA-317 - Incorrect
+  error handling in event channel port allocation
+  xsa317.patch
+- bsc#1173377 - VUL-0: CVE-2020-15563: xen: XSA-319 - inverted code
+  paths in x86 dirty VRAM tracking
+  xsa319.patch
+- bsc#1173378 - VUL-0: CVE-2020-15565: xen: XSA-321 - insufficient
+  cache write-back under VT-d
+  xsa321-1.patch
+  xsa321-2.patch
+  xsa321-3.patch
+  xsa321-4.patch
+  xsa321-5.patch
+  xsa321-6.patch
+  xsa321-7.patch
+- bsc#1173380 - VUL-0: CVE-2020-15567: xen: XSA-328 - non-atomic
+  modification of live EPT PTE
+  xsa328-1.patch
+  xsa328-2.patch
+
+-------------------------------------------------------------------
+Mon Jun 22 11:24:48 MDT 2020 - [email protected]
+
+- bsc#1172205 - VUL-0: CVE-2020-0543: xen: Special Register Buffer
+  Data Sampling (SRBDS) aka "CrossTalk" (XSA-320)
+  5ee24d0e-x86-spec-ctrl-document-SRBDS-workaround.patch
+  5edfbbea-x86-spec-ctrl-CPUID-MSR-defs-for-SRBDS.patch (Replaces xsa320-1.patch)
+  5edfbbea-x86-spec-ctrl-mitigate-SRBDS.patch (Replaces xsa320-2.patch)
+- Upstream bug fixes (bsc#1027519)
+  5ec50b05-x86-idle-rework-C6-EOI-workaround.patch
+  5ec7dcaa-x86-dont-enter-C6-with-in-service-intr.patch
+  5ec7dcf6-x86-dont-enter-C3-C6-with-errata.patch
+  5ec82237-x86-extend-ISR-C6-workaround-to-Haswell.patch
+  5ece1b91-x86-clear-RDRAND-CPUID-bit-on-AMD-fam-15-16.patch
+  5ece8ac4-x86-load_system_tables-NMI-MC-safe.patch
+  5ed69804-x86-ucode-fix-start-end-update.patch
+  5eda60cb-SVM-split-recalc-NPT-fault-handling.patch
+  5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
+
+-------------------------------------------------------------------
@@ -114,0 +164,7 @@
+
+-------------------------------------------------------------------
+Wed Mar 25 18:18:18 UTC 2020 - [email protected]
+
+- bsc#1167608 - unbound limit for max_event_channels
+  domUs with many vcpus and/or resources fail to start
+  libxl.max_event_channels.patch

Old:
----
  xsa320-1.patch
  xsa320-2.patch

New:
----
  5ec50b05-x86-idle-rework-C6-EOI-workaround.patch
  5ec7dcaa-x86-dont-enter-C6-with-in-service-intr.patch
  5ec7dcf6-x86-dont-enter-C3-C6-with-errata.patch
  5ec82237-x86-extend-ISR-C6-workaround-to-Haswell.patch
  5ece1b91-x86-clear-RDRAND-CPUID-bit-on-AMD-fam-15-16.patch
  5ece8ac4-x86-load_system_tables-NMI-MC-safe.patch
  5ed69804-x86-ucode-fix-start-end-update.patch
  5eda60cb-SVM-split-recalc-NPT-fault-handling.patch
  5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
  5edfbbea-x86-spec-ctrl-CPUID-MSR-defs-for-SRBDS.patch
  5edfbbea-x86-spec-ctrl-mitigate-SRBDS.patch
  5ee24d0e-x86-spec-ctrl-document-SRBDS-workaround.patch
  libxc.migrate_tracking.patch
  libxl.max_event_channels.patch
  xsa317.patch
  xsa319.patch
  xsa321-1.patch
  xsa321-2.patch
  xsa321-3.patch
  xsa321-4.patch
  xsa321-5.patch
  xsa321-6.patch
  xsa321-7.patch
  xsa328-1.patch
  xsa328-2.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.Lgx3u0/_old  2020-07-14 07:44:54.186997996 +0200
+++ /var/tmp/diff_new_pack.Lgx3u0/_new  2020-07-14 07:44:54.190998010 +0200
@@ -123,7 +123,7 @@
 BuildRequires:  pesign-obs-integration
 %endif
 
-Version:        4.13.1_02
+Version:        4.13.1_04
 Release:        0
 Summary:        Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
 License:        GPL-2.0-only
@@ -165,8 +165,29 @@
 Patch1:         5eb51be6-cpupool-fix-removing-cpu-from-pool.patch
 Patch2:         5eb51caa-sched-vcpu-pause-flags-atomic.patch
 Patch3:         5ec2a760-x86-determine-MXCSR-mask-always.patch
-Patch100:       xsa320-1.patch
-Patch101:       xsa320-2.patch
+Patch4:         5ec50b05-x86-idle-rework-C6-EOI-workaround.patch
+Patch5:         5ec7dcaa-x86-dont-enter-C6-with-in-service-intr.patch
+Patch6:         5ec7dcf6-x86-dont-enter-C3-C6-with-errata.patch
+Patch7:         5ec82237-x86-extend-ISR-C6-workaround-to-Haswell.patch
+Patch8:         5ece1b91-x86-clear-RDRAND-CPUID-bit-on-AMD-fam-15-16.patch
+Patch9:         5ece8ac4-x86-load_system_tables-NMI-MC-safe.patch
+Patch10:        5ed69804-x86-ucode-fix-start-end-update.patch
+Patch11:        5eda60cb-SVM-split-recalc-NPT-fault-handling.patch
+Patch12:        5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
+Patch13:        5edfbbea-x86-spec-ctrl-CPUID-MSR-defs-for-SRBDS.patch
+Patch14:        5edfbbea-x86-spec-ctrl-mitigate-SRBDS.patch
+Patch15:        5ee24d0e-x86-spec-ctrl-document-SRBDS-workaround.patch
+Patch317:       xsa317.patch
+Patch319:       xsa319.patch
+Patch32101:     xsa321-1.patch
+Patch32102:     xsa321-2.patch
+Patch32103:     xsa321-3.patch
+Patch32104:     xsa321-4.patch
+Patch32105:     xsa321-5.patch
+Patch32106:     xsa321-6.patch
+Patch32107:     xsa321-7.patch
+Patch32801:     xsa328-1.patch
+Patch32802:     xsa328-2.patch
 # Our platform specific patches
 Patch400:       xen-destdir.patch
 Patch401:       vif-bridge-no-iptables.patch
@@ -193,6 +214,7 @@
 Patch457:       pygrub-handle-one-line-menu-entries.patch
 Patch458:       aarch64-rename-PSR_MODE_ELxx-to-match-linux-headers.patch
 Patch459:       aarch64-maybe-uninitialized.patch
+Patch461:       libxl.max_event_channels.patch
 Patch462:       libxc.sr.superpage.patch
 Patch463:       libxl.add-option-to-disable-disk-cache-flushes-in-qdisk.patch
 Patch464:       libxl.pvscsi.patch
@@ -202,6 +224,7 @@
 Patch468:       libxl.libxl__domain_pvcontrol.patch
 Patch469:       libxl.helper_done-crash.patch
 Patch470:       libxl.LIBXL_HOTPLUG_TIMEOUT.patch
+Patch471:       libxc.migrate_tracking.patch
 # python3 conversion patches
 Patch500:       build-python3-conversion.patch
 Patch501:       migration-python3-conversion.patch
@@ -395,8 +418,29 @@
 %patch1 -p1
 %patch2 -p1
 %patch3 -p1
-%patch100 -p1
-%patch101 -p1
+%patch4 -p1
+%patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch317 -p1
+%patch319 -p1
+%patch32801 -p1
+%patch32802 -p1
+%patch32101 -p1
+%patch32102 -p1
+%patch32103 -p1
+%patch32104 -p1
+%patch32105 -p1
+%patch32106 -p1
+%patch32107 -p1
 # Our platform specific patches
 %patch400 -p1
 %patch401 -p1
@@ -423,6 +467,7 @@
 %patch457 -p1
 %patch458 -p1
 %patch459 -p1
+%patch461 -p1
 %patch462 -p1
 %patch463 -p1
 %patch464 -p1
@@ -432,6 +477,7 @@
 %patch468 -p1
 %patch469 -p1
 %patch470 -p1
+%patch471 -p1
 # python3 conversion patches
 %patch500 -p1
 %patch501 -p1

++++++ 5ec50b05-x86-idle-rework-C6-EOI-workaround.patch ++++++
# Commit 5fef1fd713660406a6187ef352fbf79986abfe43
# Date 2020-05-20 12:48:37 +0200
# Author Roger Pau Monné <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/idle: rework C6 EOI workaround

Change the C6 EOI workaround (errata AAJ72) to use x86_match_cpu. Also
call the workaround from mwait_idle, previously it was only used by
the ACPI idle driver. Finally make sure the routine is called for all
states equal or greater than ACPI_STATE_C3, note that the ACPI driver
doesn't currently handle them, but the errata condition shouldn't be
limited by that.

Signed-off-by: Roger Pau Monné <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
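
For illustration, here is a minimal, self-contained sketch of the model-matching idea described above. The struct and helper below are made-up stand-ins for this log only; Xen's real primitives are struct x86_cpu_id, boot_cpu_data and x86_match_cpu(), visible in the diff that follows.

    #include <stdbool.h>
    #include <stddef.h>

    struct cpu_model { unsigned int family, model; };

    /* Family 6 models named by erratum AAJ72 (same list as the patch below). */
    static const struct cpu_model eoi_errata[] = {
        { 6, 0x1a }, { 6, 0x1e }, { 6, 0x1f },
        { 6, 0x25 }, { 6, 0x2c }, { 6, 0x2f },
    };

    /* Return true when the running CPU's family/model appears in the table. */
    static bool model_matches(const struct cpu_model *tbl, size_t n,
                              unsigned int family, unsigned int model)
    {
        for ( size_t i = 0; i < n; i++ )
            if ( tbl[i].family == family && tbl[i].model == model )
                return true;
        return false;
    }

    /* The workaround flag is then computed once, roughly as:
     *   fix_needed = cpu_has_apic && !directed_eoi_enabled &&
     *                model_matches(eoi_errata, 6, family, model);
     */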

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -537,26 +537,35 @@ void trace_exit_reason(u32 *irq_traced)
     }
 }
 
-/*
- * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During 
- * an Interrupt Service Routine"
- * 
- * There was an errata with some Core i7 processors that an EOI transaction 
- * may not be sent if software enters core C6 during an interrupt service 
- * routine. So we don't enter deep Cx state if there is an EOI pending.
- */
-static bool errata_c6_eoi_workaround(void)
+bool errata_c6_eoi_workaround(void)
 {
-    static int8_t fix_needed = -1;
+    static int8_t __read_mostly fix_needed = -1;
 
     if ( unlikely(fix_needed == -1) )
     {
-        int model = boot_cpu_data.x86_model;
-        fix_needed = (cpu_has_apic && !directed_eoi_enabled &&
-                      (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-                      (boot_cpu_data.x86 == 6) &&
-                      ((model == 0x1a) || (model == 0x1e) || (model == 0x1f) ||
-                       (model == 0x25) || (model == 0x2c) || (model == 0x2f)));
+#define INTEL_FAM6_MODEL(m) { X86_VENDOR_INTEL, 6, m, X86_FEATURE_ALWAYS }
+        /*
+         * Errata AAJ72: EOI Transaction May Not be Sent if Software Enters
+         * Core C6 During an Interrupt Service Routine"
+         *
+         * There was an errata with some Core i7 processors that an EOI
+         * transaction may not be sent if software enters core C6 during an
+         * interrupt service routine. So we don't enter deep Cx state if
+         * there is an EOI pending.
+         */
+        static const struct x86_cpu_id eoi_errata[] = {
+            INTEL_FAM6_MODEL(0x1a),
+            INTEL_FAM6_MODEL(0x1e),
+            INTEL_FAM6_MODEL(0x1f),
+            INTEL_FAM6_MODEL(0x25),
+            INTEL_FAM6_MODEL(0x2c),
+            INTEL_FAM6_MODEL(0x2f),
+            { }
+        };
+#undef INTEL_FAM6_MODEL
+
+        fix_needed = cpu_has_apic && !directed_eoi_enabled &&
+                     x86_match_cpu(eoi_errata);
     }
 
     return (fix_needed && cpu_has_pending_apic_eoi());
@@ -664,7 +673,7 @@ static void acpi_processor_idle(void)
         return;
     }
 
-    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+    if ( (cx->type >= ACPI_STATE_C3) && errata_c6_eoi_workaround() )
         cx = power->safe_state;
 
 
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -769,6 +769,9 @@ static void mwait_idle(void)
                return;
        }
 
+       if ((cx->type >= 3) && errata_c6_eoi_workaround())
+               cx = power->safe_state;
+
        eax = cx->address;
        cstate = ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -26,4 +26,6 @@ void update_idle_stats(struct acpi_proce
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
+bool errata_c6_eoi_workaround(void);
+
 #endif /* __X86_ASM_CPUIDLE_H__ */
++++++ 5ec7dcaa-x86-dont-enter-C6-with-in-service-intr.patch ++++++
# Commit fc44a7014cafe28b8c53eeaf6ac2a71f5bc8b815
# Date 2020-05-22 16:07:38 +0200
# Author Roger Pau Monné <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/idle: prevent entering C6 with in service interrupts on Intel

Apply a workaround for Intel errata BDX99, CLX30, SKX100, CFW125,
BDF104, BDH85, BDM135, KWB131: "A Pending Fixed Interrupt May Be
Dispatched Before an Interrupt of The Same Priority Completes".

Apply the errata to all server and client models (big cores) from
Broadwell to Cascade Lake. The workaround is grouped together with the
existing fix for errata AAJ72, and the eoi from the function name is
removed.

Signed-off-by: Roger Pau Monné <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -537,7 +537,7 @@ void trace_exit_reason(u32 *irq_traced)
     }
 }
 
-bool errata_c6_eoi_workaround(void)
+bool errata_c6_workaround(void)
 {
     static int8_t __read_mostly fix_needed = -1;
 
@@ -562,10 +562,40 @@ bool errata_c6_eoi_workaround(void)
             INTEL_FAM6_MODEL(0x2f),
             { }
         };
+        /*
+         * Errata BDX99, CLX30, SKX100, CFW125, BDF104, BDH85, BDM135, KWB131:
+         * A Pending Fixed Interrupt May Be Dispatched Before an Interrupt of
+         * The Same Priority Completes.
+         *
+         * Resuming from C6 Sleep-State, with Fixed Interrupts of the same
+         * priority queued (in the corresponding bits of the IRR and ISR APIC
+         * registers), the processor may dispatch the second interrupt (from
+         * the IRR bit) before the first interrupt has completed and written to
+         * the EOI register, causing the first interrupt to never complete.
+         */
+        static const struct x86_cpu_id isr_errata[] = {
+            /* Broadwell */
+            INTEL_FAM6_MODEL(0x47),
+            INTEL_FAM6_MODEL(0x3d),
+            INTEL_FAM6_MODEL(0x4f),
+            INTEL_FAM6_MODEL(0x56),
+            /* Skylake (client) */
+            INTEL_FAM6_MODEL(0x5e),
+            INTEL_FAM6_MODEL(0x4e),
+            /* {Sky/Cascade}lake (server) */
+            INTEL_FAM6_MODEL(0x55),
+            /* {Kaby/Coffee/Whiskey/Amber} Lake */
+            INTEL_FAM6_MODEL(0x9e),
+            INTEL_FAM6_MODEL(0x8e),
+            /* Cannon Lake */
+            INTEL_FAM6_MODEL(0x66),
+            { }
+        };
 #undef INTEL_FAM6_MODEL
 
-        fix_needed = cpu_has_apic && !directed_eoi_enabled &&
-                     x86_match_cpu(eoi_errata);
+        fix_needed = cpu_has_apic &&
+                     ((!directed_eoi_enabled && x86_match_cpu(eoi_errata)) ||
+                      x86_match_cpu(isr_errata));
     }
 
     return (fix_needed && cpu_has_pending_apic_eoi());
@@ -673,7 +703,7 @@ static void acpi_processor_idle(void)
         return;
     }
 
-    if ( (cx->type >= ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+    if ( (cx->type >= ACPI_STATE_C3) && errata_c6_workaround() )
         cx = power->safe_state;
 
 
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -769,7 +769,7 @@ static void mwait_idle(void)
                return;
        }
 
-       if ((cx->type >= 3) && errata_c6_eoi_workaround())
+       if ((cx->type >= 3) && errata_c6_workaround())
                cx = power->safe_state;
 
        eax = cx->address;
--- a/xen/include/asm-x86/cpuidle.h
+++ b/xen/include/asm-x86/cpuidle.h
@@ -26,6 +26,6 @@ void update_idle_stats(struct acpi_proce
 void update_last_cx_stat(struct acpi_processor_power *,
                          struct acpi_processor_cx *, uint64_t);
 
-bool errata_c6_eoi_workaround(void);
+bool errata_c6_workaround(void);
 
 #endif /* __X86_ASM_CPUIDLE_H__ */
++++++ 5ec7dcf6-x86-dont-enter-C3-C6-with-errata.patch ++++++
# Commit b2d502466547e6782ccadd501b8ef1482c391f2c
# Date 2020-05-22 16:08:54 +0200
# Author Roger Pau Monné <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/idle: prevent entering C3/C6 on some Intel CPUs due to errata

Apply a workaround for errata BA80, AAK120, AAM108, AAO67, BD59,
AAY54: Rapid Core C3/C6 Transition May Cause Unpredictable System
Behavior.

Limit maximum C state to C1 when SMT is enabled on the affected CPUs.

Signed-off-by: Roger Pau Monné <[email protected]>
Acked-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -297,6 +297,41 @@ static void early_init_intel(struct cpui
 }
 
 /*
+ * Errata BA80, AAK120, AAM108, AAO67, BD59, AAY54: Rapid Core C3/C6 Transition
+ * May Cause Unpredictable System Behavior
+ *
+ * Under a complex set of internal conditions, cores rapidly performing C3/C6
+ * transitions in a system with Intel Hyper-Threading Technology enabled may
+ * cause a machine check error (IA32_MCi_STATUS.MCACOD = 0x0106), system hang
+ * or unpredictable system behavior.
+ */
+static void probe_c3_errata(const struct cpuinfo_x86 *c)
+{
+#define INTEL_FAM6_MODEL(m) { X86_VENDOR_INTEL, 6, m, X86_FEATURE_ALWAYS }
+    static const struct x86_cpu_id models[] = {
+        /* Nehalem */
+        INTEL_FAM6_MODEL(0x1a),
+        INTEL_FAM6_MODEL(0x1e),
+        INTEL_FAM6_MODEL(0x1f),
+        INTEL_FAM6_MODEL(0x2e),
+        /* Westmere (note Westmere-EX is not affected) */
+        INTEL_FAM6_MODEL(0x2c),
+        INTEL_FAM6_MODEL(0x25),
+        { }
+    };
+#undef INTEL_FAM6_MODEL
+
+    /* Serialized by the AP bringup code. */
+    if ( max_cstate > 1 && (c->apicid & (c->x86_num_siblings - 1)) &&
+         x86_match_cpu(models) )
+    {
+        printk(XENLOG_WARNING
+              "Disabling C-states C3 and C6 due to CPU errata\n");
+        max_cstate = 1;
+    }
+}
+
+/*
  * P4 Xeon errata 037 workaround.
  * Hardware prefetcher may cause stale data to be loaded into the cache.
  *
@@ -323,6 +358,8 @@ static void Intel_errata_workarounds(str
 
        if (cpu_has_tsx_force_abort && opt_rtm_abort)
                wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
+
+       probe_c3_errata(c);
 }
 
 
++++++ 5ec82237-x86-extend-ISR-C6-workaround-to-Haswell.patch ++++++
# Commit b72d8870b5f68f06b083e6bfdb28f081bcb6ab3b
# Date 2020-05-22 20:04:23 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/idle: Extend ISR/C6 erratum workaround to Haswell

This bug was first discovered against Haswell.  It is definitely affected.

(The XenServer ticket for this bug was opened on 2013-05-30 which is coming up
on 7 years old, and predates Broadwell).

Signed-off-by: Andrew Cooper <[email protected]>
Acked-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -572,8 +572,16 @@ bool errata_c6_workaround(void)
          * registers), the processor may dispatch the second interrupt (from
          * the IRR bit) before the first interrupt has completed and written to
          * the EOI register, causing the first interrupt to never complete.
+         *
+         * Note: Haswell hasn't had errata issued, but this issue was first
+         * discovered on Haswell hardware, and is affected.
          */
         static const struct x86_cpu_id isr_errata[] = {
+            /* Haswell */
+            INTEL_FAM6_MODEL(0x3c),
+            INTEL_FAM6_MODEL(0x3f),
+            INTEL_FAM6_MODEL(0x45),
+            INTEL_FAM6_MODEL(0x46),
             /* Broadwell */
             INTEL_FAM6_MODEL(0x47),
             INTEL_FAM6_MODEL(0x3d),
++++++ 5ece1b91-x86-clear-RDRAND-CPUID-bit-on-AMD-fam-15-16.patch ++++++
# Commit 93401e28a84b9dc5945f5d0bf5bce68e9d5ee121
# Date 2020-05-27 09:49:37 +0200
# Author Jan Beulich <[email protected]>
# Committer Jan Beulich <[email protected]>
x86: clear RDRAND CPUID bit on AMD family 15h/16h

Inspired by Linux commit c49a0a80137c7ca7d6ced4c812c9e07a949f6f24:

    There have been reports of RDRAND issues after resuming from suspend on
    some AMD family 15h and family 16h systems. This issue stems from a BIOS
    not performing the proper steps during resume to ensure RDRAND continues
    to function properly.

    Update the CPU initialization to clear the RDRAND CPUID bit for any family
    15h and 16h processor that supports RDRAND. If it is known that the family
    15h or family 16h system does not have an RDRAND resume issue or that the
    system will not be placed in suspend, the "cpuid=rdrand" kernel parameter
    can be used to stop the clearing of the RDRAND CPUID bit.

    Note, that clearing the RDRAND CPUID bit does not prevent a processor
    that normally supports the RDRAND instruction from executing it. So any
    code that determined the support based on family and model won't #UD.

Warn if no explicit choice was given on affected hardware.

Check RDRAND functions at boot as well as after S3 resume (the retry
limit chosen is entirely arbitrary).

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Roger Pau Monné <[email protected]>
Acked-by: Andrew Cooper <[email protected]>
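
As a rough userspace analogue of the boot-time RDRAND sanity check added below (this uses the compiler intrinsic rather than Xen's arch_get_random(); build with gcc -mrdrnd on RDRAND-capable hardware, and treat it only as an illustration):

    #include <stdio.h>
    #include <immintrin.h>

    int main(void)
    {
        unsigned int prev = 0, cur = 0;
        int i;

        /* Ask for a few values; if the instruction never returns data, or
         * keeps returning the same value, assume RDRAND is not working. */
        for ( i = 0; i < 5; i++ )
        {
            if ( !_rdrand32_step(&cur) )
                continue;
            if ( prev && cur != prev )
                break;
            prev = cur;
        }

        puts(i >= 5 ? "RDRAND appears to not work" : "RDRAND looks functional");
        return 0;
    }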

--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -488,6 +488,10 @@ The Speculation Control hardware feature
 be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
 won't offer them to guests.
 
+`rdrand` can be used to override the default disabling of the feature on certain
+AMD systems.  Its negative form can of course also be used to suppress use and
+exposure of the feature.
+
 ### cpuid_mask_cpu
 > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
 
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -3,6 +3,7 @@
 #include <xen/mm.h>
 #include <xen/smp.h>
 #include <xen/pci.h>
+#include <xen/warning.h>
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -645,6 +646,26 @@ static void init_amd(struct cpuinfo_x86
                if (acpi_smi_cmd && (acpi_enable_value | acpi_disable_value))
                        amd_acpi_c1e_quirk = true;
                break;
+
+       case 0x15: case 0x16:
+               /*
+                * There are some Fam15/Fam16 systems where upon resume from S3
+                * firmware fails to re-setup properly functioning RDRAND.
+                * By the time we can spot the problem, it is too late to take
+                * action, and there is nothing Xen can do to repair the problem.
+                * Clear the feature unless force-enabled on the command line.
+                */
+               if (c == &boot_cpu_data &&
+                   cpu_has(c, X86_FEATURE_RDRAND) &&
+                   !is_forced_cpu_cap(X86_FEATURE_RDRAND)) {
+                       static const char __initconst text[] =
+                               "RDRAND may cease to work on this hardware upon resume from S3.\n"
+                               "Please choose an explicit cpuid={no-}rdrand setting.\n";
+
+                       setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+                       warning_add(text);
+               }
+               break;
        }
 
        display_cacheinfo(c);
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -10,6 +10,7 @@
 #include <asm/io.h>
 #include <asm/mpspec.h>
 #include <asm/apic.h>
+#include <asm/random.h>
 #include <asm/setup.h>
 #include <mach_apic.h>
 #include <public/sysctl.h> /* for XEN_INVALID_{SOCKET,CORE}_ID */
@@ -97,6 +98,11 @@ void __init setup_force_cpu_cap(unsigned
        __set_bit(cap, boot_cpu_data.x86_capability);
 }
 
+bool __init is_forced_cpu_cap(unsigned int cap)
+{
+       return test_bit(cap, forced_caps);
+}
+
 static void default_init(struct cpuinfo_x86 * c)
 {
        /* Not much we can do here... */
@@ -496,6 +502,27 @@ void identify_cpu(struct cpuinfo_x86 *c)
        printk("\n");
 #endif
 
+       /*
+        * If RDRAND is available, make an attempt to check that it actually
+        * (still) works.
+        */
+       if (cpu_has(c, X86_FEATURE_RDRAND)) {
+               unsigned int prev = 0;
+
+               for (i = 0; i < 5; ++i)
+               {
+                       unsigned int cur = arch_get_random();
+
+                       if (prev && cur != prev)
+                               break;
+                       prev = cur;
+               }
+
+               if (i >= 5)
+                       printk(XENLOG_WARNING "CPU%u: RDRAND appears to not work\n",
+                              smp_processor_id());
+       }
+
        if (system_state == SYS_STATE_resume)
                return;
 
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -67,6 +67,9 @@ static int __init parse_xen_cpuid(const
             {
                 if ( !val )
                     setup_clear_cpu_cap(mid->bit);
+                else if ( mid->bit == X86_FEATURE_RDRAND &&
+                          (cpuid_ecx(1) & cpufeat_mask(X86_FEATURE_RDRAND)) )
+                    setup_force_cpu_cap(X86_FEATURE_RDRAND);
                 mid = NULL;
             }
 
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -166,6 +166,7 @@ extern const struct x86_cpu_id *x86_matc
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void setup_clear_cpu_cap(unsigned int);
 extern void setup_force_cpu_cap(unsigned int);
+extern bool is_forced_cpu_cap(unsigned int);
 extern void print_cpu_info(unsigned int cpu);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 
++++++ 5ece8ac4-x86-load_system_tables-NMI-MC-safe.patch ++++++
# Commit 9f3e9139fa6c3d620eb08dff927518fc88200b8d
# Date 2020-05-27 16:44:04 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/boot: Fix load_system_tables() to be NMI/#MC-safe

During boot, load_system_tables() is used in reinit_bsp_stack() to switch the
virtual addresses used from their .data/.bss alias, to their directmap alias.

The structure assignment is implemented as a memset() to zero first, then a
copy-in of the new data.  This causes the NMI/#MC stack pointers to
transiently become 0, at a point where we may have an NMI watchdog running.

Rewrite the logic using a volatile tss pointer (equivalent to, but more
readable than, using ACCESS_ONCE() for all writes).

This does drop the zeroing side effect for holes in the structure, but the
backing memory for the TSS is fully zeroed anyway, and architecturally, they
are all reserved.

Signed-off-by: Andrew Cooper <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
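
A condensed sketch of the hazard being fixed, with simplified types (the real code updates Xen's struct tss64 and IST entries): a whole-structure assignment may be compiled as "zero everything, then copy the new values in", so a live field can transiently read as 0 if an NMI or #MC arrives mid-update, whereas field-by-field stores through a volatile pointer keep every field valid at each instruction boundary.

    struct tss_like { unsigned long rsp0, ist[7]; };

    /* Unsafe while the TSS is live: may lower to memset(0) + member copies. */
    void update_by_assignment(struct tss_like *tss, unsigned long stack_top)
    {
        *tss = (struct tss_like){ .rsp0 = stack_top, .ist = { stack_top + 0x1000 } };
    }

    /* Safe: every store replaces one valid value with another valid value,
     * and volatile stops the compiler merging them back into a bulk copy. */
    void update_field_by_field(volatile struct tss_like *tss, unsigned long stack_top)
    {
        tss->rsp0 = stack_top;
        tss->ist[0] = stack_top + 0x1000;
    }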

--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -729,11 +729,12 @@ static cpumask_t cpu_initialized;
  */
 void load_system_tables(void)
 {
-       unsigned int cpu = smp_processor_id();
+       unsigned int i, cpu = smp_processor_id();
        unsigned long stack_bottom = get_stack_bottom(),
                stack_top = stack_bottom & ~(STACK_SIZE - 1);
 
-       struct tss64 *tss = &this_cpu(tss_page).tss;
+       /* The TSS may be live.  Disuade any clever optimisations. */
+       volatile struct tss64 *tss = &this_cpu(tss_page).tss;
        seg_desc_t *gdt =
                this_cpu(gdt) - FIRST_RESERVED_GDT_ENTRY;
        seg_desc_t *compat_gdt =
@@ -748,30 +749,26 @@ void load_system_tables(void)
                .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1,
        };
 
-       *tss = (struct tss64){
-               /* Main stack for interrupts/exceptions. */
-               .rsp0 = stack_bottom,
-
-               /* Ring 1 and 2 stacks poisoned. */
-               .rsp1 = 0x8600111111111111ul,
-               .rsp2 = 0x8600111111111111ul,
-
-               /*
-                * MCE, NMI and Double Fault handlers get their own stacks.
-                * All others poisoned.
-                */
-               .ist = {
-                       [IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE,
-                       [IST_DF  - 1] = stack_top + IST_DF  * PAGE_SIZE,
-                       [IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE,
-                       [IST_DB  - 1] = stack_top + IST_DB  * PAGE_SIZE,
-
-                       [IST_MAX ... ARRAY_SIZE(tss->ist) - 1] =
-                               0x8600111111111111ul,
-               },
-
-               .bitmap = IOBMP_INVALID_OFFSET,
-       };
+       /*
+        * Set up the TSS.  Warning - may be live, and the NMI/#MC must remain
+        * valid on every instruction boundary.  (Note: these are all
+        * semantically ACCESS_ONCE() due to tss's volatile qualifier.)
+        *
+        * rsp0 refers to the primary stack.  #MC, #DF, NMI and #DB handlers
+        * each get their own stacks.  No IO Bitmap.
+        */
+       tss->rsp0 = stack_bottom;
+       tss->ist[IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE;
+       tss->ist[IST_DF  - 1] = stack_top + IST_DF  * PAGE_SIZE;
+       tss->ist[IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE;
+       tss->ist[IST_DB  - 1] = stack_top + IST_DB  * PAGE_SIZE;
+       tss->bitmap = IOBMP_INVALID_OFFSET;
+
+       /* All other stack pointers poisioned. */
+       for ( i = IST_MAX; i < ARRAY_SIZE(tss->ist); ++i )
+               tss->ist[i] = 0x8600111111111111ul;
+       tss->rsp1 = 0x8600111111111111ul;
+       tss->rsp2 = 0x8600111111111111ul;
 
        BUILD_BUG_ON(sizeof(*tss) <= 0x67); /* Mandated by the architecture. */
 
++++++ 5ed69804-x86-ucode-fix-start-end-update.patch ++++++
# Commit 3659f54e9bd31f0f59268402fd67fb4b4118e184
# Date 2020-06-02 19:18:44 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/ucode: Fix errors with start/end_update()

c/s 9267a439c "x86/ucode: Document the behaviour of the microcode_ops hooks"
identified several poor behaviours of the start_update()/end_update_percpu()
hooks.

AMD have subsequently confirmed that OSVW don't, and are not expected to,
change across a microcode load, rendering all of this complexity unecessary.

Instead of fixing up the logic to not leave the OSVW state reset in a number
of corner cases, delete the logic entirely.

This in turn allows for the removal of the poorly-named 'start_update'
parameter to microcode_update_one(), and for svm_host_osvw_{init,reset}() to
become static.

Signed-off-by: Andrew Cooper <[email protected]>
Reviewed-by: Roger Pau Monné <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -286,7 +286,7 @@ static int enter_state(u32 state)
     console_end_sync();
     watchdog_enable();
 
-    microcode_update_one(true);
+    microcode_update_one();
 
     if ( !recheck_cpu_features(0) )
         panic("Missing previously available feature(s)\n");
--- a/xen/arch/x86/microcode_amd.c
+++ b/xen/arch/x86/microcode_amd.c
@@ -24,7 +24,6 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
-#include <asm/hvm/svm/svm.h>
 
 #define pr_debug(x...) ((void)0)
 
@@ -590,27 +589,10 @@ static struct microcode_patch *cpu_reque
     return patch;
 }
 
-#ifdef CONFIG_HVM
-static int start_update(void)
-{
-    /*
-     * svm_host_osvw_init() will be called on each cpu by calling '.end_update'
-     * in common code.
-     */
-    svm_host_osvw_reset();
-
-    return 0;
-}
-#endif
-
 static const struct microcode_ops microcode_amd_ops = {
     .cpu_request_microcode            = cpu_request_microcode,
     .collect_cpu_info                 = collect_cpu_info,
     .apply_microcode                  = apply_microcode,
-#ifdef CONFIG_HVM
-    .start_update                     = start_update,
-    .end_update_percpu                = svm_host_osvw_init,
-#endif
     .free_patch                       = free_patch,
     .compare_patch                    = compare_patch,
     .match_cpu                        = match_cpu,
--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -578,9 +578,6 @@ static int do_microcode_update(void *pat
     else
         ret = secondary_thread_fn();
 
-    if ( microcode_ops->end_update_percpu )
-        microcode_ops->end_update_percpu();
-
     return ret;
 }
 
@@ -652,16 +649,6 @@ static long microcode_update_helper(void
     }
     spin_unlock(&microcode_mutex);
 
-    if ( microcode_ops->start_update )
-    {
-        ret = microcode_ops->start_update();
-        if ( ret )
-        {
-            microcode_free_patch(patch);
-            goto put;
-        }
-    }
-
     cpumask_clear(&cpu_callin_map);
     atomic_set(&cpu_out, 0);
     atomic_set(&cpu_updated, 0);
@@ -760,28 +747,14 @@ static int __init microcode_init(void)
 __initcall(microcode_init);
 
 /* Load a cached update to current cpu */
-int microcode_update_one(bool start_update)
+int microcode_update_one(void)
 {
-    int err;
-
     if ( !microcode_ops )
         return -EOPNOTSUPP;
 
     microcode_ops->collect_cpu_info(&this_cpu(cpu_sig));
 
-    if ( start_update && microcode_ops->start_update )
-    {
-        err = microcode_ops->start_update();
-        if ( err )
-            return err;
-    }
-
-    err = microcode_update_cpu(NULL);
-
-    if ( microcode_ops->end_update_percpu )
-        microcode_ops->end_update_percpu();
-
-    return err;
+    return microcode_update_cpu(NULL);
 }
 
 /* BSP calls this function to parse ucode blob and then apply an update. */
@@ -825,7 +798,7 @@ int __init early_microcode_update_cpu(vo
     spin_unlock(&microcode_mutex);
     ASSERT(rc);
 
-    return microcode_update_one(true);
+    return microcode_update_one();
 }
 
 int __init early_microcode_init(void)
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1082,7 +1082,7 @@ static void svm_guest_osvw_init(struct d
     spin_unlock(&osvw_lock);
 }
 
-void svm_host_osvw_reset()
+static void svm_host_osvw_reset(void)
 {
     spin_lock(&osvw_lock);
 
@@ -1092,7 +1092,7 @@ void svm_host_osvw_reset()
     spin_unlock(&osvw_lock);
 }
 
-void svm_host_osvw_init()
+static void svm_host_osvw_init(void)
 {
     spin_lock(&osvw_lock);
 
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -358,7 +358,7 @@ void start_secondary(void *unused)
 
     initialize_cpu_data(cpu);
 
-    microcode_update_one(false);
+    microcode_update_one();
 
     /*
      * If MSR_SPEC_CTRL is available, apply Xen's default setting and discard
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -93,9 +93,6 @@ extern u32 svm_feature_flags;
 #define DEFAULT_TSC_RATIO       0x0000000100000000ULL
 #define TSC_RATIO_RSVD_BITS     0xffffff0000000000ULL
 
-extern void svm_host_osvw_reset(void);
-extern void svm_host_osvw_init(void);
-
 /* EXITINFO1 fields on NPT faults */
 #define _NPT_PFEC_with_gla     32
 #define NPT_PFEC_with_gla      (1UL<<_NPT_PFEC_with_gla)
--- a/xen/include/asm-x86/microcode.h
+++ b/xen/include/asm-x86/microcode.h
@@ -24,8 +24,6 @@ struct microcode_ops {
                                                      size_t size);
     int (*collect_cpu_info)(struct cpu_signature *csig);
     int (*apply_microcode)(const struct microcode_patch *patch);
-    int (*start_update)(void);
-    void (*end_update_percpu)(void);
     void (*free_patch)(void *mc);
     bool (*match_cpu)(const struct microcode_patch *patch);
     enum microcode_match_result (*compare_patch)(
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -586,7 +586,7 @@ void microcode_set_module(unsigned int);
 int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
 int early_microcode_update_cpu(void);
 int early_microcode_init(void);
-int microcode_update_one(bool start_update);
+int microcode_update_one(void);
 int microcode_init_intel(void);
 int microcode_init_amd(void);
 
++++++ 5eda60cb-SVM-split-recalc-NPT-fault-handling.patch ++++++
# Commit 51ca66c37371b10b378513af126646de22eddb17
# Date 2020-06-05 17:12:11 +0200
# Author Igor Druzhinin <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/svm: do not try to handle recalc NPT faults immediately

A recalculation NPT fault doesn't always require additional handling
in hvm_hap_nested_page_fault(), moreover in general case if there is no
explicit handling done there - the fault is wrongly considered fatal.

This covers a specific case of migration with vGPU assigned which
uses direct MMIO mappings made by XEN_DOMCTL_memory_mapping hypercall:
at a moment log-dirty is enabled globally, recalculation is requested
for the whole guest memory including those mapped MMIO regions
which causes a page fault being raised at the first access to them;
but due to MMIO P2M type not having any explicit handling in
hvm_hap_nested_page_fault() a domain is erroneously crashed with unhandled
SVM violation.

Instead of trying to be opportunistic - use safer approach and handle
P2M recalculation in a separate NPT fault by attempting to retry after
making the necessary adjustments. This is aligned with Intel behavior
where there are separate VMEXITs for recalculation and EPT violations
(faults) and only faults are handled in hvm_hap_nested_page_fault().
Do it by also unifying do_recalc return code with Intel implementation
where returning 1 means P2M was actually changed.

Since there was no case previously where p2m_pt_handle_deferred_changes()
could return a positive value - it's safe to replace ">= 0" with just "== 0"
in VMEXIT_NPF handler. finish_type_change() is also not affected by the
change as being able to deal with >0 return value of p2m->recalc from
EPT implementation.

Signed-off-by: Igor Druzhinin <[email protected]>
Reviewed-by: Roger Pau Monné <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2947,9 +2947,10 @@ void svm_vmexit_handler(struct cpu_user_
             v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
         rc = vmcb->exitinfo1 & PFEC_page_present
              ? p2m_pt_handle_deferred_changes(vmcb->exitinfo2) : 0;
-        if ( rc >= 0 )
+        if ( rc == 0 )
+            /* If no recal adjustments were being made - handle this fault */
             svm_do_nested_pgfault(v, regs, vmcb->exitinfo1, vmcb->exitinfo2);
-        else
+        else if ( rc < 0 )
         {
             printk(XENLOG_G_ERR
                    "%pv: Error %d handling NPF (gpa=%08lx ec=%04lx)\n",
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -341,6 +341,7 @@ static int do_recalc(struct p2m_domain *
     unsigned int level = 4;
     l1_pgentry_t *pent;
     int err = 0;
+    bool recalc_done = false;
 
     table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     while ( --level )
@@ -402,6 +403,8 @@ static int do_recalc(struct p2m_domain *
                 clear_recalc(l1, e);
                 err = p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
                 ASSERT(!err);
+
+                recalc_done = true;
             }
         }
         unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));
@@ -448,12 +451,14 @@ static int do_recalc(struct p2m_domain *
             clear_recalc(l1, e);
         err = p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
         ASSERT(!err);
+
+        recalc_done = true;
     }
 
  out:
     unmap_domain_page(table);
 
-    return err;
+    return err ?: recalc_done;
 }
 
 int p2m_pt_handle_deferred_changes(uint64_t gpa)
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1194,7 +1194,7 @@ static int finish_type_change(struct p2m
         rc = p2m->recalc(p2m, gfn);
         /*
          * ept->recalc could return 0/1/-ENOMEM. pt->recalc could return
-         * 0/-ENOMEM/-ENOENT, -ENOENT isn't an error as we are looping
+         * 0/1/-ENOMEM/-ENOENT, -ENOENT isn't an error as we are looping
          * gfn here. If rc is 1 we need to have it 0 for success.
          */
         if ( rc == -ENOENT || rc > 0 )
++++++ 5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch ++++++
# Commit f7039ee41b3d3448775a1623f230037fd0455104
# Date 2020-06-09 12:56:24 +0200
# Author Paul Durrant <[email protected]>
# Committer Jan Beulich <[email protected]>
ioreq: handle pending emulation racing with ioreq server destruction

When an emulation request is initiated in hvm_send_ioreq() the guest vcpu is
blocked on an event channel until that request is completed. If, however,
the emulator is killed whilst that emulation is pending then the ioreq
server may be destroyed. Thus when the vcpu is awoken the code in
handle_hvm_io_completion() will find no pending request to wait for, but will
leave the internal vcpu io_req.state set to IOREQ_READY and the vcpu shutdown
deferall flag in place (because hvm_io_assist() will never be called). The
emulation request is then completed anyway. This means that any subsequent call
to hvmemul_do_io() will find an unexpected value in io_req.state and will
return X86EMUL_UNHANDLEABLE, which in some cases will result in continuous
re-tries.

This patch fixes the issue by moving the setting of io_req.state and clearing
of shutdown deferral (as will as MSI-X write completion) out of hvm_io_assist()
and directly into handle_hvm_io_completion().

Reported-by: Marek Marczykowski-Górecki <[email protected]>
Signed-off-by: Paul Durrant <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -107,15 +107,7 @@ static void hvm_io_assist(struct hvm_ior
     ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
 
     if ( hvm_ioreq_needs_completion(ioreq) )
-    {
-        ioreq->state = STATE_IORESP_READY;
         ioreq->data = data;
-    }
-    else
-        ioreq->state = STATE_IOREQ_NONE;
-
-    msix_write_completion(v);
-    vcpu_end_shutdown_deferral(v);
 
     sv->pending = false;
 }
@@ -207,6 +199,12 @@ bool handle_hvm_io_completion(struct vcp
         }
     }
 
+    vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
+        STATE_IORESP_READY : STATE_IOREQ_NONE;
+
+    msix_write_completion(v);
+    vcpu_end_shutdown_deferral(v);
+
     io_completion = vio->io_completion;
     vio->io_completion = HVMIO_no_completion;
 
++++++ 5edfbbea-x86-spec-ctrl-CPUID-MSR-defs-for-SRBDS.patch ++++++
# Commit caab85ab58c0cdf74ab070a5de5c4df89f509ff3
# Date 2020-06-09 17:42:18 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling

This is part of XSA-320 / CVE-2020-0543

Signed-off-by: Andrew Cooper <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
Acked-by: Wei Liu <[email protected]>

--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -483,10 +483,10 @@ accounting for hardware capabilities as
 
 Currently accepted:
 
-The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, `ibpb`,
-`l1d-flush` and `ssbd` are used by default if available and applicable.  They can
-be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
-won't offer them to guests.
+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
+use them itself, and won't offer them to guests.
 
 `rdrand` can be used to override the default disabling of the feature on certain
 AMD systems.  Its negative form can of course also be used to suppress use and
--- a/tools/libxl/libxl_cpuid.c
+++ b/tools/libxl/libxl_cpuid.c
@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
 
         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
     [ 4] = "fsrm",
 
+    /*  8 */                [ 9] = "srbds-ctrl",
     [10] = "md-clear",
     /* 12 */                [13] = "tsx-force-abort",
 
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
         /* Write-only */
     case MSR_TSX_FORCE_ABORT:
     case MSR_TSX_CTRL:
+    case MSR_MCU_OPT_CTRL:
     case MSR_U_CET:
     case MSR_S_CET:
     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
         /* Read-only */
     case MSR_TSX_FORCE_ABORT:
     case MSR_TSX_CTRL:
+    case MSR_MCU_OPT_CTRL:
     case MSR_U_CET:
     case MSR_S_CET:
     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -312,12 +312,13 @@ static void __init print_details(enum in
     printk("Speculative mitigation facilities:\n");
 
     /* Hardware features which pertain to speculative mitigations. */
-    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -179,6 +179,9 @@
 #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
 #define MSR_IA32_VMX_VMFUNC                     0x491
 
+#define MSR_MCU_OPT_CTRL                    0x00000123
+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+
 #define MSR_U_CET                           0x000006a0
 #define MSR_S_CET                           0x000006a2
 #define MSR_PL0_SSP                         0x000006a4
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
 /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
 XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions */
 XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
 XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural buffers */
 XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
 XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
++++++ 5edfbbea-x86-spec-ctrl-mitigate-SRBDS.patch ++++++
# Commit 6a49b9a7920c82015381740905582b666160d955
# Date 2020-06-09 17:42:18 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/spec-ctrl: Mitigate the Special Register Buffer Data Sampling sidechannel

See patch documentation and comments.

This is part of XSA-320 / CVE-2020-0543

Signed-off-by: Andrew Cooper <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
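
As a usage note, once this patch is applied the default can be overridden from the Xen command line; the fragments below are illustrative only, with the option name taken from the patch itself:

    # Force the Special Register Buffer lock on, even where Xen would relax it:
    spec-ctrl=srb-lock=1

    # Opt out of the SRBDS mitigation; Xen then sets RNGDS_MITG_DIS in
    # MSR_MCU_OPT_CTRL and RDRAND/RDSEED run without the extra serialisation:
    spec-ctrl=srb-lock=0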

--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -1995,7 +1995,7 @@ By default SSBD will be mitigated at run
 ### spec-ctrl (x86)
 > `= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb,md-clear}=<bool>,
 >              bti-thunk=retpoline|lfence|jmp, {ibrs,ibpb,ssbd,eager-fpu,
->              l1d-flush,branch-harden}=<bool> ]`
+>              l1d-flush,branch-harden,srb-lock}=<bool> ]`
 
 Controls for speculative execution sidechannel mitigations.  By default, Xen
 will pick the most appropriate mitigations based on compiled in support,
@@ -2072,6 +2072,12 @@ If Xen is compiled with `CONFIG_SPECULAT
 speculation barriers to protect selected conditional branches.  By default,
 Xen will enable this mitigation.
 
+On hardware supporting SRBDS_CTRL, the `srb-lock=` option can be used to force
+or prevent Xen from protecting the Special Register Buffer from leaking stale
+data. By default, Xen will enable this mitigation, except on parts where MDS
+is fixed and TAA is fixed/mitigated (in which case, there is believed to be no
+way for an attacker to obtain the stale data).
+
 ### sync_console
 > `= <boolean>`
 
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -295,6 +295,9 @@ static int enter_state(u32 state)
     ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
     spec_ctrl_exit_idle(ci);
 
+    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
+        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
+
  done:
     spin_debug_enable();
     local_irq_restore(flags);
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -361,12 +361,14 @@ void start_secondary(void *unused)
     microcode_update_one();
 
     /*
-     * If MSR_SPEC_CTRL is available, apply Xen's default setting and discard
-     * any firmware settings.  Note: MSR_SPEC_CTRL may only become available
-     * after loading microcode.
+     * If any speculative control MSRs are available, apply Xen's default
+     * settings.  Note: These MSRs may only become available after loading
+     * microcode.
      */
     if ( boot_cpu_has(X86_FEATURE_IBRSB) )
         wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
+    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
+        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
 
     tsx_init(); /* Needs microcode.  May change HLE/RTM feature bits. */
 
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -65,6 +65,9 @@ static unsigned int __initdata l1d_maxph
 static bool __initdata cpu_has_bug_msbds_only; /* => minimal HT impact. */
 static bool __initdata cpu_has_bug_mds; /* Any other M{LP,SB,FB}DS combination. */
 
+static int8_t __initdata opt_srb_lock = -1;
+uint64_t __read_mostly default_xen_mcu_opt_ctrl;
+
 static int __init parse_spec_ctrl(const char *s)
 {
     const char *ss;
@@ -112,6 +115,7 @@ static int __init parse_spec_ctrl(const
             opt_ssbd = false;
             opt_l1d_flush = 0;
             opt_branch_harden = false;
+            opt_srb_lock = 0;
         }
         else if ( val > 0 )
             rc = -EINVAL;
@@ -178,6 +182,8 @@ static int __init parse_spec_ctrl(const
             opt_l1d_flush = val;
         else if ( (val = parse_boolean("branch-harden", s, ss)) >= 0 )
             opt_branch_harden = val;
+        else if ( (val = parse_boolean("srb-lock", s, ss)) >= 0 )
+            opt_srb_lock = val;
         else
             rc = -EINVAL;
 
@@ -341,7 +347,7 @@ static void __init print_details(enum in
                "\n");
 
     /* Settings for Xen's protection, irrespective of guests. */
-    printk("  Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s\n",
+    printk("  Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s%s\n",
            thunk == THUNK_NONE      ? "N/A" :
            thunk == THUNK_RETPOLINE ? "RETPOLINE" :
            thunk == THUNK_LFENCE    ? "LFENCE" :
@@ -352,6 +358,8 @@ static void __init print_details(enum in
            (default_xen_spec_ctrl & SPEC_CTRL_SSBD)  ? " SSBD+" : " SSBD-",
            !(caps & ARCH_CAPS_TSX_CTRL)              ? "" :
            (opt_tsx & 1)                             ? " TSX+" : " TSX-",
+           !boot_cpu_has(X86_FEATURE_SRBDS_CTRL)     ? "" :
+           opt_srb_lock                              ? " SRB_LOCK+" : " SRB_LOCK-",
            opt_ibpb                                  ? " IBPB"  : "",
            opt_l1d_flush                             ? " L1D_FLUSH" : "",
            opt_md_clear_pv || opt_md_clear_hvm       ? " VERW"  : "",
@@ -1149,6 +1157,34 @@ void __init init_speculation_mitigations
         tsx_init();
     }
 
+    /* Calculate suitable defaults for MSR_MCU_OPT_CTRL */
+    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
+    {
+        uint64_t val;
+
+        rdmsrl(MSR_MCU_OPT_CTRL, val);
+
+        /*
+         * On some SRBDS-affected hardware, it may be safe to relax srb-lock
+         * by default.
+         *
+         * On parts which enumerate MDS_NO and not TAA_NO, TSX is the only way
+         * to access the Fill Buffer.  If TSX isn't available (inc. SKU
+         * reasons on some models), or TSX is explicitly disabled, then there
+         * is no need for the extra overhead to protect RDRAND/RDSEED.
+         */
+        if ( opt_srb_lock == -1 &&
+             (caps & (ARCH_CAPS_MDS_NO|ARCH_CAPS_TAA_NO)) == ARCH_CAPS_MDS_NO &&
+             (!cpu_has_hle || ((caps & ARCH_CAPS_TSX_CTRL) && opt_tsx == 0)) )
+            opt_srb_lock = 0;
+
+        val &= ~MCU_OPT_CTRL_RNGDS_MITG_DIS;
+        if ( !opt_srb_lock )
+            val |= MCU_OPT_CTRL_RNGDS_MITG_DIS;
+
+        default_xen_mcu_opt_ctrl = val;
+    }
+
     print_details(thunk, caps);
 
     /*
@@ -1180,6 +1216,9 @@ void __init init_speculation_mitigations
 
         wrmsrl(MSR_SPEC_CTRL, bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl);
     }
+
+    if ( boot_cpu_has(X86_FEATURE_SRBDS_CTRL) )
+        wrmsrl(MSR_MCU_OPT_CTRL, default_xen_mcu_opt_ctrl);
 }
 
 static void __init __maybe_unused build_assertions(void)
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -54,6 +54,8 @@ extern int8_t opt_pv_l1tf_hwdom, opt_pv_
  */
 extern paddr_t l1tf_addr_mask, l1tf_safe_maddr;
 
+extern uint64_t default_xen_mcu_opt_ctrl;
+
 static inline void init_shadow_spec_ctrl_state(void)
 {
     struct cpu_info *info = get_cpu_info();
++++++ 5ee24d0e-x86-spec-ctrl-document-SRBDS-workaround.patch ++++++
# Commit 7028534d8482d25860c4d1aa8e45f0b911abfc5a
# Date 2020-06-11 16:26:06 +0100
# Author Andrew Cooper <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/spec-ctrl: Update docs with SRBDS workaround

RDRAND/RDSEED can be hidden using cpuid= to mitigate SRBDS if microcode
isn't available.

This is part of XSA-320 / CVE-2020-0543.

Signed-off-by: Andrew Cooper <[email protected]>
Acked-by: Julien Grall <[email protected]>
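
For example, the following command-line fragments (illustrative only; the option forms follow the documentation change below) hide or force the feature:

    # Hide RDRAND/RDSEED from Xen and from guests, as an SRBDS (XSA-320)
    # fallback when updated microcode is not available:
    cpuid=no-rdrand,no-rdseed

    # On affected AMD family 15h/16h systems, keep RDRAND usable despite the
    # S3 resume concern:
    cpuid=rdrand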

--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -481,16 +481,21 @@ choice of `dom0-kernel` is deprecated an
 This option allows for fine tuning of the facilities Xen will use, after
 accounting for hardware capabilities as enumerated via CPUID.
 
+Unless otherwise noted, options only have any effect in their negative form,
+to hide the named feature(s).  Ignoring a feature using this mechanism will
+cause Xen not to use the feature, nor offer them as usable to guests.
+
 Currently accepted:
 
 The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
 `stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-use them itself, and won't offer them to guests.
+applicable.  They can all be ignored.
 
-`rdrand` can be used to override the default disabling of the feature on certain
-AMD systems.  Its negative form can of course also be used to suppress use and
-exposure of the feature.
+`rdrand` and `rdseed` can be ignored, as a mitigation to XSA-320 /
+CVE-2020-0543.  The RDRAND feature is disabled by default on certain AMD
+systems, due to possible malfunctions after ACPI S3 suspend/resume.  `rdrand`
+may be used in its positive form to override Xen's default behaviour on these
+systems, and make the feature fully usable.
 
 ### cpuid_mask_cpu
 > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
++++++ libxc.migrate_tracking.patch ++++++
Track live migration state unconditionally in logfiles to see how long a domU was suspended.
Depends on libxc.sr.superpage.patch
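
For orientation, the sr_uuid_to_string() helper added below renders the 16-byte domain handle in the usual 8-4-4-4-12 UUID text form. A snprintf()-based equivalent would look like the sketch here (illustrative only; the patch open-codes the hex conversion instead):

    #include <stdio.h>
    #include <stdint.h>

    /* dest needs room for 36 characters plus the trailing NUL. */
    static void uuid_to_string(char dest[37], const uint8_t uuid[16])
    {
        snprintf(dest, 37,
                 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                 uuid[0],  uuid[1],  uuid[2],  uuid[3],  uuid[4],  uuid[5],
                 uuid[6],  uuid[7],  uuid[8],  uuid[9],  uuid[10], uuid[11],
                 uuid[12], uuid[13], uuid[14], uuid[15]);
    }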

--- a/tools/libxc/xc_sr_common.c
+++ b/tools/libxc/xc_sr_common.c
@@ -196,6 +196,65 @@ bool _xc_sr_bitmap_resize(struct xc_sr_b
     return true;
 }
 
+/* Write a two-character hex representation of 'byte' to digits[].
+   Pre-condition: sizeof(digits) >= 2 */
+static void byte_to_hex(char *digits, const uint8_t byte)
+{
+    uint8_t nybbel = byte >> 4;
+
+    if ( nybbel > 9 )
+        digits[0] = 'a' + nybbel-10;
+    else
+        digits[0] = '0' + nybbel;
+
+    nybbel = byte & 0x0f;
+    if ( nybbel > 9 )
+        digits[1] = 'a' + nybbel-10;
+    else
+        digits[1] = '0' + nybbel;
+}
+
+/* Convert an array of 16 unsigned bytes to a DCE/OSF formatted UUID
+   string.
+
+   Pre-condition: sizeof(dest) >= 37 */
+void sr_uuid_to_string(char *dest, const uint8_t *uuid)
+{
+    int i = 0;
+    char *p = dest;
+
+    for (; i < 4; i++ )
+    {
+        byte_to_hex(p, uuid[i]);
+        p += 2;
+    }
+    *p++ = '-';
+    for (; i < 6; i++ )
+    {
+        byte_to_hex(p, uuid[i]);
+        p += 2;
+    }
+    *p++ = '-';
+    for (; i < 8; i++ )
+    {
+        byte_to_hex(p, uuid[i]);
+        p += 2;
+    }
+    *p++ = '-';
+    for (; i < 10; i++ )
+    {
+        byte_to_hex(p, uuid[i]);
+        p += 2;
+    }
+    *p++ = '-';
+    for (; i < 16; i++ )
+    {
+        byte_to_hex(p, uuid[i]);
+        p += 2;
+    }
+    *p = '\0';
+}
+
 /*
  * Local variables:
  * mode: C
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -10,6 +10,10 @@
 
 #include "xc_sr_stream_format.h"
 
+#define SUSEINFO(_m, _a...)  do { int ERROR_errno = errno; \
+        xc_report(xch, xch->error_handler, XTL_ERROR, XC_ERROR_NONE, "SUSEINFO: " _m , ## _a ); \
+        errno = ERROR_errno; \
+        } while (0)
 /* String representation of Domain Header types. */
 const char *dhdr_type_to_str(uint32_t type);
 
@@ -195,6 +199,7 @@ struct xc_sr_context
     int fd;
 
     xc_dominfo_t dominfo;
+    char uuid[16*2+4+1];
 
     union /* Common save or restore data. */
     {
@@ -427,6 +432,8 @@ static inline int pfn_set_populated(stru
     return 0;
 }
 
+extern void sr_uuid_to_string(char *dest, const uint8_t *uuid);
+
 struct xc_sr_record
 {
     uint32_t type;
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -608,6 +608,7 @@ static int restore(struct xc_sr_context
     struct xc_sr_record rec;
     int rc, saved_rc = 0, saved_errno = 0;
 
+    SUSEINFO("domid %u: %s %s start", ctx->domid, ctx->uuid, __func__);
     IPRINTF("Restoring domain");
 
     rc = setup(ctx);
@@ -684,6 +685,7 @@ static int restore(struct xc_sr_context
     PERROR("Restore failed");
 
  done:
+    SUSEINFO("domid %u: %s done", ctx->domid, __func__);
     cleanup(ctx);
 
     if ( saved_rc )
@@ -748,6 +750,7 @@ int xc_domain_restore(xc_interface *xch,
     }
 
     ctx.domid = dom;
+    sr_uuid_to_string(ctx.uuid, ctx.dominfo.handle);
 
     if ( read_headers(&ctx) )
         return -1;
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -852,6 +852,7 @@ static int save(struct xc_sr_context *ct
     xc_interface *xch = ctx->xch;
     int rc, saved_rc = 0, saved_errno = 0;
 
+    SUSEINFO("domid %u: %s %s start", ctx->domid, ctx->uuid, __func__);
     IPRINTF("Saving domain %d, type %s",
             ctx->domid, dhdr_type_to_str(guest_type));
 
@@ -964,6 +965,7 @@ static int save(struct xc_sr_context *ct
     PERROR("Save failed");
 
  done:
+    SUSEINFO("domid %u: %s done", ctx->domid, __func__);
     cleanup(ctx);
 
     if ( saved_rc )
@@ -1019,6 +1021,10 @@ static int suse_precopy_policy(struct pr
         goto out;
     }
     /* Keep going */
+    if ( stats.dirty_count >= 0 )
+        SUSEINFO("domid %u: dirty pages %ld after iteration %u/%u",
+                suse_flags.ctx->domid,
+                suse_flags.dirty_count, stats.iteration, suse_flags.max_iters);
     return XGS_POLICY_CONTINUE_PRECOPY;
 
 out:
@@ -1032,6 +1038,9 @@ out:
         return XGS_POLICY_ABORT;
     }
 suspend:
+    SUSEINFO("domid %u: suspending, remaining dirty pages %ld/%lu prior final transit",
+            suse_flags.ctx->domid,
+            suse_flags.dirty_count, suse_flags.ctx->save.p2m_size);
     return XGS_POLICY_STOP_AND_COPY;
 }
 
@@ -1095,6 +1104,7 @@ int xc_domain_save_suse(xc_interface *xc
     }
 
     ctx.domid = dom;
+    sr_uuid_to_string(ctx.uuid, ctx.dominfo.handle);
 
     if ( ctx.dominfo.hvm )
     {
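
For reference, a self-contained model of the string the new sr_uuid_to_string()
helper produces from ctx.dominfo.handle; this sketch uses snprintf instead of
the patch's byte_to_hex() loop, purely for illustration, and the handle bytes
are made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Compact equivalent of sr_uuid_to_string(): 16 handle bytes formatted
     * as an 8-4-4-4-12 DCE/OSF UUID string. */
    static void uuid_to_string_model(char dest[16 * 2 + 4 + 1], const uint8_t *u)
    {
        snprintf(dest, 37,
                 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                 u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
                 u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
    }

    int main(void)
    {
        const uint8_t handle[16] = { 0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3,
                                     4, 5, 6, 7, 8, 9, 10, 11 };
        char uuid[16 * 2 + 4 + 1];   /* same sizing as ctx.uuid above */

        uuid_to_string_model(uuid, handle);
        puts(uuid);                  /* deadbeef-0001-0203-0405-060708090a0b */
        return 0;
    }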
++++++ libxl.max_event_channels.patch ++++++
References: bsc#1167608
Remove the bound on max_event_channels:
the previous default of 1023 is too low for a domU with a three-digit number of vcpus,
it is difficult to derive a suitable value from the vcpu count alone,
and devices added at runtime also consume event channels.
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -224,7 +224,7 @@ int libxl__domain_build_info_setdefault(
             b_info->iomem[i].gfn = b_info->iomem[i].start;
 
     if (!b_info->event_channels)
-        b_info->event_channels = 1023;
+        b_info->event_channels = -1U;
 
     libxl__arch_domain_build_info_setdefault(gc, b_info);
     libxl_defbool_setdefault(&b_info->dm_restrict, false);
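
With the built-in default now effectively unbounded, an administrator can
still cap an individual domU explicitly in its xl configuration. The value
below is purely illustrative; the patch only changes what happens when the
option is absent.

    # domU.cfg fragment (illustrative value)
    max_event_channels = 4096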
++++++ xsa320-1.patch -> xsa317.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa317.patch       2020-07-14 
07:44:40.218952903 +0200
@@ -1,102 +1,50 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From aeb46e92f915f19a61d5a8a1f4b696793f64e6fb Mon Sep 17 00:00:00 2001
+From: Julien Grall <[email protected]>
+Date: Thu, 19 Mar 2020 13:17:31 +0000
+Subject: [PATCH] xen/common: event_channel: Don't ignore error in
+ get_free_port()
 
-This is part of XSA-320 / CVE-2020-0543
+Currently, get_free_port() assumes that the port has been allocated
+whenever evtchn_allocate_port() does not return -EBUSY.
 
-Signed-off-by: Andrew Cooper <[email protected]>
+However, the function may return an error when:
+    - We exhausted all the event channels. This can happen if the limit
+    configured by the administrator for the guest ('max_event_channels'
+    in xl cfg) is higher than the ABI used by the guest. For instance,
+    if the guest is using 2L, the limit should not be higher than 4095.
+    - We cannot allocate memory (e.g. Xen has no more memory).
+
+Users of get_free_port() (such as EVTCHNOP_alloc_unbound) will then
+assume the port is valid and will next call evtchn_from_port(). This
+will result in a crash, as the memory backing the event channel structure
+is not present.
+
+Fixes: 368ae9a05fe ("xen/pvshim: forward evtchn ops between L0 Xen and L2 DomU")
+Signed-off-by: Julien Grall <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
+---
+ xen/common/event_channel.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
+index e86e2bfab0..a8d182b584 100644
+--- a/xen/common/event_channel.c
++++ b/xen/common/event_channel.c
+@@ -195,10 +195,10 @@ static int get_free_port(struct domain *d)
+     {
+         int rc = evtchn_allocate_port(d, port);
+ 
+-        if ( rc == -EBUSY )
+-            continue;
+-
+-        return port;
++        if ( rc == 0 )
++            return port;
++        else if ( rc != -EBUSY )
++            return rc;
+     }
+ 
+     return -ENOSPC;
+-- 
+2.17.1
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
-+
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */

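A self-contained model of the corrected allocation loop above (toy code, not
Xen: toy_allocate_port() and the port limit of 4096 are stand-ins for
evtchn_allocate_port() and the per-domain limit):

    #include <errno.h>
    #include <stdio.h>

    /* Toy stand-in: ports 0-2 are busy, port 3 fails with -ENOMEM. */
    static int toy_allocate_port(unsigned int port)
    {
        if ( port < 3 )
            return -EBUSY;
        if ( port == 3 )
            return -ENOMEM;
        return 0;
    }

    /* The fixed pattern: only -EBUSY means "try the next port"; any other
     * error is propagated instead of being mistaken for a valid port. */
    static int get_free_port_model(void)
    {
        unsigned int port;

        for ( port = 0; port < 4096; port++ )
        {
            int rc = toy_allocate_port(port);

            if ( rc == 0 )
                return port;
            else if ( rc != -EBUSY )
                return rc;
        }

        return -ENOSPC;
    }

    int main(void)
    {
        printf("%d\n", get_free_port_model()); /* prints -ENOMEM, not port 3 */
        return 0;
    }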
++++++ xsa320-1.patch -> xsa319.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa319.patch       2020-07-14 
07:44:43.138962330 +0200
@@ -1,102 +1,27 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: Jan Beulich <[email protected]>
+Subject: x86/shadow: correct an inverted conditional in dirty VRAM tracking
 
-This is part of XSA-320 / CVE-2020-0543
+This originally was "mfn_x(mfn) == INVALID_MFN". Make it like this
+again, taking the opportunity to also drop the unnecessary nearby
+braces.
 
-Signed-off-by: Andrew Cooper <[email protected]>
-Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
+This is XSA-319.
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
-+
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+Fixes: 246a5a3377c2 ("xen: Use a typesafe to define INVALID_MFN")
+Signed-off-by: Jan Beulich <[email protected]>
+Reviewed-by: Andrew Cooper <[email protected]>
+
+--- xen-4.13.1-testing.orig/xen/arch/x86/mm/shadow/common.c
++++ xen-4.13.1-testing/xen/arch/x86/mm/shadow/common.c
+@@ -3249,10 +3249,8 @@ int shadow_track_dirty_vram(struct domai
+             int dirty = 0;
+             paddr_t sl1ma = dirty_vram->sl1ma[i];
+ 
+-            if ( !mfn_eq(mfn, INVALID_MFN) )
+-            {
++            if ( mfn_eq(mfn, INVALID_MFN) )
+                 dirty = 1;
+-            }
+             else
+             {
+                 page = mfn_to_page(mfn);

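One way to read the fix above (sketch only, not Xen code): an entry whose mfn
is INVALID_MFN can no longer be inspected, so it has to be reported dirty;
only a valid mfn gets the real dirty-bit check. page_dirty_bit() below is a
hypothetical stand-in for the inspection done in the else branch.

    dirty = mfn_eq(mfn, INVALID_MFN) ? 1 : page_dirty_bit(mfn_to_page(mfn));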
++++++ xsa320-1.patch -> xsa321-1.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-1.patch     2020-07-14 
07:44:44.938968141 +0200
@@ -1,102 +1,31 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: Jan Beulich <[email protected]>
+Subject: vtd: improve IOMMU TLB flush
 
-This is part of XSA-320 / CVE-2020-0543
+Do not limit PSI flushes to order 0 pages, in order to avoid doing a
+full TLB flush if the passed in page has an order greater than 0 and
+is aligned. Should increase the performance of IOMMU TLB flushes when
+dealing with page orders greater than 0.
 
-Signed-off-by: Andrew Cooper <[email protected]>
-Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
+This is part of XSA-321.
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
+Signed-off-by: Jan Beulich <[email protected]>
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -570,13 +570,14 @@ static int __must_check iommu_flush_iotl
+         if ( iommu_domid == -1 )
+             continue;
+ 
+-        if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) )
++        if ( !page_count || (page_count & (page_count - 1)) ||
++             dfn_eq(dfn, INVALID_DFN) || !IS_ALIGNED(dfn_x(dfn), page_count) )
+             rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+                                        0, flush_dev_iotlb);
+         else
+             rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+                                        dfn_to_daddr(dfn),
+-                                       PAGE_ORDER_4K,
++                                       get_order_from_pages(page_count),
+                                        !dma_old_pte_present,
+                                        flush_dev_iotlb);
  
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
-+
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */

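A standalone model of the new flush decision (the INVALID_DFN test is omitted
here, and IS_ALIGNED() is spelled out as a mask, assuming a power-of-two
count): a page-selective flush (PSI) is only used when the region is a
naturally aligned, non-zero power-of-two number of pages; otherwise the code
falls back to a domain-selective flush (DSI).

    #include <stdbool.h>
    #include <stdio.h>

    static bool must_use_domain_flush(unsigned long dfn, unsigned long page_count)
    {
        return !page_count || (page_count & (page_count - 1)) ||
               (dfn & (page_count - 1));
    }

    int main(void)
    {
        printf("%d\n", must_use_domain_flush(0x1000, 16)); /* 0: PSI is safe       */
        printf("%d\n", must_use_domain_flush(0x1001, 16)); /* 1: misaligned        */
        printf("%d\n", must_use_domain_flush(0x1000, 3));  /* 1: not a power of 2  */
        return 0;
    }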
++++++ xsa320-1.patch -> xsa321-2.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-2.patch     2020-07-14 
07:44:46.886974430 +0200
@@ -1,102 +1,175 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: vtd: prune (and rename) cache flush functions
 
-This is part of XSA-320 / CVE-2020-0543
+Rename __iommu_flush_cache to iommu_sync_cache and remove
+iommu_flush_cache_page. Also remove the iommu_flush_cache_entry
+wrapper and just use iommu_sync_cache instead. Note the _entry suffix
+was meaningless as the wrapper was already taking a size parameter in
+bytes. While there also constify the addr parameter.
+
+No functional change intended.
+
+This is part of XSA-321.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -43,8 +43,7 @@ void disable_qinval(struct vtd_iommu *io
+ int enable_intremap(struct vtd_iommu *iommu, int eim);
+ void disable_intremap(struct vtd_iommu *iommu);
+ 
+-void iommu_flush_cache_entry(void *addr, unsigned int size);
+-void iommu_flush_cache_page(void *addr, unsigned long npages);
++void iommu_sync_cache(const void *addr, unsigned int size);
+ int iommu_alloc(struct acpi_drhd_unit *drhd);
+ void iommu_free(struct acpi_drhd_unit *drhd);
+ 
+--- a/xen/drivers/passthrough/vtd/intremap.c
++++ b/xen/drivers/passthrough/vtd/intremap.c
+@@ -230,7 +230,7 @@ static void free_remap_entry(struct vtd_
+                      iremap_entries, iremap_entry);
+ 
+     update_irte(iommu, iremap_entry, &new_ire, false);
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+@@ -406,7 +406,7 @@ static int ioapic_rte_to_remap_entry(str
+     }
+ 
+     update_irte(iommu, iremap_entry, &new_ire, !init);
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+@@ -695,7 +695,7 @@ static int msi_msg_to_remap_entry(
+     update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
+     msi_desc->irte_initialized = true;
+ 
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -140,7 +140,8 @@ static int context_get_domain_id(struct
+ }
  
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+ static int iommus_incoherent;
+-static void __iommu_flush_cache(void *addr, unsigned int size)
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
++void iommu_sync_cache(const void *addr, unsigned int size)
+ {
+     int i;
+     static unsigned int clflush_size = 0;
+@@ -155,16 +156,6 @@ static void __iommu_flush_cache(void *ad
+         cacheline_flush((char *)addr + i);
+ }
+ 
+-void iommu_flush_cache_entry(void *addr, unsigned int size)
+-{
+-    __iommu_flush_cache(addr, size);
+-}
+-
+-void iommu_flush_cache_page(void *addr, unsigned long npages)
+-{
+-    __iommu_flush_cache(addr, PAGE_SIZE * npages);
+-}
+-
+ /* Allocate page table, return its machine address */
+ uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node)
+ {
+@@ -183,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
+         vaddr = __map_domain_page(cur_pg);
+         memset(vaddr, 0, PAGE_SIZE);
+ 
+-        iommu_flush_cache_page(vaddr, 1);
++        iommu_sync_cache(vaddr, PAGE_SIZE);
+         unmap_domain_page(vaddr);
+         cur_pg++;
+     }
+@@ -216,7 +207,7 @@ static u64 bus_to_context_maddr(struct v
+         }
+         set_root_value(*root, maddr);
+         set_root_present(*root);
+-        iommu_flush_cache_entry(root, sizeof(struct root_entry));
++        iommu_sync_cache(root, sizeof(struct root_entry));
+     }
+     maddr = (u64) get_context_addr(*root);
+     unmap_vtd_domain_page(root_entries);
+@@ -263,7 +254,7 @@ static u64 addr_to_dma_page_maddr(struct
+              */
+             dma_set_pte_readable(*pte);
+             dma_set_pte_writable(*pte);
+-            iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++            iommu_sync_cache(pte, sizeof(struct dma_pte));
+         }
+ 
+         if ( level == 2 )
+@@ -640,7 +631,7 @@ static int __must_check dma_pte_clear_on
+     *flush_flags |= IOMMU_FLUSHF_modified;
+ 
+     spin_unlock(&hd->arch.mapping_lock);
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+ 
+     unmap_vtd_domain_page(page);
+ 
+@@ -679,7 +670,7 @@ static void iommu_free_page_table(struct
+             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
+ 
+         dma_clear_pte(*pte);
+-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++        iommu_sync_cache(pte, sizeof(struct dma_pte));
+     }
+ 
+     unmap_vtd_domain_page(pt_vaddr);
+@@ -1400,7 +1391,7 @@ int domain_context_mapping_one(
+     context_set_address_width(*context, agaw);
+     context_set_fault_enable(*context);
+     context_set_present(*context);
+-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
++    iommu_sync_cache(context, sizeof(struct context_entry));
+     spin_unlock(&iommu->lock);
+ 
+     /* Context entry was previously non-present (with domid 0). */
+@@ -1564,7 +1555,7 @@ int domain_context_unmap_one(
+ 
+     context_clear_present(*context);
+     context_clear_entry(*context);
+-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
++    iommu_sync_cache(context, sizeof(struct context_entry));
+ 
+     iommu_domid= domain_iommu_domid(domain, iommu);
+     if ( iommu_domid == -1 )
+@@ -1791,7 +1782,7 @@ static int __must_check intel_iommu_map_
+ 
+     *pte = new;
+ 
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+     spin_unlock(&hd->arch.mapping_lock);
+     unmap_vtd_domain_page(page);
+ 
+@@ -1866,7 +1857,7 @@ int iommu_pte_flush(struct domain *d, ui
+     int iommu_domid;
+     int rc = 0;
+ 
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+ 
+     for_each_drhd_unit ( drhd )
+     {
+@@ -2724,7 +2715,7 @@ static int __init intel_iommu_quarantine
+             dma_set_pte_addr(*pte, maddr);
+             dma_set_pte_readable(*pte);
+         }
+-        iommu_flush_cache_page(parent, 1);
++        iommu_sync_cache(parent, PAGE_SIZE);
+ 
+         unmap_vtd_domain_page(parent);
+         parent = map_vtd_domain_page(maddr);

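The rename itself is mechanical; if compatibility shims were wanted (they are
not part of the patch), the removed wrappers could be expressed via the
surviving function like this:

    /* Sketch only, mirroring the removed wrapper bodies shown above. */
    static inline void flush_cache_entry_compat(void *addr, unsigned int size)
    {
        iommu_sync_cache(addr, size);
    }

    static inline void flush_cache_page_compat(void *addr, unsigned long npages)
    {
        iommu_sync_cache(addr, PAGE_SIZE * npages);
    }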
++++++ xsa320-1.patch -> xsa321-3.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-3.patch     2020-07-14 
07:44:48.642980099 +0200
@@ -1,102 +1,82 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: x86/iommu: introduce a cache sync hook
 
-This is part of XSA-320 / CVE-2020-0543
+The hook is only implemented for VT-d and it uses the already existing
+iommu_sync_cache function present in VT-d code. The new hook is
+added so that the cache can be flushed by code outside of VT-d when
+using shared page tables.
+
+Note that alloc_pgtable_maddr must use the now locally defined
+sync_cache function, because IOMMU ops are not yet setup the first
+time the function gets called during IOMMU initialization.
+
+No functional change intended.
+
+This is part of XSA-321.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -43,7 +43,6 @@ void disable_qinval(struct vtd_iommu *io
+ int enable_intremap(struct vtd_iommu *iommu, int eim);
+ void disable_intremap(struct vtd_iommu *iommu);
+ 
+-void iommu_sync_cache(const void *addr, unsigned int size);
+ int iommu_alloc(struct acpi_drhd_unit *drhd);
+ void iommu_free(struct acpi_drhd_unit *drhd);
+ 
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -141,7 +141,7 @@ static int context_get_domain_id(struct
+ 
+ static int iommus_incoherent;
+ 
+-void iommu_sync_cache(const void *addr, unsigned int size)
++static void sync_cache(const void *addr, unsigned int size)
+ {
+     int i;
+     static unsigned int clflush_size = 0;
+@@ -174,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
+         vaddr = __map_domain_page(cur_pg);
+         memset(vaddr, 0, PAGE_SIZE);
+ 
+-        iommu_sync_cache(vaddr, PAGE_SIZE);
++        sync_cache(vaddr, PAGE_SIZE);
+         unmap_domain_page(vaddr);
+         cur_pg++;
+     }
+@@ -2763,6 +2763,7 @@ const struct iommu_ops __initconstrel in
+     .iotlb_flush_all = iommu_flush_iotlb_all,
+     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
+     .dump_p2m_table = vtd_dump_p2m_table,
++    .sync_cache = sync_cache,
+ };
+ 
+ const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
+--- a/xen/include/asm-x86/iommu.h
++++ b/xen/include/asm-x86/iommu.h
+@@ -121,6 +121,13 @@ extern bool untrusted_msi;
+ int pi_update_irte(const struct pi_desc *pi_desc, const struct pirq *pirq,
+                    const uint8_t gvec);
+ 
++#define iommu_sync_cache(addr, size) ({                 \
++    const struct iommu_ops *ops = iommu_get_ops();      \
++                                                        \
++    if ( ops->sync_cache )                              \
++        iommu_vcall(ops, sync_cache, addr, size);       \
++})
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+ #endif /* !__ARCH_X86_IOMMU_H__ */
+ /*
+  * Local variables:
+--- a/xen/include/xen/iommu.h
++++ b/xen/include/xen/iommu.h
+@@ -250,6 +250,7 @@ struct iommu_ops {
+     int (*setup_hpet_msi)(struct msi_desc *);
+ 
+     int (*adjust_irq_affinities)(void);
++    void (*sync_cache)(const void *addr, unsigned int size);
+ #endif /* CONFIG_X86 */
+ 
+     int __must_check (*suspend)(void);

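With the hook and the dispatch macro above in place, code outside vtd/ can
request a cache writeback without knowing whether the IOMMU is coherent. A
hedged usage sketch (the caller shown is an assumption; the real non-VT-d
user is introduced later in the XSA-321 series):

    /* After updating an entry the IOMMU may read through shared page tables: */
    *pte = new_pte;
    iommu_sync_cache(pte, sizeof(*pte));   /* no-op unless ops->sync_cache is set */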
++++++ xsa320-1.patch -> xsa321-4.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-4.patch     2020-07-14 
07:44:50.094984786 +0200
@@ -1,102 +1,36 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: vtd: don't assume addresses are aligned in sync_cache
 
-This is part of XSA-320 / CVE-2020-0543
+Current code in sync_cache assumes that the address passed in is
+aligned to a cache line size. Fix the code to support passing in
+arbitrary addresses not necessarily aligned to a cache line size.
+
+This is part of XSA-321.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -143,8 +143,8 @@ static int iommus_incoherent;
+ 
+ static void sync_cache(const void *addr, unsigned int size)
+ {
+-    int i;
+-    static unsigned int clflush_size = 0;
++    static unsigned long clflush_size = 0;
++    const void *end = addr + size;
+ 
+     if ( !iommus_incoherent )
+         return;
+@@ -152,8 +152,9 @@ static void sync_cache(const void *addr,
+     if ( clflush_size == 0 )
+         clflush_size = get_cache_line_size();
+ 
+-    for ( i = 0; i < size; i += clflush_size )
+-        cacheline_flush((char *)addr + i);
++    addr -= (unsigned long)addr & (clflush_size - 1);
++    for ( ; addr < end; addr += clflush_size )
++        cacheline_flush((char *)addr);
+ }
  
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
-+
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+ /* Allocate page table, return its machine address */

++++++ xsa320-1.patch -> xsa321-5.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-5.patch     2020-07-14 
07:44:51.062987912 +0200
@@ -1,102 +1,24 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: x86/alternative: introduce alternative_2
 
-This is part of XSA-320 / CVE-2020-0543
+It's based on alternative_io_2 without inputs or outputs but with an
+added memory clobber.
 
-Signed-off-by: Andrew Cooper <[email protected]>
-Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
+This is part of XSA-321.
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+Acked-by: Jan Beulich <[email protected]>
+
+--- a/xen/include/asm-x86/alternative.h
++++ b/xen/include/asm-x86/alternative.h
+@@ -114,6 +114,11 @@ extern void alternative_branches(void);
+ #define alternative(oldinstr, newinstr, feature)                        \
+         asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+ 
++#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
++      asm volatile (ALTERNATIVE_2(oldinstr, newinstr1, feature1,      \
++                                  newinstr2, feature2)                \
++                    : : : "memory")
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+ /*
+  * Alternative inline assembly with input.
+  *

++++++ xsa320-1.patch -> xsa321-6.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-6.patch     2020-07-14 
07:44:51.618989706 +0200
@@ -1,102 +1,91 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: vtd: optimize CPU cache sync
 
-This is part of XSA-320 / CVE-2020-0543
+Some VT-d IOMMUs are non-coherent, which requires a cache write back
+in order for the changes made by the CPU to be visible to the IOMMU.
+This cache write back was unconditionally done using clflush, but there are
+other more efficient instructions to do so, hence implement support
+for them using the alternative framework.
+
+This is part of XSA-321.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -68,7 +68,6 @@ int __must_check qinval_device_iotlb_syn
+                                           u16 did, u16 size, u64 addr);
+ 
+ unsigned int get_cache_line_size(void);
+-void cacheline_flush(char *);
+ void flush_all_cache(void);
+ 
+ uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node);
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -31,6 +31,7 @@
+ #include <xen/pci_regs.h>
+ #include <xen/keyhandler.h>
+ #include <asm/msi.h>
++#include <asm/nops.h>
+ #include <asm/irq.h>
+ #include <asm/hvm/vmx/vmx.h>
+ #include <asm/p2m.h>
+@@ -154,7 +155,42 @@ static void sync_cache(const void *addr,
+ 
+     addr -= (unsigned long)addr & (clflush_size - 1);
+     for ( ; addr < end; addr += clflush_size )
+-        cacheline_flush((char *)addr);
++/*
++ * The arguments to a macro must not include preprocessor directives. Doing so
++ * results in undefined behavior, so we have to create some defines here in
++ * order to avoid it.
++ */
++#if defined(HAVE_AS_CLWB)
++# define CLWB_ENCODING "clwb %[p]"
++#elif defined(HAVE_AS_XSAVEOPT)
++# define CLWB_ENCODING "data16 xsaveopt %[p]" /* clwb */
++#else
++# define CLWB_ENCODING ".byte 0x66, 0x0f, 0xae, 0x30" /* clwb (%%rax) */
++#endif
++
++#define BASE_INPUT(addr) [p] "m" (*(const char *)(addr))
++#if defined(HAVE_AS_CLWB) || defined(HAVE_AS_XSAVEOPT)
++# define INPUT BASE_INPUT
++#else
++# define INPUT(addr) "a" (addr), BASE_INPUT(addr)
++#endif
++        /*
++         * Note regarding the use of NOP_DS_PREFIX: it's faster to do a clflush
++         * + prefix than a clflush + nop, and hence the prefix is added instead
++         * of letting the alternative framework fill the gap by appending nops.
++         */
++        alternative_io_2(".byte " __stringify(NOP_DS_PREFIX) "; clflush %[p]",
++                         "data16 clflush %[p]", /* clflushopt */
++                         X86_FEATURE_CLFLUSHOPT,
++                         CLWB_ENCODING,
++                         X86_FEATURE_CLWB, /* no outputs */,
++                         INPUT(addr));
++#undef INPUT
++#undef BASE_INPUT
++#undef CLWB_ENCODING
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
++    alternative_2("", "sfence", X86_FEATURE_CLFLUSHOPT,
++                      "sfence", X86_FEATURE_CLWB);
+ }
+ 
+ /* Allocate page table, return its machine address */
+--- a/xen/drivers/passthrough/vtd/x86/vtd.c
++++ b/xen/drivers/passthrough/vtd/x86/vtd.c
+@@ -51,11 +51,6 @@ unsigned int get_cache_line_size(void)
+     return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
+ }
+ 
+-void cacheline_flush(char * addr)
+-{
+-    clflush(addr);
+-}
+-
+ void flush_all_cache()
+ {
+     wbinvd();
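
Condensed, standalone sketch of the write-back loop the reworked sync_cache()
performs, for readers who only skim the hunks. The real code picks clflush,
clflushopt or clwb at boot via the alternatives framework and appends an sfence
only when one of the weakly-ordered variants is in use; this sketch hard-codes
plain clflush and assumes a 64-byte cache line (Xen derives the real size via
get_cache_line_size() above):

    static void sync_cache_sketch(const void *addr, unsigned int size)
    {
        unsigned int clflush_size = 64;       /* assumed cache line size */
        const char *p = (const char *)((unsigned long)addr &
                                       ~((unsigned long)clflush_size - 1));
        const char *end = (const char *)addr + size;

        for ( ; p < end; p += clflush_size )
            asm volatile ( "clflush %0" :: "m" (*p) );
    }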

++++++ xsa320-1.patch -> xsa321-7.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa321-7.patch     2020-07-14 
07:44:52.066991153 +0200
@@ -1,102 +1,153 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: x86/ept: flush cache when modifying PTEs and sharing page tables
 
-This is part of XSA-320 / CVE-2020-0543
+Modifications made to the page tables by EPT code need to be written
+to memory when the page tables are shared with the IOMMU, as Intel
+IOMMUs can be non-coherent and thus require changes to be written to
+memory in order to be visible to the IOMMU.
+
+In order to achieve this make sure data is written back to memory
+after writing an EPT entry when the recalc bit is not set in
+atomic_write_ept_entry. If such bit is set, the entry will be
+adjusted and atomic_write_ept_entry will be called a second time
+without the recalc bit set. Note that when splitting a super page the
+new tables resulting of the split should also be written back.
+
+Failure to do so can allow devices behind the IOMMU access to the
+stale super page, or cause coherency issues as changes made by the
+processor to the page tables are not visible to the IOMMU.
+
+This allows to remove the VT-d specific iommu_pte_flush helper, since
+the cache write back is now performed by atomic_write_ept_entry, and
+hence iommu_iotlb_flush can be used to flush the IOMMU TLB. The newly
+used method (iommu_iotlb_flush) can result in less flushes, since it
+might sometimes be called rightly with 0 flags, in which case it
+becomes a no-op.
+
+This is part of XSA-321.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -58,6 +58,19 @@ static int atomic_write_ept_entry(struct
+ 
+     write_atomic(&entryptr->epte, new.epte);
+ 
++    /*
++     * The recalc field on the EPT is used to signal either that a
++     * recalculation of the EMT field is required (which doesn't effect the
++     * IOMMU), or a type change. Type changes can only be between ram_rw,
++     * logdirty and ioreq_server: changes to/from logdirty won't work well with
++     * an IOMMU anyway, as IOMMU #PFs are not synchronous and will lead to
++     * aborts, and changes to/from ioreq_server are already fully flushed
++     * before returning to guest context (see
++     * XEN_DMOP_map_mem_type_to_ioreq_server).
++     */
++    if ( !new.recalc && iommu_use_hap_pt(p2m->domain) )
++        iommu_sync_cache(entryptr, sizeof(*entryptr));
++
+     return 0;
+ }
+ 
+@@ -278,6 +291,9 @@ static bool_t ept_split_super_page(struc
+             break;
+     }
+ 
++    if ( iommu_use_hap_pt(p2m->domain) )
++        iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
++
+     unmap_domain_page(table);
+ 
+     /* Even failed we should install the newly allocated ept page. */
+@@ -337,6 +353,9 @@ static int ept_next_level(struct p2m_dom
+         if ( !next )
+             return GUEST_TABLE_MAP_FAILED;
  
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
++        if ( iommu_use_hap_pt(p2m->domain) )
++            iommu_sync_cache(next, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
+         rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);
+         ASSERT(rc == 0);
+     }
+@@ -821,7 +840,10 @@ out:
+          need_modify_vtd_table )
+     {
+         if ( iommu_use_hap_pt(d) )
+-            rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
++            rc = iommu_iotlb_flush(d, _dfn(gfn), (1u << order),
++                                   (iommu_flags ? IOMMU_FLUSHF_added : 0) |
++                                   (vtd_pte_present ? IOMMU_FLUSHF_modified
++                                                    : 0));
+         else if ( need_iommu_pt_sync(d) )
+             rc = iommu_flags ?
+                 iommu_legacy_map(d, _dfn(gfn), mfn, order, iommu_flags) :
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -1884,53 +1884,6 @@ static int intel_iommu_lookup_page(struc
+     return 0;
+ }
+ 
+-int iommu_pte_flush(struct domain *d, uint64_t dfn, uint64_t *pte,
+-                    int order, int present)
+-{
+-    struct acpi_drhd_unit *drhd;
+-    struct vtd_iommu *iommu = NULL;
+-    struct domain_iommu *hd = dom_iommu(d);
+-    bool_t flush_dev_iotlb;
+-    int iommu_domid;
+-    int rc = 0;
+-
+-    iommu_sync_cache(pte, sizeof(struct dma_pte));
+-
+-    for_each_drhd_unit ( drhd )
+-    {
+-        iommu = drhd->iommu;
+-        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
+-            continue;
+-
+-        flush_dev_iotlb = !!find_ats_dev_drhd(iommu);
+-        iommu_domid= domain_iommu_domid(d, iommu);
+-        if ( iommu_domid == -1 )
+-            continue;
+-
+-        rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+-                                   __dfn_to_daddr(dfn),
+-                                   order, !present, flush_dev_iotlb);
+-        if ( rc > 0 )
+-        {
+-            iommu_flush_write_buffer(iommu);
+-            rc = 0;
+-        }
+-    }
+-
+-    if ( unlikely(rc) )
+-    {
+-        if ( !d->is_shutting_down && printk_ratelimit() )
+-            printk(XENLOG_ERR VTDPREFIX
+-                   " d%d: IOMMU pages flush failed: %d\n",
+-                   d->domain_id, rc);
+-
+-        if ( !is_hardware_domain(d) )
+-            domain_crash(d);
+-    }
+-
+-    return rc;
+-}
+-
+ static int __init vtd_ept_page_compatible(struct vtd_iommu *iommu)
+ {
+     u64 ept_cap, vtd_cap = iommu->cap;
+--- a/xen/include/asm-x86/iommu.h
++++ b/xen/include/asm-x86/iommu.h
+@@ -97,10 +97,6 @@ static inline int iommu_adjust_irq_affin
+            : 0;
+ }
+ 
+-/* While VT-d specific, this must get declared in a generic header. */
+-int __must_check iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
+-                                 int order, int present);
+-
+ static inline bool iommu_supports_x2apic(void)
+ {
+     return iommu_init_ops && iommu_init_ops->supports_x2apic
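
The behavioural change in ept_set_entry() is easier to see with the flag
computation pulled out into a local (flush_flags is illustrative, not part of
the patch); this merely restates the hunk above:

    unsigned int flush_flags = (iommu_flags     ? IOMMU_FLUSHF_added    : 0) |
                               (vtd_pte_present ? IOMMU_FLUSHF_modified : 0);

    /* With both flags zero the call is a no-op, which is where the "can
     * result in less flushes" remark in the description comes from. */
    rc = iommu_iotlb_flush(d, _dfn(gfn), 1u << order, flush_flags);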

++++++ xsa320-1.patch -> xsa328-1.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa328-1.patch     2020-07-14 
07:44:52.322991979 +0200
@@ -1,102 +1,118 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: Jan Beulich <[email protected]>
+Subject: x86/EPT: ept_set_middle_entry() related adjustments
 
-This is part of XSA-320 / CVE-2020-0543
+ept_split_super_page() wants to further modify the newly allocated
+table, so have ept_set_middle_entry() return the mapped pointer rather
+than tearing it down and then getting re-established right again.
 
-Signed-off-by: Andrew Cooper <[email protected]>
-Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
-
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
- 
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
- 
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
- 
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
- 
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
- 
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+Similarly ept_next_level() wants to hand back a mapped pointer of
+the next level page, so re-use the one established by
+ept_set_middle_entry() in case that path was taken.
+
+Pull the setting of suppress_ve ahead of insertion into the higher level
+table, and don't have ept_split_super_page() set the field a 2nd time.
+
+This is part of XSA-328.
+
+Signed-off-by: Jan Beulich <[email protected]>
+
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -187,8 +187,9 @@ static void ept_p2m_type_to_flags(struct
+ #define GUEST_TABLE_SUPER_PAGE  2
+ #define GUEST_TABLE_POD_PAGE    3
+ 
+-/* Fill in middle levels of ept table */
+-static int ept_set_middle_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry)
++/* Fill in middle level of ept table; return pointer to mapped new table. */
++static ept_entry_t *ept_set_middle_entry(struct p2m_domain *p2m,
++                                         ept_entry_t *ept_entry)
+ {
+     mfn_t mfn;
+     ept_entry_t *table;
+@@ -196,7 +197,12 @@ static int ept_set_middle_entry(struct p
+ 
+     mfn = p2m_alloc_ptp(p2m, 0);
+     if ( mfn_eq(mfn, INVALID_MFN) )
+-        return 0;
++        return NULL;
++
++    table = map_domain_page(mfn);
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
++    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
++        table[i].suppress_ve = 1;
+ 
+     ept_entry->epte = 0;
+     ept_entry->mfn = mfn_x(mfn);
+@@ -208,14 +214,7 @@ static int ept_set_middle_entry(struct p
+ 
+     ept_entry->suppress_ve = 1;
+ 
+-    table = map_domain_page(mfn);
+-
+-    for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+-        table[i].suppress_ve = 1;
+-
+-    unmap_domain_page(table);
+-
+-    return 1;
++    return table;
+ }
+ 
+ /* free ept sub tree behind an entry */
+@@ -253,10 +252,10 @@ static bool_t ept_split_super_page(struc
+ 
+     ASSERT(is_epte_superpage(ept_entry));
+ 
+-    if ( !ept_set_middle_entry(p2m, &new_ept) )
++    table = ept_set_middle_entry(p2m, &new_ept);
++    if ( !table )
+         return 0;
+ 
+-    table = map_domain_page(_mfn(new_ept.mfn));
+     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
+ 
+     for ( i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
+@@ -267,7 +266,6 @@ static bool_t ept_split_super_page(struc
+         epte->sp = (level > 1);
+         epte->mfn += i * trunk;
+         epte->snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
+-        epte->suppress_ve = 1;
+ 
+         ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
+ 
+@@ -306,8 +304,7 @@ static int ept_next_level(struct p2m_dom
+                           ept_entry_t **table, unsigned long *gfn_remainder,
+                           int next_level)
+ {
+-    unsigned long mfn;
+-    ept_entry_t *ept_entry, e;
++    ept_entry_t *ept_entry, *next = NULL, e;
+     u32 shift, index;
+ 
+     shift = next_level * EPT_TABLE_ORDER;
+@@ -332,19 +329,17 @@ static int ept_next_level(struct p2m_dom
+         if ( read_only )
+             return GUEST_TABLE_MAP_FAILED;
+ 
+-        if ( !ept_set_middle_entry(p2m, ept_entry) )
++        next = ept_set_middle_entry(p2m, ept_entry);
++        if ( !next )
+             return GUEST_TABLE_MAP_FAILED;
+-        else
+-            e = atomic_read_ept_entry(ept_entry); /* Refresh */
++        /* e is now stale and hence may not be used anymore below. */
+     }
+-
+     /* The only time sp would be set here is if we had hit a superpage */
+-    if ( is_epte_superpage(&e) )
++    else if ( is_epte_superpage(&e) )
+         return GUEST_TABLE_SUPER_PAGE;
+ 
+-    mfn = e.mfn;
+     unmap_domain_page(*table);
+-    *table = map_domain_page(_mfn(mfn));
++    *table = next ?: map_domain_page(_mfn(e.mfn));
+     *gfn_remainder &= (1UL << shift) - 1;
+     return GUEST_TABLE_NORMAL_PAGE;
+ }
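
Condensed caller view of the interface change, pieced together from the
ept_split_super_page() hunks above (surrounding code elided):

    /* The mapped table now comes straight back from ept_set_middle_entry()
     * instead of being re-mapped from new_ept.mfn afterwards. */
    table = ept_set_middle_entry(p2m, &new_ept);
    if ( !table )
        return 0;

    /* ... fill the EPT_PAGETABLE_ENTRIES entries of the split page ... */

    unmap_domain_page(table);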

++++++ xsa320-1.patch -> xsa328-2.patch ++++++
--- /work/SRC/openSUSE:Factory/xen/xsa320-1.patch       2020-06-11 
14:47:30.381872679 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.3060/xsa328-2.patch     2020-07-14 
07:44:52.606992896 +0200
@@ -1,102 +1,48 @@
-x86/spec-ctrl: CPUID/MSR definitions for Special Register Buffer Data Sampling
+From: <[email protected]>
+Subject: x86/ept: atomically modify entries in ept_next_level
 
-This is part of XSA-320 / CVE-2020-0543
+ept_next_level was passing a live PTE pointer to ept_set_middle_entry,
+which was then modified without taking into account that the PTE could
+be part of a live EPT table. This wasn't a security issue because the
+pages returned by p2m_alloc_ptp are zeroed, so adding such an entry
+before actually initializing it didn't allow a guest to access
+physical memory addresses it wasn't supposed to access.
+
+This is part of XSA-328.
 
-Signed-off-by: Andrew Cooper <[email protected]>
 Reviewed-by: Jan Beulich <[email protected]>
-Acked-by: Wei Liu <[email protected]>
 
---- a/docs/misc/xen-command-line.pandoc
-+++ b/docs/misc/xen-command-line.pandoc
-@@ -483,10 +483,10 @@ accounting for hardware capabilities as
- 
- Currently accepted:
- 
--The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, 
`ibpb`,
--`l1d-flush` and `ssbd` are used by default if available and applicable.  They 
can
--be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
--won't offer them to guests.
-+The Speculation Control hardware features `srbds-ctrl`, `md-clear`, `ibrsb`,
-+`stibp`, `ibpb`, `l1d-flush` and `ssbd` are used by default if available and
-+applicable.  They can be ignored, e.g. `no-ibrsb`, at which point Xen won't
-+use them itself, and won't offer them to guests.
+--- a/xen/arch/x86/mm/p2m-ept.c
++++ b/xen/arch/x86/mm/p2m-ept.c
+@@ -307,6 +307,8 @@ static int ept_next_level(struct p2m_dom
+     ept_entry_t *ept_entry, *next = NULL, e;
+     u32 shift, index;
  
- ### cpuid_mask_cpu
- > `= fam_0f_rev_[cdefg] | fam_10_rev_[bc] | fam_11_rev_b`
---- a/tools/libxl/libxl_cpuid.c
-+++ b/tools/libxl/libxl_cpuid.c
-@@ -213,6 +213,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
- 
-         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
-         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
-+        {"srbds-ctrl",   0x00000007,  0, CPUID_REG_EDX,  9,  1},
-         {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
-         {"cet-ibt",      0x00000007,  0, CPUID_REG_EDX, 20,  1},
-         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
---- a/tools/misc/xen-cpuid.c
-+++ b/tools/misc/xen-cpuid.c
-@@ -157,6 +157,7 @@ static const char *const str_7d0[32] =
-     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
-     [ 4] = "fsrm",
++    ASSERT(next_level);
++
+     shift = next_level * EPT_TABLE_ORDER;
  
-+    /*  8 */                [ 9] = "srbds-ctrl",
-     [10] = "md-clear",
-     /* 12 */                [13] = "tsx-force-abort",
+     index = *gfn_remainder >> shift;
+@@ -323,16 +325,20 @@ static int ept_next_level(struct p2m_dom
  
---- a/xen/arch/x86/msr.c
-+++ b/xen/arch/x86/msr.c
-@@ -134,6 +134,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t
-         /* Write-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
-@@ -288,6 +289,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t
-         /* Read-only */
-     case MSR_TSX_FORCE_ABORT:
-     case MSR_TSX_CTRL:
-+    case MSR_MCU_OPT_CTRL:
-     case MSR_U_CET:
-     case MSR_S_CET:
-     case MSR_PL0_SSP ... MSR_INTERRUPT_SSP_TABLE:
---- a/xen/arch/x86/spec_ctrl.c
-+++ b/xen/arch/x86/spec_ctrl.c
-@@ -312,12 +312,13 @@ static void __init print_details(enum in
-     printk("Speculative mitigation facilities:\n");
+     if ( !is_epte_present(&e) )
+     {
++        int rc;
++
+         if ( e.sa_p2mt == p2m_populate_on_demand )
+             return GUEST_TABLE_POD_PAGE;
  
-     /* Hardware features which pertain to speculative mitigations. */
--    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-+    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
-            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
-            (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
-+           (_7d0 & cpufeat_mask(X86_FEATURE_SRBDS_CTRL)) ? " SRBDS_CTRL" : "",
-            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
-            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
-            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
---- a/xen/include/asm-x86/msr-index.h
-+++ b/xen/include/asm-x86/msr-index.h
-@@ -179,6 +179,9 @@
- #define MSR_IA32_VMX_TRUE_ENTRY_CTLS            0x490
- #define MSR_IA32_VMX_VMFUNC                     0x491
+         if ( read_only )
+             return GUEST_TABLE_MAP_FAILED;
  
-+#define MSR_MCU_OPT_CTRL                    0x00000123
-+#define  MCU_OPT_CTRL_RNGDS_MITG_DIS        (_AC(1, ULL) <<  0)
+-        next = ept_set_middle_entry(p2m, ept_entry);
++        next = ept_set_middle_entry(p2m, &e);
+         if ( !next )
+             return GUEST_TABLE_MAP_FAILED;
+-        /* e is now stale and hence may not be used anymore below. */
 +
- #define MSR_U_CET                           0x000006a0
- #define MSR_S_CET                           0x000006a2
- #define MSR_PL0_SSP                         0x000006a4
---- a/xen/include/public/arch-x86/cpufeatureset.h
-+++ b/xen/include/public/arch-x86/cpufeatureset.h
-@@ -252,6 +252,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /
- /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
- XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network 
Instructions */
- XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation 
Single Precision */
-+XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and 
RNGDS_MITG_DIS. */
- XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural 
buffers */
- XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
- XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
++        rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);
++        ASSERT(rc == 0);
+     }
+     /* The only time sp would be set here is if we had hit a superpage */
+     else if ( is_epte_superpage(&e) )
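
In short, ept_next_level() now assembles the new middle entry in its local copy
and only then publishes it with an atomic write, instead of handing
ept_set_middle_entry() a pointer into the live table; condensed from the hunk
above:

    next = ept_set_middle_entry(p2m, &e);   /* fills the local copy 'e' */
    if ( !next )
        return GUEST_TABLE_MAP_FAILED;

    rc = atomic_write_ept_entry(p2m, ept_entry, e, next_level);  /* publish */
    ASSERT(rc == 0);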

