Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2019-09-11 10:22:29
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new.7948 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Wed Sep 11 10:22:29 2019 rev:270 rq:728544 version:4.12.1_02

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2019-08-19 20:48:45.477080861 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new.7948/xen.changes        2019-09-11 10:22:34.415494531 +0200
@@ -1,0 +2,19 @@
+Wed Aug 28 09:25:30 MDT 2019 - [email protected]
+
+- Upstream bug fixes (bsc#1027519)
+  5d419d49-x86-spec-ctrl-report-proper-status.patch
+  5d43253c-x86-ucode-always-collect_cpu_info-at-boot.patch
+  5d4aa36f-x86-apic-enable-x2APIC-mode-earlier.patch
+  5d4afa7a-credit2-fix-memory-leak.patch
+  5d4d850a-introduce-bss-percpu-page-aligned.patch
+  5d516531-x86-xpti-dont-leak-TSS-adjacent-data.patch
+  5d5bf475-x86-PV-fix-handling-of-iommu-mappings.patch
+  5d6524ca-x86-mm-correctly-init-M2P-entries.patch
+
+-------------------------------------------------------------------
+Wed Aug 28 11:25:17 UTC 2019 - [email protected]
+
+- Preserve modified files which used to be marked as %config,
+  rename file.rpmsave to file
+
+-------------------------------------------------------------------

New:
----
  5d419d49-x86-spec-ctrl-report-proper-status.patch
  5d43253c-x86-ucode-always-collect_cpu_info-at-boot.patch
  5d4aa36f-x86-apic-enable-x2APIC-mode-earlier.patch
  5d4afa7a-credit2-fix-memory-leak.patch
  5d4d850a-introduce-bss-percpu-page-aligned.patch
  5d516531-x86-xpti-dont-leak-TSS-adjacent-data.patch
  5d5bf475-x86-PV-fix-handling-of-iommu-mappings.patch
  5d6524ca-x86-mm-correctly-init-M2P-entries.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.HuVxiw/_old  2019-09-11 10:22:37.147494192 +0200
+++ /var/tmp/diff_new_pack.HuVxiw/_new  2019-09-11 10:22:37.151494191 +0200
@@ -159,6 +159,14 @@
 # For xen-libs
 Source99:       baselibs.conf
 # Upstream patches
+Patch1:         5d419d49-x86-spec-ctrl-report-proper-status.patch
+Patch2:         5d43253c-x86-ucode-always-collect_cpu_info-at-boot.patch
+Patch3:         5d4aa36f-x86-apic-enable-x2APIC-mode-earlier.patch
+Patch4:         5d4afa7a-credit2-fix-memory-leak.patch
+Patch5:         5d4d850a-introduce-bss-percpu-page-aligned.patch
+Patch6:         5d516531-x86-xpti-dont-leak-TSS-adjacent-data.patch
+Patch7:         5d5bf475-x86-PV-fix-handling-of-iommu-mappings.patch
+Patch8:         5d6524ca-x86-mm-correctly-init-M2P-entries.patch
 # Our platform specific patches
 Patch400:       xen-destdir.patch
 Patch401:       vif-bridge-no-iptables.patch
@@ -357,6 +365,14 @@
 %prep
 %setup -q -n %xen_build_dir -a 1 -a 5 -a 6 -a 57
 # Upstream patches
+%patch1 -p1
+%patch2 -p1
+%patch3 -p1
+%patch4 -p1
+%patch5 -p1
+%patch6 -p1
+%patch7 -p1
+%patch8 -p1
 # Our platform specific patches
 %patch400 -p1
 %patch401 -p1
@@ -1158,6 +1174,17 @@
 fi
 
 %pre tools
+for empty_config_file in \
+       logrotate.d/xen \
+       modprobe.d/xen_loop.conf \
+       pam.d/xen-api \
+       xen/cpupool \
+       xen/xenapiusers \
+       xen/xl.conf
+do
+  test -f /etc/${empty_config_file}.rpmsave && mv -v /etc/${empty_config_file}.rpmsave /etc/${empty_config_file}.rpmsave.old ||:
+done
+
 %service_add_pre xencommons.service
 %service_add_pre xendomains.service
 %service_add_pre xen-watchdog.service
@@ -1229,6 +1256,18 @@
 %service_del_postun xen-init-dom0.service
 %service_del_postun xen-qemu-dom0-disk-backend.service
 
+%posttrans tools
+for empty_config_file in \
+       logrotate.d/xen \
+       modprobe.d/xen_loop.conf \
+       pam.d/xen-api \
+       xen/cpupool \
+       xen/xenapiusers \
+       xen/xl.conf
+do
+  test -f /etc/${empty_config_file}.rpmsave && mv -v /etc/${empty_config_file}.rpmsave /etc/${empty_config_file}
+done
+
 %endif
 
 %post libs -p /sbin/ldconfig

++++++ 5d419d49-x86-spec-ctrl-report-proper-status.patch ++++++
# Commit 2adc580bd59f5c3034fd6ecacd5748678373f17a
# Date 2019-07-31 14:53:13 +0100
# Author Jin Nan Wang <[email protected]>
# Committer Andrew Cooper <[email protected]>
xen/spec-ctrl: Speculative mitigation facilities report wrong status

Booting with spec-ctrl=0 results in Xen printing "None MD_CLEAR".

  (XEN)   Support for HVM VMs: None MD_CLEAR
  (XEN)   Support for PV VMs: None MD_CLEAR

Add a check for X86_FEATURE_MD_CLEAR to avoid printing "None".

Signed-off-by: James Wang <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -360,6 +360,7 @@ static void __init print_details(enum in
     printk("  Support for HVM VMs:%s%s%s%s%s\n",
            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
             boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
+            boot_cpu_has(X86_FEATURE_MD_CLEAR)   ||
            opt_eager_fpu)                           ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_SPEC_CTRL" : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB"           : "",
@@ -371,6 +372,7 @@ static void __init print_details(enum in
     printk("  Support for PV VMs:%s%s%s%s%s\n",
            (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
             boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
+            boot_cpu_has(X86_FEATURE_MD_CLEAR)  ||
            opt_eager_fpu)                           ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_PV)       ? " MSR_SPEC_CTRL" : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB"           : "",
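
To see the reporting bug in isolation, here is a standalone sketch (plain C,
not Xen code; the booleans are invented stand-ins for the boot_cpu_has()
checks) of the ternary-chain printing pattern: when a feature can emit a token
on the line but is missing from the "anything supported?" guard, the output
contradicts itself as "None MD_CLEAR".

    #include <stdbool.h>
    #include <stdio.h>

    static void print_support(bool sc_msr, bool sc_rsb, bool md_clear,
                              bool guard_includes_md_clear)
    {
        /* The guard decides whether " None" is printed at all. */
        bool any = sc_msr || sc_rsb || (guard_includes_md_clear && md_clear);

        printf("  Support:%s%s%s%s\n",
               any      ? ""               : " None",
               sc_msr   ? " MSR_SPEC_CTRL" : "",
               sc_rsb   ? " RSB"           : "",
               md_clear ? " MD_CLEAR"      : "");
    }

    int main(void)
    {
        /* spec-ctrl=0 leaves only MD_CLEAR set: the old guard prints
         * "None MD_CLEAR". */
        print_support(false, false, true, false);
        /* With MD_CLEAR included in the guard, the contradiction is gone. */
        print_support(false, false, true, true);
        return 0;
    }
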
++++++ 5d43253c-x86-ucode-always-collect_cpu_info-at-boot.patch ++++++
# Commit 2bb2c55cf870e78bc7f514784b2cd8c947d8729c
# Date 2019-08-01 18:45:32 +0100
# Author Sergey Dyasli <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/microcode: always collect_cpu_info() during boot

Currently the cpu_sig struct is not updated during boot if no microcode blob
is specified by "ucode=[<integer>|scan]".

This results in cpu_sig.rev being 0, which affects the APIC's
check_deadline_errata() and retpoline_safe() functions.

Fix this by getting ucode revision early during boot and SMP bring up.
While at it, protect early_microcode_update_cpu() for cases when
microcode_ops is NULL.

Signed-off-by: Sergey Dyasli <[email protected]>
Signed-off-by: Chao Gao <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/microcode.c
+++ b/xen/arch/x86/microcode.c
@@ -383,10 +383,15 @@ static struct notifier_block microcode_p
 
 int __init early_microcode_update_cpu(bool start_update)
 {
+    unsigned int cpu = smp_processor_id();
+    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
     int rc = 0;
     void *data = NULL;
     size_t len;
 
+    if ( !microcode_ops )
+        return -ENOSYS;
+
     if ( ucode_blob.size )
     {
         len = ucode_blob.size;
@@ -397,6 +402,9 @@ int __init early_microcode_update_cpu(bo
         len = ucode_mod.mod_end;
         data = bootstrap_map(&ucode_mod);
     }
+
+    microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+
     if ( data )
     {
         if ( start_update && microcode_ops->start_update )
@@ -413,6 +421,8 @@ int __init early_microcode_update_cpu(bo
 
 int __init early_microcode_init(void)
 {
+    unsigned int cpu = smp_processor_id();
+    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
     int rc;
 
     rc = microcode_init_intel();
@@ -425,6 +435,8 @@ int __init early_microcode_init(void)
 
     if ( microcode_ops )
     {
+        microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
+
         if ( ucode_mod.mod_end || ucode_blob.size )
             rc = early_microcode_update_cpu(true);
 
++++++ 5d4aa36f-x86-apic-enable-x2APIC-mode-earlier.patch ++++++
# Commit 260940578de348c38f18cadc6fa53f499e57919c
# Date 2019-08-07 12:09:51 +0200
# Author Roger Pau Monné <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/apic: enable x2APIC mode before doing any setup

Current code calls apic_x2apic_probe which does some initialization
and setup before having enabled x2APIC mode (if it's not already
enabled by the firmware).

This can lead to issues if the APIC ID doesn't match the x2APIC ID, as
apic_x2apic_probe calls init_apic_ldr_x2apic_cluster which depending
on the APIC mode might set cpu_2_logical_apicid using the APIC ID
instead of the x2APIC ID (because x2APIC might not be enabled yet).

Fix this by enabling x2APIC before calling apic_x2apic_probe.

As a remark, this was discovered while I was trying to figure out why
one of my test boxes didn't report any iommu faults. The root cause
was that the iommu MSI address field was set using the stale value in
cpu_2_logical_apicid, and thus the iommu fault interrupt would get
lost. Even if the MSI address field gets set to a correct value
afterwards, as soon as a single iommu fault is pending no further
interrupts would get injected, so losing a single iommu fault
interrupt is fatal.

Signed-off-by: Roger Pau Monné <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -946,15 +946,15 @@ void __init x2apic_bsp_setup(void)
 
     force_iommu = 1;
 
-    genapic = *apic_x2apic_probe();
-    printk("Switched to APIC driver %s.\n", genapic.name);
-
     if ( !x2apic_enabled )
     {
         x2apic_enabled = true;
         __enable_x2apic();
     }
 
+    genapic = *apic_x2apic_probe();
+    printk("Switched to APIC driver %s\n", genapic.name);
+
 restore_out:
     restore_IO_APIC_setup(ioapic_entries);
     unmask_8259A();
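
A toy model of the ordering problem (standalone C, nothing Xen-specific): the
probe caches an ID whose value depends on whether x2APIC mode is already
enabled, so the mode switch has to happen before the probe runs.

    #include <stdbool.h>
    #include <stdio.h>

    static bool x2apic_enabled;
    static unsigned int cached_logical_id;

    static unsigned int read_apic_id(void)
    {
        /* Pretend the legacy APIC ID and the x2APIC ID differ on this box. */
        return x2apic_enabled ? 0x100 : 0xff;
    }

    static void probe(void)
    {
        cached_logical_id = read_apic_id(); /* models the caching done by
                                             * init_apic_ldr_x2apic_cluster */
    }

    int main(void)
    {
        probe();                            /* buggy order: caches the stale ID */
        printf("stale id: %#x\n", cached_logical_id);

        x2apic_enabled = true;              /* fixed order: enable the mode... */
        probe();                            /* ...then probe and cache */
        printf("fresh id: %#x\n", cached_logical_id);
        return 0;
    }
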
++++++ 5d4afa7a-credit2-fix-memory-leak.patch ++++++
# Commit 70f9dff51ee873cf65246d3e95b27e2e92ca137b
# Date 2019-08-07 17:21:14 +0100
# Author Juergen Gross <[email protected]>
# Committer Andrew Cooper <[email protected]>
xen/sched: fix memory leak in credit2

csched2_deinit() is leaking the run-queue memory.

Signed-off-by: Juergen Gross <[email protected]>
Acked-by: Dario Faggioli <[email protected]>

--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -4072,6 +4072,8 @@ csched2_deinit(struct scheduler *ops)
 
     prv = csched2_priv(ops);
     ops->sched_data = NULL;
+    if ( prv )
+        xfree(prv->rqd);
     xfree(prv);
 }
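
The bug class in miniature (standalone C using libc allocation; the struct
names only echo the scheduler's): freeing the private struct without freeing
the run-queue array it owns.

    #include <stdlib.h>

    struct runqueue { int id; };
    struct csched2_private { struct runqueue *rqd; };

    static void csched2_deinit_sketch(struct csched2_private *prv)
    {
        if (prv)
            free(prv->rqd);   /* the added line: release the owned array */
        free(prv);            /* freeing only the container leaked rqd */
    }

    int main(void)
    {
        struct csched2_private *prv = calloc(1, sizeof(*prv));
        if (!prv)
            return 1;
        prv->rqd = calloc(8, sizeof(*prv->rqd));
        csched2_deinit_sketch(prv);
        return 0;
    }
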
 
++++++ 5d4d850a-introduce-bss-percpu-page-aligned.patch ++++++
# Commit 6c9639a72f0ca3a9430ef75f375877182281fdef
# Date 2019-08-09 16:36:58 +0200
# Author Andrew Cooper <[email protected]>
# Committer Jan Beulich <[email protected]>
xen/link: Introduce .bss.percpu.page_aligned

Future changes are going to need to page align some percpu data.

Shuffle the exact link order of items within the BSS to give
.bss.percpu.page_aligned appropriate alignment, even on CPU0, which uses
.bss.percpu itself.

Insert explicit alignment such that there won't be a gap between
__per_cpu_start and the first actual per-CPU object.  The POINTER_ALIGN
for __bss_end is to cover the lack of SMP_CACHE_BYTES alignment, as the
loops which zero the BSS use pointer-sized stores on all architectures.

Rework __DEFINE_PER_CPU() so the caller passes in all attributes, and
adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
that it is now possible to grep for .bss.percpu and find all the users.

Finally, introduce DEFINE_PER_CPU_PAGE_ALIGNED() which specifies the
section attribute and verifies the type's alignment.

Signed-off-by: Andrew Cooper <[email protected]>

Make DEFINE_PER_CPU_PAGE_ALIGNED() verify the alignment rather than
specifying it. It is the underlying type which should be suitably aligned.

Signed-off-by: Jan Beulich <[email protected]>
Acked-by: Julien Grall <[email protected]>
Acked-by: Andrew Cooper <[email protected]>

--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -195,14 +195,16 @@ SECTIONS
        *(.bss.stack_aligned)
        . = ALIGN(PAGE_SIZE);
        *(.bss.page_aligned)
-       *(.bss)
-       . = ALIGN(SMP_CACHE_BYTES);
+       . = ALIGN(PAGE_SIZE);
        __per_cpu_start = .;
+       *(.bss.percpu.page_aligned)
        *(.bss.percpu)
        . = ALIGN(SMP_CACHE_BYTES);
        *(.bss.percpu.read_mostly)
        . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_data_end = .;
+       *(.bss)
+       . = ALIGN(POINTER_ALIGN);
        __bss_end = .;
   } :text
   _end = . ;
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -277,14 +277,16 @@ SECTIONS
        __bss_start = .;
        *(.bss.stack_aligned)
        *(.bss.page_aligned*)
-       *(.bss)
-       . = ALIGN(SMP_CACHE_BYTES);
+       . = ALIGN(PAGE_SIZE);
        __per_cpu_start = .;
+       *(.bss.percpu.page_aligned)
        *(.bss.percpu)
        . = ALIGN(SMP_CACHE_BYTES);
        *(.bss.percpu.read_mostly)
        . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_data_end = .;
+       *(.bss)
+       . = ALIGN(POINTER_ALIGN);
        __bss_end = .;
   } :text
   _end = . ;
--- a/xen/include/asm-arm/percpu.h
+++ b/xen/include/asm-arm/percpu.h
@@ -10,10 +10,8 @@ extern char __per_cpu_start[], __per_cpu
 extern unsigned long __per_cpu_offset[NR_CPUS];
 void percpu_init_areas(void);
 
-/* Separate out the type, so (int[3], foo) works. */
-#define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
-    __typeof__(type) per_cpu_##name
+#define __DEFINE_PER_CPU(attr, type, name) \
+    attr __typeof__(type) per_cpu_ ## name
 
 #define per_cpu(var, cpu)  \
     (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
--- a/xen/include/asm-x86/percpu.h
+++ b/xen/include/asm-x86/percpu.h
@@ -7,10 +7,8 @@ extern unsigned long __per_cpu_offset[NR
 void percpu_init_areas(void);
 #endif
 
-/* Separate out the type, so (int[3], foo) works. */
-#define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
-    __typeof__(type) per_cpu_##name
+#define __DEFINE_PER_CPU(attr, type, name) \
+    attr __typeof__(type) per_cpu_ ## name
 
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu)  \
--- a/xen/include/xen/percpu.h
+++ b/xen/include/xen/percpu.h
@@ -9,9 +9,17 @@
  * The _##name concatenation is being used here to prevent 'name' from getting
  * macro expanded, while still allowing a per-architecture symbol name prefix.
  */
-#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
+#define DEFINE_PER_CPU(type, name) \
+    __DEFINE_PER_CPU(__section(".bss.percpu"), type, _ ## name)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+    typedef char name ## _chk_t \
+        [BUILD_BUG_ON_ZERO(__alignof(type) & (PAGE_SIZE - 1))]; \
+    __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned"), \
+                     type, _ ## name)
+
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
-       __DEFINE_PER_CPU(type, _##name, .read_mostly)
+    __DEFINE_PER_CPU(__section(".bss.percpu.read_mostly"), type, _ ## name)
 
 /* Preferred on Xen. Also see arch-defined per_cpu(). */
 #define this_cpu(var)    __get_cpu_var(var)
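
Outside of Xen, the reworked macro shape can be sketched with GCC/Clang
section attributes and C11 static assertions (the Xen originals use
__section() and BUILD_BUG_ON_ZERO(); PAGE_SIZE is assumed to be 4096 here).
Note how the page-aligned variant verifies the type's alignment instead of
imposing it:

    #define PAGE_SIZE 4096

    /* The caller passes the full attribute, so grepping for .bss.percpu
     * now finds every user. */
    #define __DEFINE_PER_CPU(attr, type, name) \
        attr __typeof__(type) per_cpu_ ## name

    #define DEFINE_PER_CPU(type, name) \
        __DEFINE_PER_CPU(__attribute__((section(".bss.percpu"))), type, _ ## name)

    /* Verify, rather than specify, the alignment: the underlying type
     * itself must be suitably aligned. */
    #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
        _Static_assert((_Alignof(type) & (PAGE_SIZE - 1)) == 0, \
                       "type must itself be page-aligned"); \
        __DEFINE_PER_CPU(__attribute__((section(".bss.percpu.page_aligned"))), \
                         type, _ ## name)

    struct tss_page { _Alignas(PAGE_SIZE) char data[PAGE_SIZE]; };

    DEFINE_PER_CPU(int, counter);
    DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_page, tss_page);

    int main(void) { return per_cpu__counter; }
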
++++++ 5d516531-x86-xpti-dont-leak-TSS-adjacent-data.patch ++++++
# Commit 7888440625617693487495a7842e6a991ead2647
# Date 2019-08-12 14:10:09 +0100
# Author Jan Beulich <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/xpti: Don't leak TSS-adjacent percpu data via Meltdown

The XPTI work restricted the visibility of most of memory, but missed a few
aspects when it came to the TSS.

Given that the TSS is just an object in percpu data, the 4k mapping for it
created in setup_cpu_root_pgt() maps adjacent percpu data, making it all
leakable via Meltdown, even when XPTI is in use.

Furthermore, no care is taken to check that the TSS doesn't cross a page
boundary.  As it turns out, struct tss_struct is aligned on its size which
does prevent it straddling a page boundary.

Rework the TSS types while making this change.  Rename tss_struct to tss64, to
mirror the existing tss32 structure we have in HVM's Task Switch logic.  Drop
tss64's alignment and __cacheline_filler[] field.

Introduce tss_page which contains a single tss64 and keeps the rest of the
page clear, so no adjacent data can be leaked.  Move the definition from
setup.c to traps.c, which is a more appropriate place for it to live.

Signed-off-by: Andrew Cooper <[email protected]>
Signed-off-by: Jan Beulich <[email protected]>

--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -727,7 +727,7 @@ void load_system_tables(void)
        unsigned long stack_bottom = get_stack_bottom(),
                stack_top = stack_bottom & ~(STACK_SIZE - 1);
 
-       struct tss_struct *tss = &this_cpu(init_tss);
+       struct tss64 *tss = &this_cpu(tss_page).tss;
        seg_desc_t *gdt =
                this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY;
        seg_desc_t *compat_gdt =
@@ -742,7 +742,7 @@ void load_system_tables(void)
                .limit = (IDT_ENTRIES * sizeof(idt_entry_t)) - 1,
        };
 
-       *tss = (struct tss_struct){
+       *tss = (struct tss64){
                /* Main stack for interrupts/exceptions. */
                .rsp0 = stack_bottom,
 
@@ -767,16 +767,12 @@ void load_system_tables(void)
                .bitmap = IOBMP_INVALID_OFFSET,
        };
 
-       _set_tssldt_desc(
-               gdt + TSS_ENTRY,
-               (unsigned long)tss,
-               offsetof(struct tss_struct, __cacheline_filler) - 1,
-               SYS_DESC_tss_avail);
-       _set_tssldt_desc(
-               compat_gdt + TSS_ENTRY,
-               (unsigned long)tss,
-               offsetof(struct tss_struct, __cacheline_filler) - 1,
-               SYS_DESC_tss_busy);
+       BUILD_BUG_ON(sizeof(*tss) <= 0x67); /* Mandated by the architecture. */
+
+       _set_tssldt_desc(gdt + TSS_ENTRY, (unsigned long)tss,
+                        sizeof(*tss) - 1, SYS_DESC_tss_avail);
+       _set_tssldt_desc(compat_gdt + TSS_ENTRY, (unsigned long)tss,
+                        sizeof(*tss) - 1, SYS_DESC_tss_busy);
 
        lgdt(&gdtr);
        lidt(&idtr);
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -796,7 +796,7 @@ static void vmx_set_host_env(struct vcpu
               (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY));
     __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
 
-    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
+    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(tss_page, cpu).tss);
 
     __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
 
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -16,7 +16,6 @@
 #include <xen/domain_page.h>
 #include <xen/version.h>
 #include <xen/gdbstub.h>
-#include <xen/percpu.h>
 #include <xen/hypercall.h>
 #include <xen/keyhandler.h>
 #include <xen/numa.h>
@@ -101,8 +100,6 @@ unsigned long __read_mostly xen_phys_sta
 
 unsigned long __read_mostly xen_virt_end;
 
-DEFINE_PER_CPU(struct tss_struct, init_tss);
-
 char __section(".bss.stack_aligned") __aligned(STACK_SIZE)
     cpu0_stack[STACK_SIZE];
 
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -830,7 +830,11 @@ static int setup_cpu_root_pgt(unsigned i
     if ( !rc )
         rc = clone_mapping(idt_tables[cpu], rpt);
     if ( !rc )
-        rc = clone_mapping(&per_cpu(init_tss, cpu), rpt);
+    {
+        BUILD_BUG_ON(sizeof(this_cpu(tss_page)) != PAGE_SIZE);
+
+        rc = clone_mapping(&per_cpu(tss_page, cpu).tss, rpt);
+    }
     if ( !rc )
         rc = clone_mapping((void *)per_cpu(stubs.addr, cpu), rpt);
 
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -108,6 +108,12 @@ idt_entry_t __section(".bss.page_aligned
 /* Pointer to the IDT of every CPU. */
 idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
 
+/*
+ * The TSS is smaller than a page, but we give it a full page to avoid
+ * adjacent per-cpu data leaking via Meltdown when XPTI is in use.
+ */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_page, tss_page);
+
 bool (*ioemul_handle_quirk)(
     u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs);
 
@@ -559,7 +565,7 @@ void show_stack_overflow(unsigned int cp
 
     printk("Valid stack range: %p-%p, sp=%p, tss.rsp0=%p\n",
            (void *)esp_top, (void *)esp_bottom, (void *)esp,
-           (void *)per_cpu(init_tss, cpu).rsp0);
+           (void *)per_cpu(tss_page, cpu).tss.rsp0);
 
     /*
      * Trigger overflow trace if %esp is anywhere within the guard page, or
@@ -1929,7 +1935,7 @@ static void __init set_intr_gate(unsigne
 
 void load_TR(void)
 {
-    struct tss_struct *tss = &this_cpu(init_tss);
+    struct tss64 *tss = &this_cpu(tss_page).tss;
     struct desc_ptr old_gdt, tss_gdt = {
         .base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
         .limit = LAST_RESERVED_GDT_BYTE
@@ -1937,14 +1943,10 @@ void load_TR(void)
 
     _set_tssldt_desc(
         this_cpu(gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
-        (unsigned long)tss,
-        offsetof(struct tss_struct, __cacheline_filler) - 1,
-        SYS_DESC_tss_avail);
+        (unsigned long)tss, sizeof(*tss) - 1, SYS_DESC_tss_avail);
     _set_tssldt_desc(
         this_cpu(compat_gdt_table) + TSS_ENTRY - FIRST_RESERVED_GDT_ENTRY,
-        (unsigned long)tss,
-        offsetof(struct tss_struct, __cacheline_filler) - 1,
-        SYS_DESC_tss_busy);
+        (unsigned long)tss, sizeof(*tss) - 1, SYS_DESC_tss_busy);
 
     /* Switch to non-compat GDT (which has B bit clear) to execute LTR. */
     asm volatile (
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -416,7 +416,7 @@ static always_inline void __mwait(unsign
 #define IOBMP_BYTES             8192
 #define IOBMP_INVALID_OFFSET    0x8000
 
-struct __packed __cacheline_aligned tss_struct {
+struct __packed tss64 {
     uint32_t :32;
     uint64_t rsp0, rsp1, rsp2;
     uint64_t :64;
@@ -427,9 +427,11 @@ struct __packed __cacheline_aligned tss_
     uint64_t ist[7];
     uint64_t :64;
     uint16_t :16, bitmap;
-    /* Pads the TSS to be cacheline-aligned (total size is 0x80). */
-    uint8_t __cacheline_filler[24];
 };
+struct tss_page {
+    struct tss64 __aligned(PAGE_SIZE) tss;
+};
+DECLARE_PER_CPU(struct tss_page, tss_page);
 
 #define IST_NONE 0UL
 #define IST_DF   1UL
@@ -468,7 +470,6 @@ static inline void disable_each_ist(idt_
 extern idt_entry_t idt_table[];
 extern idt_entry_t *idt_tables[];
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
 DECLARE_PER_CPU(root_pgentry_t *, root_pgt);
 
 extern void write_ptbase(struct vcpu *v);
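
A compile-time sketch (plain C11, not the Xen headers; the real struct uses
anonymous bitfields where reserved names appear below) of the shape the patch
introduces: a 64-bit TSS without the old cacheline filler, wrapped in a
page-sized, page-aligned container so that the 4k mapping made for it exposes
nothing else.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct __attribute__((packed)) tss64 {
        uint32_t rsvd0;
        uint64_t rsp0, rsp1, rsp2;
        uint64_t rsvd1;
        uint64_t ist[7];
        uint64_t rsvd2;
        uint16_t rsvd3, bitmap;
    };

    /* The rest of the page past the TSS stays zeroed padding, so no
     * adjacent per-cpu data can leak via the mapping. */
    struct tss_page {
        _Alignas(PAGE_SIZE) struct tss64 tss;
    };

    /* The architecture mandates a 64-bit TSS limit of at least 0x67. */
    _Static_assert(sizeof(struct tss64) > 0x67, "TSS too small");
    _Static_assert(sizeof(struct tss_page) == PAGE_SIZE, "must fill the page");

    int main(void)
    {
        printf("tss64: %zu bytes, tss_page: %zu bytes\n",
               sizeof(struct tss64), sizeof(struct tss_page));
        return 0;
    }
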
++++++ 5d5bf475-x86-PV-fix-handling-of-iommu-mappings.patch ++++++
# Commit 77a994f3f8eb0d3cb0f2bf314b0ebf6a1d37f623
# Date 2019-08-20 14:24:05 +0100
# Author Roger Pau Monne <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/p2m: fix non-translated handling of iommu mappings

The current usage of need_iommu_pt_sync in p2m for non-translated
guests is wrong because it doesn't correctly handle a relaxed PV
hardware domain, which has need_sync set to false but still needs
entries to be added by calls to {set/clear}_identity_p2m_entry.

Signed-off-by: Roger Pau Monné <[email protected]>
Reviewed-by: Paul Durrant <[email protected]>
Reviewed-by: Jan Beulich <[email protected]>
Acked-by: George Dunlap <[email protected]>
Tested-by: Roman Shaposhnik <[email protected]>

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1331,7 +1331,7 @@ int set_identity_p2m_entry(struct domain
 
     if ( !paging_mode_translate(p2m->domain) )
     {
-        if ( !need_iommu_pt_sync(d) )
+        if ( !has_iommu_pt(d) )
             return 0;
         return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l), PAGE_ORDER_4K,
                                 IOMMUF_readable | IOMMUF_writable);
@@ -1422,7 +1422,7 @@ int clear_identity_p2m_entry(struct doma
 
     if ( !paging_mode_translate(d) )
     {
-        if ( !need_iommu_pt_sync(d) )
+        if ( !has_iommu_pt(d) )
             return 0;
         return iommu_legacy_unmap(d, _dfn(gfn_l), PAGE_ORDER_4K);
     }
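
The distinction can be shown with toy predicates (standalone C; the domain
fields are invented, only the two predicate names come from the patch): a
relaxed PV hardware domain has IOMMU page tables but no need to sync them, so
gating the identity-map path on need_iommu_pt_sync() wrongly skipped it.

    #include <stdbool.h>
    #include <stdio.h>

    struct domain { bool iommu_present; bool need_sync; };

    static bool has_iommu_pt(const struct domain *d)
    {
        return d->iommu_present;
    }

    static bool need_iommu_pt_sync(const struct domain *d)
    {
        return d->iommu_present && d->need_sync;
    }

    static void set_identity_entry(const struct domain *d, unsigned long gfn)
    {
        if (!has_iommu_pt(d))       /* fixed gate: presence, not sync need */
            return;
        printf("mapping gfn %#lx 1:1 in the IOMMU\n", gfn);
    }

    int main(void)
    {
        /* Relaxed PV hardware domain: IOMMU in use, but no sync needed. */
        struct domain hwdom = { .iommu_present = true, .need_sync = false };

        /* The old gate returned early here and dropped the mapping. */
        if (!need_iommu_pt_sync(&hwdom))
            printf("old gate would have skipped the mapping\n");

        set_identity_entry(&hwdom, 0xfed00);
        return 0;
    }
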
++++++ 5d6524ca-x86-mm-correctly-init-M2P-entries.patch ++++++
# Commit 6c093931a765803cfc7b0df466ee032760cc8020
# Date 2019-08-27 13:40:42 +0100
# Author Igor Druzhinin <[email protected]>
# Committer Andrew Cooper <[email protected]>
x86/mm: correctly initialise M2P entries on boot

Since the guest resource management work it's now possible to have a page
assigned to a domain without a valid M2P entry. Some paths in the code
rely on the fact that a GFN returned from mfn_to_gfn() for such a page
is not valid as well; see e.g. arch_iommu_populate_page_table().

For systems without 512GB of contiguous RAM, M2P entries were already
correctly initialised on boot with INVALID_M2P_ENTRY (~0UL), but on
systems where the M2P could be covered by a single 1GB page directory
a 0x77 poison was used instead. That eventually resulted in a crash
during IOMMU construction on systems without shared PTs enabled.

While here, fix up compat M2P entries as well.

Signed-off-by: Igor Druzhinin <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -574,8 +574,9 @@ void __init paging_init(void)
                     page_to_mfn(l1_pg),
                     1UL << (2 * PAGETABLE_ORDER),
                     PAGE_HYPERVISOR);
+                /* Fill with INVALID_M2P_ENTRY. */
                 memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)),
-                       0x77, 1UL << L3_PAGETABLE_SHIFT);
+                       0xFF, 1UL << L3_PAGETABLE_SHIFT);
 
                 ASSERT(!l2_table_offset(va));
                 /* NB. Cannot be GLOBAL: guest user mode should not see it. */
@@ -666,10 +667,10 @@ void __init paging_init(void)
             page_to_mfn(l1_pg),
             1UL << PAGETABLE_ORDER,
             PAGE_HYPERVISOR);
+        /* Fill with INVALID_M2P_ENTRY. */
         memset((void *)(RDWR_COMPAT_MPT_VIRT_START +
                         (i << L2_PAGETABLE_SHIFT)),
-               0x55,
-               1UL << L2_PAGETABLE_SHIFT);
+               0xFF, 1UL << L2_PAGETABLE_SHIFT);
         /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
         l2e_write(l2_ro_mpt, l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
     }
