Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2017-10-17 01:49:50
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Tue Oct 17 01:49:50 2017 rev:235 rq:533118 version:4.9.0_50

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2017-09-22 21:31:58.884976319 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new/xen.changes     2017-10-17 01:49:56.645669334 +0200
@@ -1,0 +2,28 @@
+Tue Sep 26 08:44:03 MDT 2017 - carn...@suse.com
+
+- bsc#1056278 - VUL-0: xen: Missing NUMA node parameter
+  verification (XSA-231)
+  59b7d664-mm-make-sure-node-is-less-than-MAX_NUMNODES.patch
+- bsc#1056280 - VUL-0: xen: Missing check for grant table (XSA-232)
+  59b7d69b-grant_table-fix-GNTTABOP_cache_flush-handling.patch
+- bsc#1056281 - VUL-0: xen: cxenstored: Race in domain cleanup
+  (XSA-233)
+  59b7d6c8-xenstore-dont-unlink-connection-object-twice.patch
+- bsc#1056282 - VUL-0: xen: insufficient grant unmapping checks for
+  x86 PV guests (XSA-234)
+  59b7d6d9-gnttab-also-validate-PTE-perms-upon-destroy-replace.patch
+- bsc#1055321 - VUL-0: xen: add-to-physmap error paths fail to
+  release lock on ARM (XSA-235)
+  599da329-arm-mm-release-grant-lock-on-xatp1-error-paths.patch
+- Upstream patches from Jan (bsc#1027519)
+  59a01223-x86-check-for-alloc-errors-in-modify_xen_mappings.patch
+  59a0130c-x86-efi-dont-write-relocs-in-efi_arch_relocate_image-1st-pass.patch
+  59a9221f-VT-d-use-correct-BDF-for-VF-to-search-VT-d-unit.patch
+  59ae9177-x86-emul-fix-handling-of-unimplemented-Grp7-insns.patch
+  59aec335-x86emul-correct-VEX-W-handling-for-VPINSRD.patch
+  59aec375-x86emul-correct-VEX-L-handling-for-VCVTx2SI.patch
+  59afcea0-x86-introduce-and-use-setup_force_cpu_cap.patch
+  59b2a7f2-x86-HVM-correct-repeat-count-update-linear-phys.patch
+- Dropped gcc7-xen.patch
+
+-------------------------------------------------------------------

Old:
----
  gcc7-xen.patch

New:
----
  599da329-arm-mm-release-grant-lock-on-xatp1-error-paths.patch
  59a01223-x86-check-for-alloc-errors-in-modify_xen_mappings.patch
  59a0130c-x86-efi-dont-write-relocs-in-efi_arch_relocate_image-1st-pass.patch
  59a9221f-VT-d-use-correct-BDF-for-VF-to-search-VT-d-unit.patch
  59ae9177-x86-emul-fix-handling-of-unimplemented-Grp7-insns.patch
  59aec335-x86emul-correct-VEX-W-handling-for-VPINSRD.patch
  59aec375-x86emul-correct-VEX-L-handling-for-VCVTx2SI.patch
  59afcea0-x86-introduce-and-use-setup_force_cpu_cap.patch
  59b2a7f2-x86-HVM-correct-repeat-count-update-linear-phys.patch
  59b7d664-mm-make-sure-node-is-less-than-MAX_NUMNODES.patch
  59b7d69b-grant_table-fix-GNTTABOP_cache_flush-handling.patch
  59b7d6c8-xenstore-dont-unlink-connection-object-twice.patch
  59b7d6d9-gnttab-also-validate-PTE-perms-upon-destroy-replace.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.G7mzgF/_old  2017-10-17 01:49:59.241547713 +0200
+++ /var/tmp/diff_new_pack.G7mzgF/_new  2017-10-17 01:49:59.245547525 +0200
@@ -172,6 +172,19 @@
 Patch9:         59958e76-gnttab-dont-use-possibly-unbounded-tail-calls.patch
 Patch10:        59958ebf-gnttab-fix-transitive-grant-handling.patch
 Patch11:        59958edd-gnttab-avoid-spurious-maptrack-handle-alloc-failures.patch
+Patch12:        599da329-arm-mm-release-grant-lock-on-xatp1-error-paths.patch
+Patch13:        59a01223-x86-check-for-alloc-errors-in-modify_xen_mappings.patch
+Patch14:        59a0130c-x86-efi-dont-write-relocs-in-efi_arch_relocate_image-1st-pass.patch
+Patch15:        59a9221f-VT-d-use-correct-BDF-for-VF-to-search-VT-d-unit.patch
+Patch16:        59ae9177-x86-emul-fix-handling-of-unimplemented-Grp7-insns.patch
+Patch17:        59aec335-x86emul-correct-VEX-W-handling-for-VPINSRD.patch
+Patch18:        59aec375-x86emul-correct-VEX-L-handling-for-VCVTx2SI.patch
+Patch19:        59afcea0-x86-introduce-and-use-setup_force_cpu_cap.patch
+Patch20:        59b2a7f2-x86-HVM-correct-repeat-count-update-linear-phys.patch
+Patch21:        59b7d664-mm-make-sure-node-is-less-than-MAX_NUMNODES.patch
+Patch22:        59b7d69b-grant_table-fix-GNTTABOP_cache_flush-handling.patch
+Patch23:        59b7d6c8-xenstore-dont-unlink-connection-object-twice.patch
+Patch24:        59b7d6d9-gnttab-also-validate-PTE-perms-upon-destroy-replace.patch
 # Our platform specific patches
 Patch400:       xen-destdir.patch
 Patch401:       vif-bridge-no-iptables.patch
@@ -186,8 +199,7 @@
 Patch422:       stubdom-have-iovec.patch
 Patch423:       vif-route.patch
 Patch424:       gcc7-mini-os.patch
-Patch425:       gcc7-xen.patch
-Patch426:       gcc7-arm.patch
+Patch425:       gcc7-arm.patch
 # Other bug fixes or features
 Patch451:       xenconsole-no-multiple-connections.patch
 Patch452:       hibernate.patch
@@ -368,6 +380,19 @@
 %patch9 -p1
 %patch10 -p1
 %patch11 -p1
+%patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch16 -p1
+%patch17 -p1
+%patch18 -p1
+%patch19 -p1
+%patch20 -p1
+%patch21 -p1
+%patch22 -p1
+%patch23 -p1
+%patch24 -p1
 # Our platform specific patches
 %patch400 -p1
 %patch401 -p1
@@ -383,7 +408,6 @@
 %patch423 -p1
 %patch424 -p1
 %patch425 -p1
-%patch426 -p1
 # Other bug fixes or features
 %patch451 -p1
 %patch452 -p1

++++++ 599da329-arm-mm-release-grant-lock-on-xatp1-error-paths.patch ++++++
# Commit 59546c1897a90fe9af5ebbbb05ead8d98b4d17b9
# Date 2017-08-23 17:45:45 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
arm/mm: release grant lock on xenmem_add_to_physmap_one() error paths

Commit 55021ff9ab ("xen/arm: add_to_physmap_one: Avoid to map mfn 0 if
an error occurs") introduced error paths not releasing the grant table
lock. Replace them by a suitable check after the lock was dropped.

This is XSA-235.

Reported-by: Wei Liu <wei.l...@citrix.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Julien Grall <julien.gr...@arm.com>
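
The shape of the fix deserves spelling out: instead of returning with the
grant-table write lock still held, the error is recorded in a sentinel MFN,
the lock is dropped, and only then is -EINVAL returned. Below is a minimal
standalone sketch of that pattern; the lock and helper names are illustrative
stand-ins, not the actual Xen API.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the Xen pieces involved. */
    #define INVALID_MFN (~(uint64_t)0)

    static int lock_held;                  /* models the grant-table write lock */
    static void write_lock(void)   { lock_held = 1; }
    static void write_unlock(void) { lock_held = 0; }

    /* Map the grant frame at idx; INVALID_MFN is the in-band error sentinel. */
    static int map_grant_frame(unsigned int idx, unsigned int nr_frames)
    {
        uint64_t mfn;

        write_lock();
        /* No early 'return -EINVAL' here: that would leak the lock. */
        mfn = idx < nr_frames ? 0x1000 + idx : INVALID_MFN;
        if (mfn != INVALID_MFN)
            printf("mapping frame %#llx\n", (unsigned long long)mfn);
        write_unlock();

        /* The error is only acted upon after the lock has been dropped. */
        return mfn == INVALID_MFN ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
        map_grant_frame(3, 4);             /* succeeds */
        map_grant_frame(9, 4);             /* fails, but the lock is released */
        printf("lock still held: %d\n", lock_held);
        return 0;
    }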

--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1164,7 +1164,7 @@ int xenmem_add_to_physmap_one(
             if ( idx < nr_status_frames(d->grant_table) )
                 mfn = virt_to_mfn(d->grant_table->status[idx]);
             else
-                return -EINVAL;
+                mfn = mfn_x(INVALID_MFN);
         }
         else
         {
@@ -1175,14 +1175,21 @@ int xenmem_add_to_physmap_one(
             if ( idx < nr_grant_frames(d->grant_table) )
                 mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
             else
-                return -EINVAL;
+                mfn = mfn_x(INVALID_MFN);
         }
 
-        d->arch.grant_table_gfn[idx] = gfn;
+        if ( mfn != mfn_x(INVALID_MFN) )
+        {
+            d->arch.grant_table_gfn[idx] = gfn;
 
-        t = p2m_ram_rw;
+            t = p2m_ram_rw;
+        }
 
         grant_write_unlock(d->grant_table);
+
+        if ( mfn == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
         break;
     case XENMAPSPACE_shared_info:
         if ( idx != 0 )
++++++ 59a01223-x86-check-for-alloc-errors-in-modify_xen_mappings.patch ++++++
# Commit e466ec4f51d38a2c9d02bf9f3d5e43e47db2d66b
# Date 2017-08-25 14:03:47 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86: check for allocation errors in modify_xen_mappings()

Reported-by: Julien Grall <julien.gr...@arm.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -6162,7 +6162,7 @@ int modify_xen_mappings(unsigned long s,
     {
         l3_pgentry_t *pl3e = virt_to_xen_l3e(v);
 
-        if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+        if ( !pl3e || !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
         {
             /* Confirm the caller isn't trying to create new mappings. */
             ASSERT(!(nf & _PAGE_PRESENT));
@@ -6190,6 +6190,8 @@ int modify_xen_mappings(unsigned long s,
 
             /* PAGE1GB: shatter the superpage and fall through. */
             pl2e = alloc_xen_pagetable();
+            if ( !pl2e )
+                return -ENOMEM;
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                 l2e_write(pl2e + i,
                           l2e_from_pfn(l3e_get_pfn(*pl3e) +
@@ -6210,7 +6212,11 @@ int modify_xen_mappings(unsigned long s,
                 free_xen_pagetable(pl2e);
         }
 
-        pl2e = virt_to_xen_l2e(v);
+        /*
+         * The L3 entry has been verified to be present, and we've dealt with
+         * 1G pages as well, so the L2 table cannot require allocation.
+         */
+        pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
 
         if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
         {
@@ -6239,6 +6245,8 @@ int modify_xen_mappings(unsigned long s,
             {
                 /* PSE: shatter the superpage and try again. */
                 pl1e = alloc_xen_pagetable();
+                if ( !pl1e )
+                    return -ENOMEM;
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     l1e_write(&pl1e[i],
                               l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
@@ -6262,7 +6270,11 @@ int modify_xen_mappings(unsigned long s,
         {
             l1_pgentry_t nl1e;
 
-            /* Ordinary 4kB mapping. */
+            /*
+             * Ordinary 4kB mapping: The L2 entry has been verified to be
+             * present, and we've dealt with 2M pages as well, so the L1 table
+             * cannot require allocation.
+             */
             pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
 
             /* Confirm the caller isn't trying to create new mappings. */
++++++ 59a0130c-x86-efi-dont-write-relocs-in-efi_arch_relocate_image-1st-pass.patch ++++++
# Commit 34828425d36b560adfe96430b9b83dfb0f66f2a8
# Date 2017-08-25 14:07:40 +0200
# Author David Woodhouse <d...@amazon.co.uk>
# Committer Jan Beulich <jbeul...@suse.com>
x86/efi: don't write relocations in efi_arch_relocate_image() first pass

The function is invoked with delta=0 before ExitBootServices() is called,
as a dummy run purely to validate that all the relocations can be handled.
This allows us to exit gracefully with an error message.

However, we have relocations in read-only sections such as .rodata and
.init.te(xt). Recent versions of UEFI will actually make those sections
read-only, which will cause a fault. This functionality was added in
EDK2 commit d0e92aad4 ("MdeModulePkg/DxeCore: Add UEFI image protection.")

It's OK to actually make the changes in the later pass because UEFI will
tear down the protection when ExitBootServices() is called, because it
knows we're going to need to do this kind of thing.

Reported-by: Jan Beulich <jbeul...@suse.com>
Signed-off-by: David Woodhouse <d...@amazon.co.uk>
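
The two-pass structure is the key idea: the first pass runs with delta == 0
purely to validate that every relocation is of a supported type (firmware may
still have the sections write-protected at that point), and the second pass
performs the writes after ExitBootServices(). A hedged sketch follows; struct
reloc and relocate_image() are inventions for illustration, not the real EFI
loader types.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Invented relocation record; real PE base relocations are more involved. */
    struct reloc {
        uint64_t *addr;
        int supported;
    };

    /* Pass 1 (delta == 0): validate only. Pass 2 (delta != 0): patch. */
    static void relocate_image(struct reloc *r, size_t n, uint64_t delta)
    {
        for (size_t i = 0; i < n; i++) {
            if (!r[i].supported) {
                fprintf(stderr, "Unsupported relocation type\n");
                exit(1);                   /* stand-in for blexit() */
            }
            if (delta)                     /* skip the write on the dummy pass */
                *r[i].addr += delta;
        }
    }

    int main(void)
    {
        uint64_t slot = 0x1000;
        struct reloc r = { &slot, 1 };

        relocate_image(&r, 1, 0);          /* validation pass: no write */
        relocate_image(&r, 1, 0x200000);   /* real pass: apply the delta */
        printf("relocated value: %#llx\n", (unsigned long long)slot);
        return 0;
    }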

--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -87,7 +87,8 @@ static void __init efi_arch_relocate_ima
             case PE_BASE_RELOC_DIR64:
                 if ( in_page_tables(addr) )
                     blexit(L"Unexpected relocation type");
-                *(u64 *)addr += delta;
+                if ( delta )
+                    *(u64 *)addr += delta;
                 break;
             default:
                 blexit(L"Unsupported relocation type");
++++++ 59a9221f-VT-d-use-correct-BDF-for-VF-to-search-VT-d-unit.patch ++++++
# Commit c286af54c7177c14180121b422d8df7281e547cb
# Date 2017-09-01 11:02:23 +0200
# Author Chao Gao <chao....@intel.com>
# Committer Jan Beulich <jbeul...@suse.com>
VT-d: use correct BDF for VF to search VT-d unit

When SR-IOV is enabled, 'Virtual Functions' of a 'Physical Function'
are under the scope of the same VT-d unit as the 'Physical Function'.
A 'Physical Function' can be a 'Traditional Function' or an ARI
'Extended Function'. And furthermore, 'Extended Functions' on an
endpoint are under the scope of the same VT-d unit as the 'Traditional
Functions' on the endpoint. To find the VT-d unit for a VF, the BDF of
the PF should be used if the PF isn't an extended function; otherwise
the BDF of a traditional function in the same device as the PF should
be used.

Current code uses PCI_SLOT() to recognize an ARI 'Extended Function'.
But this is conceptually wrong without checking whether the PF is an
extended function, and would lead to matching VFs of an RC-integrated
PF to a wrong VT-d unit.

This patch overrides VF 'is_extfn' field and uses this field to
indicate whether the PF of this VF is an extended function. The field
helps to use correct BDF to search VT-d unit.

Reported-by: Crawford, Eric R <eric.r.crawf...@intel.com>
Signed-off-by: Chao Gao <chao....@intel.com>
Reviewed-by: Kevin Tian <kevin.t...@intel.com>
Acked-by: Jan Beulich <jbeul...@suse.com>
Tested-by: Crawford, Eric R <eric.r.crawf...@intel.com>
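
The resulting lookup rule is compact enough to illustrate. The sketch below
mirrors the new acpi_find_matched_drhd_unit() selection logic against a
trimmed-down pci_dev_info; it is a model for illustration, not the actual
Xen code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Trimmed-down view of struct pci_dev_info from xen/include/xen/pci.h. */
    struct pci_dev_info {
        bool is_extfn;   /* for a VF: whether its PF is an extended function */
        bool is_virtfn;
        struct { unsigned int bus, devfn; } physfn;
    };

    /* Which BDF should be used to look up the VT-d unit? */
    static void drhd_lookup_bdf(const struct pci_dev_info *info,
                                unsigned int dev_bus, unsigned int dev_devfn,
                                unsigned int *bus, unsigned int *devfn)
    {
        if (info->is_virtfn) {
            /* VF: use the PF's bus; devfn 0 only if the PF is extended. */
            *bus = info->physfn.bus;
            *devfn = info->is_extfn ? 0 : info->physfn.devfn;
        } else if (info->is_extfn) {
            /* Extended function: same unit as traditional function 0. */
            *bus = dev_bus;
            *devfn = 0;
        } else {
            *bus = dev_bus;
            *devfn = dev_devfn;
        }
    }

    int main(void)
    {
        struct pci_dev_info vf = { .is_extfn = false, .is_virtfn = true,
                                   .physfn = { 0x05, 0x10 } };
        unsigned int bus, devfn;

        drhd_lookup_bdf(&vf, 0x06, 0x02, &bus, &devfn);
        printf("lookup via %02x:%02x.%u\n", bus, devfn >> 3, devfn & 7);
        return 0;
    }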

--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -595,21 +595,24 @@ int pci_add_device(u16 seg, u8 bus, u8 d
     unsigned int slot = PCI_SLOT(devfn), func = PCI_FUNC(devfn);
     const char *pdev_type;
     int ret;
+    bool pf_is_extfn = false;
 
-    if (!info)
+    if ( !info )
         pdev_type = "device";
-    else if (info->is_extfn)
-        pdev_type = "extended function";
-    else if (info->is_virtfn)
+    else if ( info->is_virtfn )
     {
         pcidevs_lock();
         pdev = pci_get_pdev(seg, info->physfn.bus, info->physfn.devfn);
+        if ( pdev )
+            pf_is_extfn = pdev->info.is_extfn;
         pcidevs_unlock();
         if ( !pdev )
             pci_add_device(seg, info->physfn.bus, info->physfn.devfn,
                            NULL, node);
         pdev_type = "virtual function";
     }
+    else if ( info->is_extfn )
+        pdev_type = "extended function";
     else
     {
         info = NULL;
@@ -633,7 +636,15 @@ int pci_add_device(u16 seg, u8 bus, u8 d
     pdev->node = node;
 
     if ( info )
+    {
         pdev->info = *info;
+        /*
+         * VF's 'is_extfn' field is used to indicate whether its PF is an
+         * extended function.
+         */
+        if ( pdev->info.is_virtfn )
+            pdev->info.is_extfn = pf_is_extfn;
+    }
     else if ( !pdev->vf_rlen[0] )
     {
         unsigned int pos = pci_find_ext_capability(seg, bus, devfn,
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -211,15 +211,15 @@ struct acpi_drhd_unit *acpi_find_matched
     if ( pdev == NULL )
         return NULL;
 
-    if ( pdev->info.is_extfn )
+    if ( pdev->info.is_virtfn )
     {
-        bus = pdev->bus;
-        devfn = 0;
+        bus = pdev->info.physfn.bus;
+        devfn = !pdev->info.is_extfn ? pdev->info.physfn.devfn : 0;
     }
-    else if ( pdev->info.is_virtfn )
+    else if ( pdev->info.is_extfn )
     {
-        bus = pdev->info.physfn.bus;
-        devfn = PCI_SLOT(pdev->info.physfn.devfn) ? 0 : pdev->info.physfn.devfn;
+        bus = pdev->bus;
+        devfn = 0;
     }
     else
     {
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -39,6 +39,10 @@
 #define PCI_SBDF3(s,b,df) ((((s) & 0xffff) << 16) | PCI_BDF2(b, df))
 
 struct pci_dev_info {
+    /*
+     * VF's 'is_extfn' field is used to indicate whether its PF is an extended
+     * function.
+     */
     bool_t is_extfn;
     bool_t is_virtfn;
     struct {
++++++ 59ae9177-x86-emul-fix-handling-of-unimplemented-Grp7-insns.patch ++++++
# Commit 4d3f0fde471e7588ce512eaff1abdab209d8cd4b
# Date 2017-09-05 12:58:47 +0100
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Andrew Cooper <andrew.coop...@citrix.com>
x86/emul: Fix the handling of unimplemented Grp7 instructions

Grp7 is abnormally complicated to decode, even by x86's standards, with
{s,l}msw being the problematic cases.

Previously, any value which fell through the first switch statement (looking
for instructions with entirely implicit operands) would be interpreted by the
second switch statement (handling instructions with memory operands).

Unimplemented instructions would then hit the #UD case for having a non-memory
operand, rather than taking the cannot_emulate path.

Consolidate the two switch statements into a single one, using ranges to cover
the instructions with memory operands.

Reported-by: Petre Pircalabu <ppircal...@bitdefender.com>
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>
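
The consolidation leans on GCC's case-range extension so a single switch can
cover every modrm byte with a memory operand for a given /reg value, while
unhandled register forms fall to the default label. A small self-contained
illustration (the handled subset here is arbitrary; the real emulator covers
far more cases):

    /* Build with GCC or Clang: case ranges are a GNU extension, as in the patch. */
    #include <stdio.h>

    #define _GRP7(mod, reg) \
        (((mod) << 6) | ((reg) << 3)) ... (((mod) << 6) | ((reg) << 3) | 7)
    #define GRP7_MEM(reg) _GRP7(0, reg): case _GRP7(1, reg): case _GRP7(2, reg)
    #define GRP7_ALL(reg) GRP7_MEM(reg): case _GRP7(3, reg)

    /* Classify a Grp7 modrm byte the way the consolidated switch does. */
    static const char *grp7_decode(unsigned char modrm)
    {
        switch (modrm) {
        case 0xca: return "clac (implicit operands)";
        case GRP7_MEM(0): return "sgdt (memory operand only)";
        case GRP7_MEM(2): return "lgdt (memory operand only)";
        case GRP7_ALL(4): return "smsw (memory or register)";
        default:   return "cannot_emulate";
        }
    }

    int main(void)
    {
        printf("%02x -> %s\n", 0x01, grp7_decode(0x01)); /* mod=0 reg=0: sgdt */
        printf("%02x -> %s\n", 0xc1, grp7_decode(0xc1)); /* unhandled reg form */
        printf("%02x -> %s\n", 0xe4, grp7_decode(0xe4)); /* mod=3 reg=4: smsw */
        return 0;
    }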

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4986,9 +4986,12 @@ x86_emulate(
         }
         break;
 
-    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */ {
+    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */
+    {
         unsigned long base, limit, cr0, cr0w;
 
+        seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;
+
         switch( modrm )
         {
         case 0xca: /* clac */
@@ -4999,7 +5002,7 @@ x86_emulate(
             _regs.eflags &= ~X86_EFLAGS_AC;
             if ( modrm == 0xcb )
                 _regs.eflags |= X86_EFLAGS_AC;
-            goto complete_insn;
+            break;
 
 #ifdef __XEN__
         case 0xd1: /* xsetbv */
@@ -5011,7 +5014,7 @@ x86_emulate(
                                   handle_xsetbv(_regs.ecx,
                                                 _regs.eax | (_regs.rdx << 32)),
                                   EXC_GP, 0);
-            goto complete_insn;
+            break;
 #endif
 
         case 0xd4: /* vmfunc */
@@ -5019,7 +5022,7 @@ x86_emulate(
             fail_if(!ops->vmfunc);
             if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
                 goto done;
-            goto complete_insn;
+            break;
 
         case 0xd5: /* xend */
             generate_exception_if(vex.pfx, EXC_UD);
@@ -5033,7 +5036,7 @@ x86_emulate(
                                   EXC_UD);
             /* Neither HLE nor RTM can be active when we get here. */
             _regs.eflags |= X86_EFLAGS_ZF;
-            goto complete_insn;
+            break;
 
         case 0xdf: /* invlpga */
             generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
@@ -5042,7 +5045,7 @@ x86_emulate(
             if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
                                    ctxt)) )
                 goto done;
-            goto complete_insn;
+            break;
 
         case 0xf9: /* rdtscp */
             fail_if(ops->read_msr == NULL);
@@ -5090,17 +5093,17 @@ x86_emulate(
                 base += sizeof(zero);
                 limit -= sizeof(zero);
             }
-            goto complete_insn;
-        }
+            break;
         }
 
-        seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;
-
-        switch ( modrm_reg & 7 )
-        {
-        case 0: /* sgdt */
-        case 1: /* sidt */
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
+#define _GRP7(mod, reg) \
+            (((mod) << 6) | ((reg) << 3)) ... (((mod) << 6) | ((reg) << 3) | 7)
+#define GRP7_MEM(reg) _GRP7(0, reg): case _GRP7(1, reg): case _GRP7(2, reg)
+#define GRP7_ALL(reg) GRP7_MEM(reg): case _GRP7(3, reg)
+
+        case GRP7_MEM(0): /* sgdt */
+        case GRP7_MEM(1): /* sidt */
+            ASSERT(ea.type == OP_MEM);
             generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
             fail_if(!ops->read_segment || !ops->write);
             if ( (rc = ops->read_segment(seg, &sreg, ctxt)) )
@@ -5118,10 +5121,11 @@ x86_emulate(
                                   op_bytes, ctxt)) != X86EMUL_OKAY )
                 goto done;
             break;
-        case 2: /* lgdt */
-        case 3: /* lidt */
+
+        case GRP7_MEM(2): /* lgdt */
+        case GRP7_MEM(3): /* lidt */
+            ASSERT(ea.type == OP_MEM);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
             fail_if(ops->write_segment == NULL);
             memset(&sreg, 0, sizeof(sreg));
             if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
@@ -5137,7 +5141,8 @@ x86_emulate(
             if ( (rc = ops->write_segment(seg, &sreg, ctxt)) )
                 goto done;
             break;
-        case 4: /* smsw */
+
+        case GRP7_ALL(4): /* smsw */
             generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
             if ( ea.type == OP_MEM )
             {
@@ -5152,7 +5157,8 @@ x86_emulate(
             if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
                 goto done;
             break;
-        case 6: /* lmsw */
+
+        case GRP7_ALL(6): /* lmsw */
             fail_if(ops->read_cr == NULL);
             fail_if(ops->write_cr == NULL);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
@@ -5168,13 +5174,19 @@ x86_emulate(
             if ( (rc = ops->write_cr(0, cr0, ctxt)) )
                 goto done;
             break;
-        case 7: /* invlpg */
+
+        case GRP7_MEM(7): /* invlpg */
+            ASSERT(ea.type == OP_MEM);
             generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
             fail_if(ops->invlpg == NULL);
             if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
                 goto done;
             break;
+
+#undef GRP7_ALL
+#undef GRP7_MEM
+#undef _GRP7
+
         default:
             goto cannot_emulate;
         }
++++++ 59aec335-x86emul-correct-VEX-W-handling-for-VPINSRD.patch ++++++
# Commit 9c2babd05a213f8802e3cc1c64a2af932b5cbd7d
# Date 2017-09-05 17:31:01 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: correct VEX.W handling for non-64-bit VPINSRD

Going through the XED commits from the last couple of months made me
notice that VPINSRD, other than VPEXTRD, does not clear VEX.W for non-
64-bit modes, leading to an insertion of stray 32-bits of zero in case
the original instruction had the bit set.

Also remove a pointless fall-through in VPEXTRW handling, bringing
things in line with VPINSRW.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
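
The effect of the one-line fix is easy to show in isolation. This sketch
models only the operand-size decision, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the operand-size decision for VPINSR{D,Q} (66 0f3a 22). */
    struct vex { unsigned int w:1, l:1; };

    static unsigned int pinsr_op_bytes(struct vex *vex, bool mode_64bit)
    {
        if (!mode_64bit)
            vex->w = 0;        /* the fix: outside 64-bit mode, ignore VEX.W */
        return vex->w ? 8 : 4; /* 8 selects VPINSRQ, 4 selects VPINSRD */
    }

    int main(void)
    {
        struct vex v = { .w = 1, .l = 0 };

        /* Without the reset, a stray VEX.W=1 in 32-bit mode would insert
         * 64 bits (the upper 32 of them zero) instead of the intended 32. */
        printf("op_bytes in 32-bit mode: %u\n", pinsr_op_bytes(&v, false));
        return 0;
    }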

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6744,10 +6744,9 @@ x86_emulate(
         ea.type = OP_MEM;
         goto simd_0f_int_imm8;
 
+    CASE_SIMD_PACKED_INT(0x0f, 0xc5):      /* pextrw $imm8,{,x}mm,reg */
     case X86EMUL_OPC_VEX_66(0x0f, 0xc5):   /* vpextrw $imm8,xmm,reg */
         generate_exception_if(vex.l, EXC_UD);
-        /* fall through */
-    CASE_SIMD_PACKED_INT(0x0f, 0xc5):      /* pextrw $imm8,{,x}mm,reg */
         opc = init_prefixes(stub);
         opc[0] = b;
         /* Convert GPR destination to %rAX. */
@@ -7526,6 +7525,8 @@ x86_emulate(
     case X86EMUL_OPC_VEX_66(0x0f3a, 0x20): /* vpinsrb $imm8,r32/m8,xmm,xmm */
     case X86EMUL_OPC_VEX_66(0x0f3a, 0x22): /* vpinsr{d,q} $imm8,r/m,xmm,xmm */
         generate_exception_if(vex.l, EXC_UD);
+        if ( !mode_64bit() )
+            vex.w = 0;
         memcpy(mmvalp, &src.val, op_bytes);
         ea.type = OP_MEM;
         op_bytes = src.bytes;
++++++ 59aec375-x86emul-correct-VEX-L-handling-for-VCVTx2SI.patch ++++++
# Commit a6488965ca3ec30f2e0b7022b539bba78c2aeede
# Date 2017-09-05 17:32:05 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86emul: correct VEX.L handling for VCVT{,T}S{S,D}2SI

Recent changes to the SDM (and XED) have made clear that older hardware
raising #UD when the bit is set was really an erratum. Generalize the
so far AMD-only override.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -5609,9 +5609,8 @@ x86_emulate(
         }
         else
         {
-            if ( ctxt->vendor == X86_VENDOR_AMD )
-                vex.l = 0;
-            generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD);
+            generate_exception_if(vex.reg != 0xf, EXC_UD);
+            vex.l = 0;
             host_and_vcpu_must_have(avx);
             get_fpu(X86EMUL_FPU_ymm, &fic);
         }
++++++ 59afcea0-x86-introduce-and-use-setup_force_cpu_cap.patch ++++++
# Commit 0829a6bdbdc6b79990bd0668e847275b6a2717e5
# Date 2017-09-06 12:32:00 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86: introduce and use setup_force_cpu_cap()

For XEN_SMEP and XEN_SMAP to not be cleared while bringing up APs we'd
need to clone the respective hack used for CPUID_FAULTING. Introduce an
inverse of setup_clear_cpu_cap() instead, but let clearing of features
overrule forced setting of them.

XEN_SMAP being wrong post-boot is a problem specifically for live
patching, as a live patch may need alternative instruction patching
keyed off of that feature flag.

Reported-by: Sarah Newman <secur...@prgmr.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
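
The interplay of the two bitmaps can be modelled in a few lines. The sketch
below (single-word bitmaps, no deep-dependency handling, illustrative names)
shows forcing, clearing overruling forcing, and the merge applied whenever a
CPU is identified:

    #include <stdio.h>

    static unsigned long forced_caps, cleared_caps;

    static void setup_clear_cpu_cap(unsigned int cap)
    {
        cleared_caps |= 1UL << cap;
    }

    static void setup_force_cpu_cap(unsigned int cap)
    {
        if (cleared_caps & (1UL << cap)) {
            printf("refusing to force previously cleared feature %u\n", cap);
            return;                    /* clearing overrules forcing */
        }
        forced_caps |= 1UL << cap;
    }

    /* What identify_cpu() applies to each CPU's raw capability word. */
    static unsigned long identify_cpu(unsigned long raw_caps)
    {
        return (raw_caps | forced_caps) & ~cleared_caps;
    }

    int main(void)
    {
        enum { XEN_SMAP = 3 };

        setup_force_cpu_cap(XEN_SMAP);
        /* An AP whose raw word lacks the synthetic bit still ends up with it,
         * so post-boot alternatives keyed off the flag stay correct. */
        printf("AP caps: %#lx\n", identify_cpu(0x1));
        return 0;
    }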

--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -856,7 +856,7 @@ static int __init detect_init_APIC (void
         return -1;
     }
 
-    __set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
+    setup_force_cpu_cap(X86_FEATURE_APIC);
     mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
     /* The BIOS may have set up the APIC at some other address */
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -54,6 +54,7 @@ unsigned int vaddr_bits __read_mostly =
 u64 host_pat = 0x050100070406;
 
 static unsigned int cleared_caps[NCAPINTS];
+static unsigned int forced_caps[NCAPINTS];
 
 void __init setup_clear_cpu_cap(unsigned int cap)
 {
@@ -63,6 +64,10 @@ void __init setup_clear_cpu_cap(unsigned
        if (__test_and_set_bit(cap, cleared_caps))
                return;
 
+       if (test_bit(cap, forced_caps))
+               printk("%pS clearing previously forced feature %#x\n",
+                      __builtin_return_address(0), cap);
+
        __clear_bit(cap, boot_cpu_data.x86_capability);
        dfs = lookup_deep_deps(cap);
 
@@ -72,7 +77,26 @@ void __init setup_clear_cpu_cap(unsigned
        for (i = 0; i < FSCAPINTS; ++i) {
                cleared_caps[i] |= dfs[i];
                boot_cpu_data.x86_capability[i] &= ~dfs[i];
+               if (!(forced_caps[i] & dfs[i]))
+                       continue;
+               printk("%pS implicitly clearing previously forced feature(s) %u:%#x\n",
+                      __builtin_return_address(0),
+                      i, forced_caps[i] & dfs[i]);
+       }
+}
+
+void __init setup_force_cpu_cap(unsigned int cap)
+{
+       if (__test_and_set_bit(cap, forced_caps))
+               return;
+
+       if (test_bit(cap, cleared_caps)) {
+               printk("%pS tries to force previously cleared feature %#x\n",
+                      __builtin_return_address(0), cap);
+               return;
        }
+
+       __set_bit(cap, boot_cpu_data.x86_capability);
 }
 
 static void default_init(struct cpuinfo_x86 * c)
@@ -375,8 +399,10 @@ void identify_cpu(struct cpuinfo_x86 *c)
        for (i = 0; i < FSCAPINTS; ++i)
                c->x86_capability[i] &= known_features[i];
 
-       for (i = 0 ; i < NCAPINTS ; ++i)
+       for (i = 0 ; i < NCAPINTS ; ++i) {
+               c->x86_capability[i] |= forced_caps[i];
                c->x86_capability[i] &= ~cleared_caps[i];
+       }
 
        /* If the model name is still unset, do table lookup. */
        if ( !c->x86_model_id[0] ) {
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -27,7 +27,7 @@ static bool __init probe_intel_cpuid_fau
 
        expected_levelling_cap |= LCAP_faulting;
        levelling_caps |=  LCAP_faulting;
-       __set_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability);
+       setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
        return 1;
 }
 
@@ -320,9 +320,6 @@ static void early_init_intel(struct cpui
        if (c == &boot_cpu_data)
                intel_init_levelling();
 
-       if (test_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability))
-               __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
-
        intel_ctxt_switch_levelling(NULL);
 }
 
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1470,14 +1470,14 @@ void __init noreturn __start_xen(unsigne
     if ( !opt_smep )
         setup_clear_cpu_cap(X86_FEATURE_SMEP);
     if ( cpu_has_smep && opt_smep != SMEP_HVM_ONLY )
-        __set_bit(X86_FEATURE_XEN_SMEP, boot_cpu_data.x86_capability);
+        setup_force_cpu_cap(X86_FEATURE_XEN_SMEP);
     if ( boot_cpu_has(X86_FEATURE_XEN_SMEP) )
         set_in_cr4(X86_CR4_SMEP);
 
     if ( !opt_smap )
         setup_clear_cpu_cap(X86_FEATURE_SMAP);
     if ( cpu_has_smap && opt_smap != SMAP_HVM_ONLY )
-        __set_bit(X86_FEATURE_XEN_SMAP, boot_cpu_data.x86_capability);
+        setup_force_cpu_cap(X86_FEATURE_XEN_SMAP);
     if ( boot_cpu_has(X86_FEATURE_XEN_SMAP) )
         set_in_cr4(X86_CR4_SMAP);
 
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -168,6 +168,7 @@ extern const struct x86_cpu_id *x86_matc
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void setup_clear_cpu_cap(unsigned int);
+extern void setup_force_cpu_cap(unsigned int);
 extern void print_cpu_info(unsigned int cpu);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 
++++++ 59b2a7f2-x86-HVM-correct-repeat-count-update-linear-phys.patch ++++++
# Commit 49160d205236d8e36d27d40b6bf69b9b75f2c333
# Date 2017-09-08 16:23:46 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
x86/HVM: correct repeat count update in linear->phys translation

For the insn emulator's fallback logic in REP INS/OUTS handling
to work correctly, *reps must not be set to zero when returning
X86EMUL_UNHANDLEABLE.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
Acked-by: Paul Durrant <paul.durr...@citrix.com>
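
The contract is easiest to see schematically. Here linear_to_phys() is a
stand-in for hvmemul_linear_to_phys() that models only the repeat-count
handling:

    #include <stdbool.h>
    #include <stdio.h>

    enum { X86EMUL_OKAY, X86EMUL_EXCEPTION, X86EMUL_UNHANDLEABLE };

    /* *reps may only be shrunk when reporting success or a fault; it must be
     * left untouched for X86EMUL_UNHANDLEABLE so the REP INS/OUTS fallback
     * (retrying one repetition at a time) still sees work to do. */
    static int linear_to_phys(unsigned long done, bool fault_to_report,
                              unsigned long *reps)
    {
        if (done == 0) {
            if (!fault_to_report)
                return X86EMUL_UNHANDLEABLE; /* *reps deliberately untouched */
            *reps = 0;
            return X86EMUL_EXCEPTION;        /* inject #PF, zero reps done */
        }
        *reps = done;                        /* partial progress */
        return X86EMUL_OKAY;
    }

    int main(void)
    {
        unsigned long reps = 8;
        int rc = linear_to_phys(0, false, &reps);

        printf("rc=%d, reps=%lu (fallback can still retry)\n", rc, reps);
        return 0;
    }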

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -566,15 +566,16 @@ static int hvmemul_linear_to_phys(
             if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
                 return X86EMUL_RETRY;
             done /= bytes_per_rep;
-            *reps = done;
             if ( done == 0 )
             {
                 ASSERT(!reverse);
                 if ( npfn != gfn_x(INVALID_GFN) )
                     return X86EMUL_UNHANDLEABLE;
+                *reps = 0;
                 x86_emul_pagefault(pfec, addr & PAGE_MASK, 
&hvmemul_ctxt->ctxt);
                 return X86EMUL_EXCEPTION;
             }
+            *reps = done;
             break;
         }
 
++++++ 59b7d664-mm-make-sure-node-is-less-than-MAX_NUMNODES.patch ++++++
# Commit 2fece35303529395bfea6b03d2268380ef682c93
# Date 2017-09-12 14:43:16 +0200
# Author George Dunlap <george.dun...@citrix.com>
# Committer Jan Beulich <jbeul...@suse.com>
xen/mm: make sure node is less than MAX_NUMNODES

The output of MEMF_get_node(memflags) can be as large as nodeid_t can
hold (currently 255).  It is then used as an index into arrays of size
MAX_NUMNODES, which is 64 on x86 and 1 on ARM.  The value can be passed
in by an untrusted guest (via memory_exchange and increase_reservation)
and is not currently bounds-checked.

Check the value in page_alloc.c before using it, and also check the
value in the hypercall call sites and return -EINVAL if appropriate.
Don't permit domains other than the hardware or control domain to
allocate node-constrained memory.

This is CVE-2017-14316 / XSA-231.

Reported-by: Matthew Daley <ma...@bugfuzz.com>
Signed-off-by: George Dunlap <george.dun...@citrix.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
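
The sanitisation pattern introduced by propagate_node() stands on its own;
the sketch below simplifies the constants and the privilege check for
illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NUMNODES 64       /* 64 on x86, 1 on ARM */
    #define NUMA_NO_NODE 0xffu

    /* Bound the node before it can ever be used as an array index, and only
     * honour node requests from privileged (hardware/control) domains. */
    static bool propagate_node(unsigned int node, bool exact, bool privileged,
                               unsigned int *node_out)
    {
        if (node == NUMA_NO_NODE)
            return true;              /* no node requested */

        if (privileged) {
            if (node >= MAX_NUMNODES)
                return false;         /* out of range: caller returns -EINVAL */
            *node_out = node;
            return true;
        }

        /* Unprivileged: the hint is ignored; an exact request is an error. */
        return !exact;
    }

    int main(void)
    {
        unsigned int node = NUMA_NO_NODE;

        printf("control domain, node 255: %s\n",
               propagate_node(255, false, true, &node) ? "ok" : "rejected");
        printf("control domain, node 2:   %s\n",
               propagate_node(2, false, true, &node) ? "ok" : "rejected");
        return 0;
    }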

--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -411,6 +411,31 @@ static void decrease_reservation(struct
     a->nr_done = i;
 }
 
+static bool propagate_node(unsigned int xmf, unsigned int *memflags)
+{
+    const struct domain *currd = current->domain;
+
+    BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
+    BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
+
+    if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
+        return true;
+
+    if ( is_hardware_domain(currd) || is_control_domain(currd) )
+    {
+        if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
+            return false;
+
+        *memflags |= MEMF_node(XENMEMF_get_node(xmf));
+        if ( xmf & XENMEMF_exact_node_request )
+            *memflags |= MEMF_exact_node;
+    }
+    else if ( xmf & XENMEMF_exact_node_request )
+        return false;
+
+    return true;
+}
+
 static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
@@ -483,6 +508,12 @@ static long memory_exchange(XEN_GUEST_HA
         }
     }
 
+    if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
+    {
+        rc = -EINVAL;
+        goto fail_early;
+    }
+
     d = rcu_lock_domain_by_any_id(exch.in.domid);
     if ( d == NULL )
     {
@@ -501,7 +532,6 @@ static long memory_exchange(XEN_GUEST_HA
         d,
         XENMEMF_get_address_bits(exch.out.mem_flags) ? :
         (BITS_PER_LONG+PAGE_SHIFT)));
-    memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -864,12 +894,8 @@ static int construct_memop_from_reservat
         }
         read_unlock(&d->vnuma_rwlock);
     }
-    else
-    {
-        a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
-        if ( r->mem_flags & XENMEMF_exact_node_request )
-            a->memflags |= MEMF_exact_node;
-    }
+    else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
+        return -EINVAL;
 
     return 0;
 }
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -706,9 +706,13 @@ static struct page_info *alloc_heap_page
         if ( node >= MAX_NUMNODES )
             node = cpu_to_node(smp_processor_id());
     }
+    else if ( unlikely(node >= MAX_NUMNODES) )
+    {
+        ASSERT_UNREACHABLE();
+        return NULL;
+    }
     first_node = node;
 
-    ASSERT(node < MAX_NUMNODES);
     ASSERT(zone_lo <= zone_hi);
     ASSERT(zone_hi < NR_ZONES);
 
++++++ 59b7d69b-grant_table-fix-GNTTABOP_cache_flush-handling.patch ++++++
# Commit c3d830b244998b3686e2eb64db95996be5eb5e5c
# Date 2017-09-12 14:44:11 +0200
# Author Andrew Cooper <andrew.coop...@citrix.com>
# Committer Jan Beulich <jbeul...@suse.com>
grant_table: fix GNTTABOP_cache_flush handling

Don't fall over a NULL grant_table pointer when the owner of the frame
is a system domain (DOMID_{XEN,IO} etc).

This is CVE-2017-14318 / XSA-232.

Reported-by: Matthew Daley <ma...@bugfuzz.com>
Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3030,7 +3030,7 @@ static int __gnttab_cache_flush(gnttab_c
 
     page = mfn_to_page(mfn);
     owner = page_get_owner_and_reference(page);
-    if ( !owner )
+    if ( !owner || !owner->grant_table )
     {
         rcu_unlock_domain(d);
         return -EPERM;
++++++ 59b7d6c8-xenstore-dont-unlink-connection-object-twice.patch ++++++
# Commit 562a1c0f7ef3fbf3c122c3dfa4f2ad9dd51da9fe
# Date 2017-09-12 14:44:56 +0200
# Author Juergen Gross <jgr...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
tools/xenstore: don't unlink connection object twice

A connection object of a domain with associated stubdom has two
parents: the domain and the stubdom. When cleaning up the list of
active domains in domain_cleanup() make sure not to unlink the
connection twice from the same domain. This could happen when the
domain and its stubdom are being destroyed at the same time leading
to the domain loop being entered twice.

Additionally don't use talloc_free() in this case as it will remove
a random parent link, leading eventually to a memory leak. Use
talloc_unlink() instead specifying the context from which the
connection object should be removed.

This is CVE-2017-14317 / XSA-233.

Reported-by: Eric Chanudet <chanud...@ainfosec.com>
Signed-off-by: Juergen Gross <jgr...@suse.com>
Reviewed-by: Ian Jackson <ian.jack...@eu.citrix.com>
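
The talloc distinction at the heart of the fix is easy to demonstrate. The
sketch below assumes libtalloc is installed (build with -ltalloc); reference
handling differs in detail between talloc versions, so treat it purely as an
illustration of talloc_unlink() naming the parent explicitly:

    #include <stdio.h>
    #include <talloc.h>

    int main(void)
    {
        void *domain  = talloc_new(NULL);  /* first parent */
        void *stubdom = talloc_new(NULL);  /* second parent */

        /* A connection with two parents, as for a domain with a stubdom. */
        char *conn = talloc_strdup(domain, "connection");
        talloc_reference(stubdom, conn);

        /* talloc_free() on a multi-parent object removes an unspecified
         * link; talloc_unlink() names the parent, so the object survives
         * until the last parent lets go. */
        talloc_unlink(domain, conn);
        printf("still alive via stubdom: %s\n", conn);
        talloc_unlink(stubdom, conn);      /* now actually freed */

        talloc_free(domain);
        talloc_free(stubdom);
        return 0;
    }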

--- a/tools/xenstore/xenstored_domain.c
+++ b/tools/xenstore/xenstored_domain.c
@@ -221,10 +221,11 @@ static int destroy_domain(void *_domain)
 static void domain_cleanup(void)
 {
        xc_dominfo_t dominfo;
-       struct domain *domain, *tmp;
+       struct domain *domain;
        int notify = 0;
 
-       list_for_each_entry_safe(domain, tmp, &domains, list) {
+ again:
+       list_for_each_entry(domain, &domains, list) {
                if (xc_domain_getinfo(*xc_handle, domain->domid, 1,
                                      &dominfo) == 1 &&
                    dominfo.domid == domain->domid) {
@@ -236,8 +237,12 @@ static void domain_cleanup(void)
                        if (!dominfo.dying)
                                continue;
                }
-               talloc_free(domain->conn);
-               notify = 0; /* destroy_domain() fires the watch */
+               if (domain->conn) {
+                       talloc_unlink(talloc_autofree_context(), domain->conn);
+                       domain->conn = NULL;
+                       notify = 0; /* destroy_domain() fires the watch */
+                       goto again;
+               }
        }
 
        if (notify)
++++++ 59b7d6d9-gnttab-also-validate-PTE-perms-upon-destroy-replace.patch ++++++
# Commit 16b1414de91b5a82a0996c67f6db3af7d7e32873
# Date 2017-09-12 14:45:13 +0200
# Author Jan Beulich <jbeul...@suse.com>
# Committer Jan Beulich <jbeul...@suse.com>
gnttab: also validate PTE permissions upon destroy/replace

In order for PTE handling to match up with the reference counting done
by common code, presence and writability of grant mapping PTEs must
also be taken into account; validating just the frame number is not
enough. This is in particular relevant if a guest fiddles with grant
PTEs via non-grant hypercalls.

Note that the flags being passed to replace_grant_host_mapping()
already happen to be those of the existing mapping, so no new function
parameter is needed.

This is CVE-2017-14319 / XSA-234.

Reported-by: Andrew Cooper <andrew.coop...@citrix.com>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
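
The strengthened check boils down to comparing the frame and the
presence/writability bits together rather than the frame alone. A minimal
model with simplified flag values and an invented helper name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x001u
    #define _PAGE_RW      0x002u

    /* The PTE must map the expected frame AND agree with the grant's
     * presence/writability; checking the frame alone is not enough. */
    static bool grant_pte_matches(uint64_t pte, unsigned long frame,
                                  unsigned int grant_flags)
    {
        if ((pte >> 12) != frame)
            return false;             /* wrong frame */
        if ((pte ^ grant_flags) & (_PAGE_PRESENT | _PAGE_RW))
            return false;             /* permissions were fiddled with */
        return true;
    }

    int main(void)
    {
        unsigned int grant_flags = _PAGE_PRESENT | _PAGE_RW;
        uint64_t good = (0x1234ULL << 12) | _PAGE_PRESENT | _PAGE_RW;
        uint64_t ro   = (0x1234ULL << 12) | _PAGE_PRESENT;  /* RW cleared */

        printf("intact mapping : %d\n",
               grant_pte_matches(good, 0x1234, grant_flags));
        printf("guest-modified : %d\n",
               grant_pte_matches(ro, 0x1234, grant_flags));
        return 0;
    }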

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4058,7 +4058,8 @@ static int create_grant_pte_mapping(
 }
 
 static int destroy_grant_pte_mapping(
-    uint64_t addr, unsigned long frame, struct domain *d)
+    uint64_t addr, unsigned long frame, unsigned int grant_pte_flags,
+    struct domain *d)
 {
     int rc = GNTST_okay;
     void *va;
@@ -4104,17 +4105,29 @@ static int destroy_grant_pte_mapping(
 
     ol1e = *(l1_pgentry_t *)va;
     
-    /* Check that the virtual address supplied is actually mapped to frame. */
-    if ( unlikely(l1e_get_pfn(ol1e) != frame) )
+    /*
+     * Check that the PTE supplied actually maps frame (with appropriate
+     * permissions).
+     */
+    if ( unlikely(l1e_get_pfn(ol1e) != frame) ||
+         unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
+                  (_PAGE_PRESENT | _PAGE_RW)) )
     {
         page_unlock(page);
-        gdprintk(XENLOG_WARNING,
-                 "PTE entry %"PRIpte" for address %"PRIx64" doesn't match frame %lx\n",
-                 l1e_get_intpte(ol1e), addr, frame);
+        gdprintk(XENLOG_ERR,
+                 "PTE %"PRIpte" at %"PRIx64" doesn't match grant (%"PRIpte")\n",
+                 l1e_get_intpte(ol1e), addr,
+                 l1e_get_intpte(l1e_from_pfn(frame, grant_pte_flags)));
         rc = GNTST_general_error;
         goto failed;
     }
 
+    if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
+                  ~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) )
+        gdprintk(XENLOG_WARNING,
+                 "PTE flags %x at %"PRIx64" don't match grant (%x)\n",
+                 l1e_get_flags(ol1e), addr, grant_pte_flags);
+
     /* Delete pagetable entry. */
     if ( unlikely(!UPDATE_ENTRY
                   (l1, 
@@ -4123,7 +4136,8 @@ static int destroy_grant_pte_mapping(
                    0)) )
     {
         page_unlock(page);
-        gdprintk(XENLOG_WARNING, "Cannot delete PTE entry at %p\n", va);
+        gdprintk(XENLOG_WARNING, "Cannot delete PTE entry at %"PRIx64"\n",
+                 addr);
         rc = GNTST_general_error;
         goto failed;
     }
@@ -4191,7 +4205,8 @@ static int create_grant_va_mapping(
 }
 
 static int replace_grant_va_mapping(
-    unsigned long addr, unsigned long frame, l1_pgentry_t nl1e, struct vcpu *v)
+    unsigned long addr, unsigned long frame, unsigned int grant_pte_flags,
+    l1_pgentry_t nl1e, struct vcpu *v)
 {
     l1_pgentry_t *pl1e, ol1e;
     unsigned long gl1mfn;
@@ -4227,20 +4242,33 @@ static int replace_grant_va_mapping(
 
     ol1e = *pl1e;
 
-    /* Check that the virtual address supplied is actually mapped to frame. */
-    if ( unlikely(l1e_get_pfn(ol1e) != frame) )
-    {
-        gdprintk(XENLOG_WARNING,
-                 "PTE entry %lx for address %lx doesn't match frame %lx\n",
-                 l1e_get_pfn(ol1e), addr, frame);
+    /*
+     * Check that the virtual address supplied is actually mapped to frame
+     * (with appropriate permissions).
+     */
+    if ( unlikely(l1e_get_pfn(ol1e) != frame) ||
+         unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
+                  (_PAGE_PRESENT | _PAGE_RW)) )
+    {
+        gdprintk(XENLOG_ERR,
+                 "PTE %"PRIpte" for %lx doesn't match grant (%"PRIpte")\n",
+                 l1e_get_intpte(ol1e), addr,
+                 l1e_get_intpte(l1e_from_pfn(frame, grant_pte_flags)));
         rc = GNTST_general_error;
         goto unlock_and_out;
     }
 
+    if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
+                  ~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) )
+        gdprintk(XENLOG_WARNING,
+                 "PTE flags %x for %"PRIx64" don't match grant (%x)\n",
+                 l1e_get_flags(ol1e), addr, grant_pte_flags);
+
     /* Delete pagetable entry. */
     if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
     {
-        gdprintk(XENLOG_WARNING, "Cannot delete PTE entry at %p\n", pl1e);
+        gdprintk(XENLOG_WARNING, "Cannot delete PTE entry for %"PRIx64"\n",
+                 addr);
         rc = GNTST_general_error;
         goto unlock_and_out;
     }
@@ -4254,9 +4282,11 @@ static int replace_grant_va_mapping(
 }
 
 static int destroy_grant_va_mapping(
-    unsigned long addr, unsigned long frame, struct vcpu *v)
+    unsigned long addr, unsigned long frame, unsigned int grant_pte_flags,
+    struct vcpu *v)
 {
-    return replace_grant_va_mapping(addr, frame, l1e_empty(), v);
+    return replace_grant_va_mapping(addr, frame, grant_pte_flags,
+                                    l1e_empty(), v);
 }
 
 static int create_grant_p2m_mapping(uint64_t addr, unsigned long frame,
@@ -4351,20 +4381,39 @@ int replace_grant_host_mapping(
     unsigned long gl1mfn;
     struct page_info *l1pg;
     int rc;
+    unsigned int grant_pte_flags;
     
     if ( paging_mode_external(current->domain) )
         return replace_grant_p2m_mapping(addr, frame, new_addr, flags);
 
+    grant_pte_flags =
+        _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_GNTTAB | _PAGE_NX;
+
+    if ( flags & GNTMAP_application_map )
+        grant_pte_flags |= _PAGE_USER;
+    if ( !(flags & GNTMAP_readonly) )
+        grant_pte_flags |= _PAGE_RW;
+    /*
+     * On top of the explicit settings done by create_grant_host_mapping()
+     * also open-code relevant parts of adjust_guest_l1e(). Don't mirror
+     * available and cachability flags, though.
+     */
+    if ( !is_pv_32bit_domain(curr->domain) )
+        grant_pte_flags |= (grant_pte_flags & _PAGE_USER)
+                           ? _PAGE_GLOBAL
+                           : _PAGE_GUEST_KERNEL | _PAGE_USER;
+
     if ( flags & GNTMAP_contains_pte )
     {
         if ( !new_addr )
-            return destroy_grant_pte_mapping(addr, frame, curr->domain);
+            return destroy_grant_pte_mapping(addr, frame, grant_pte_flags,
+                                             curr->domain);
         
         return GNTST_general_error;
     }
 
     if ( !new_addr )
-        return destroy_grant_va_mapping(addr, frame, curr);
+        return destroy_grant_va_mapping(addr, frame, grant_pte_flags, curr);
 
     pl1e = guest_map_l1e(new_addr, &gl1mfn);
     if ( !pl1e )
@@ -4412,7 +4461,7 @@ int replace_grant_host_mapping(
     put_page(l1pg);
     guest_unmap_l1e(pl1e);
 
-    rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
+    rc = replace_grant_va_mapping(addr, frame, grant_pte_flags, ol1e, curr);
     if ( rc && !paging_mode_refcounts(curr->domain) )
         put_page_from_l1e(ol1e, curr->domain);
 
