Hello community,

here is the log from the commit of package xen for openSUSE:Factory checked in 
at 2015-11-02 12:54:41
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
 and      /work/SRC/openSUSE:Factory/.xen.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "xen"

Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes  2015-10-14 16:43:21.000000000 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new/xen.changes     2015-11-02 12:54:43.000000000 +0100
@@ -1,0 +2,52 @@
+Wed Oct 28 09:47:38 MDT 2015 - [email protected]
+
+- Upstream patches from Jan
+  5604f239-x86-PV-properly-populate-descriptor-tables.patch
+  561bbc8b-VT-d-don-t-suppress-invalidation-address-write-when-0.patch
+  561d2046-VT-d-use-proper-error-codes-in-iommu_enable_x2apic_IR.patch
+  561d20a0-x86-hide-MWAITX-from-PV-domains.patch
+  561e3283-x86-NUMA-fix-SRAT-table-processor-entry-handling.patch
+
+-------------------------------------------------------------------
+Fri Oct 23 13:35:59 MDT 2015 - [email protected]
+
+- bsc#951845 - VUL-0: CVE-2015-7972: xen: x86: populate-on-demand
+  balloon size inaccuracy can crash guests (XSA-153)
+  xsa153-libxl.patch
+
+-------------------------------------------------------------------
+Fri Oct 16 08:40:31 MDT 2015 - [email protected]
+
+- bsc#950703 - VUL-1: CVE-2015-7969: xen: leak of main per-domain
+  vcpu pointer array (DoS) (XSA-149)
+  xsa149.patch
+- bsc#950705 - VUL-1: CVE-2015-7969: xen: x86: leak of per-domain
+  profiling-related vcpu pointer array (DoS) (XSA-151)
+  xsa151.patch
+- bsc#950706 - VUL-0: CVE-2015-7971: xen: x86: some pmu and
+  profiling hypercalls log without rate limiting (XSA-152)
+  xsa152.patch
+- Dropped
+  55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
+  5604f239-x86-PV-properly-populate-descriptor-tables.patch
+
+-------------------------------------------------------------------
+Thu Oct 15 11:43:23 MDT 2015 - [email protected]
+
+- bsc#932267 - VUL-1: CVE-2015-4037: qemu,kvm,xen: insecure
+  temporary file use in /net/slirp.c
+  CVE-2015-4037-qemuu-smb-config-dir-name.patch
+  CVE-2015-4037-qemut-smb-config-dir-name.patch
+- bsc#877642 - VUL-0: CVE-2014-0222: qemu: qcow1: validate L2 table
+  size to avoid integer overflows
+  CVE-2014-0222-qemuu-qcow1-validate-l2-table-size.patch
+  CVE-2014-0222-qemut-qcow1-validate-l2-table-size.patch
+
+-------------------------------------------------------------------
+Wed Oct 14 10:24:15 MDT 2015 - [email protected]
+
+- bsc#950367 - VUL-0: CVE-2015-7835: xen: x86: Uncontrolled
+  creation of large page mappings by PV guests (XSA-148)
+  CVE-2015-7835-xsa148.patch
+
+-------------------------------------------------------------------

Old:
----
  55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch

New:
----
  561bbc8b-VT-d-don-t-suppress-invalidation-address-write-when-0.patch
  561d2046-VT-d-use-proper-error-codes-in-iommu_enable_x2apic_IR.patch
  561d20a0-x86-hide-MWAITX-from-PV-domains.patch
  561e3283-x86-NUMA-fix-SRAT-table-processor-entry-handling.patch
  CVE-2014-0222-qemut-qcow1-validate-l2-table-size.patch
  CVE-2014-0222-qemuu-qcow1-validate-l2-table-size.patch
  CVE-2015-4037-qemut-smb-config-dir-name.patch
  CVE-2015-4037-qemuu-smb-config-dir-name.patch
  CVE-2015-7835-xsa148.patch
  xsa149.patch
  xsa151.patch
  xsa152.patch
  xsa153-libxl.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -158,7 +158,7 @@
 %endif
 %endif
 
-Version:        4.5.1_10
+Version:        4.5.1_13
 Release:        0
 Summary:        Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
 License:        GPL-2.0
@@ -205,41 +205,55 @@
 Patch2:         55103616-vm-assist-prepare-for-discontiguous-used-bit-numbers.patch
 Patch3:         551ac326-xentop-add-support-for-qdisk.patch
 Patch4:         552d0f49-x86-traps-identify-the-vcpu-in-context-when-dumping-regs.patch
-Patch5:         5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
-Patch6:         5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
-Patch7:         5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
-Patch8:         554cc211-libxl-add-qxl.patch
-Patch9:         556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
-Patch10:        5576f178-kexec-add-more-pages-to-v1-environment.patch
-Patch11:        55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
-Patch12:        558bfaa0-x86-traps-avoid-using-current-too-early.patch
-Patch13:        5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
-Patch14:        559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
-Patch15:        559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
-Patch16:        559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
-Patch17:        559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
-Patch18:        559bdde5-pull-in-latest-linux-earlycpio.patch
-Patch19:        55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
-Patch20:        55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
-Patch21:        55a77e4f-dmar-device-scope-mem-leak-fix.patch
-Patch22:        55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
-Patch23:        55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
-Patch24:        55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
-Patch25:        55dc7937-x86-IO-APIC-don-t-create-pIRQ-mapping-from-masked-RTE.patch
-Patch26:        55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
-Patch27:        55e43fd8-x86-NUMA-fix-setup_node.patch
-Patch28:        55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
-Patch29:        55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
-Patch30:        55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
-Patch31:        55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
-Patch32:        55f9345b-x86-MSI-fail-if-no-hardware-support.patch
-Patch33:        5604f239-x86-PV-properly-populate-descriptor-tables.patch
-Patch34:        5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
-Patch35:        560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
-Patch36:        560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
-Patch37:        560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
-Patch38:        560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
-Patch39:        560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
+Patch5:         552d293b-x86-vMSI-X-honor-all-mask-requests.patch
+Patch6:         552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
+Patch7:         5537a4d8-libxl-use-DEBUG-log-level-instead-of-INFO.patch
+Patch8:         5548e903-domctl-don-t-truncate-XEN_DOMCTL_max_mem-requests.patch
+Patch9:         5548e95d-x86-allow-to-suppress-M2P-user-mode-exposure.patch
+Patch10:        554cc211-libxl-add-qxl.patch
+Patch11:        556d973f-unmodified-drivers-tolerate-IRQF_DISABLED-being-undefined.patch
+Patch12:        5576f143-x86-adjust-PV-I-O-emulation-functions-types.patch
+Patch13:        5576f178-kexec-add-more-pages-to-v1-environment.patch
+Patch14:        55780be1-x86-EFI-adjust-EFI_MEMORY_WP-handling-for-spec-version-2.5.patch
+Patch15:        55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
+Patch16:        5583d9c5-x86-MSI-X-cleanup.patch
+Patch17:        5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
+Patch18:        558bfaa0-x86-traps-avoid-using-current-too-early.patch
+Patch19:        5592a116-nested-EPT-fix-the-handling-of-nested-EPT.patch
+Patch20:        559b9dd6-x86-p2m-ept-don-t-unmap-in-use-EPT-pagetable.patch
+Patch21:        559bc633-x86-cpupool-clear-proper-cpu_valid-bit-on-CPU-teardown.patch
+Patch22:        559bc64e-credit1-properly-deal-with-CPUs-not-in-any-pool.patch
+Patch23:        559bc87f-x86-hvmloader-avoid-data-corruption-with-xenstore-rw.patch
+Patch24:        559bdde5-pull-in-latest-linux-earlycpio.patch
+Patch25:        55a62eb0-xl-correct-handling-of-extra_config-in-main_cpupoolcreate.patch
+Patch26:        55a66a1e-make-rangeset_report_ranges-report-all-ranges.patch
+Patch27:        55a77e4f-dmar-device-scope-mem-leak-fix.patch
+Patch28:        55b0a218-x86-PCI-CFG-write-intercept.patch
+Patch29:        55b0a255-x86-MSI-X-maskall.patch
+Patch30:        55b0a283-x86-MSI-X-teardown.patch
+Patch31:        55b0a2ab-x86-MSI-X-enable.patch
+Patch32:        55b0a2db-x86-MSI-track-guest-masking.patch
+Patch33:        55c1d83d-x86-gdt-Drop-write-only-xalloc-d-array.patch
+Patch34:        55c3232b-x86-mm-Make-hap-shadow-teardown-preemptible.patch
+Patch35:        55dc78e9-x86-amd_ucode-skip-updates-for-final-levels.patch
+Patch36:        55df2f76-IOMMU-skip-domains-without-page-tables-when-dumping.patch
+Patch37:        55e43fd8-x86-NUMA-fix-setup_node.patch
+Patch38:        55e43ff8-x86-NUMA-don-t-account-hotplug-regions.patch
+Patch39:        55e593f1-x86-NUMA-make-init_node_heap-respect-Xen-heap-limit.patch
+Patch40:        55f2e438-x86-hvm-fix-saved-pmtimer-and-hpet-values.patch
+Patch41:        55f7f9d2-libxl-slightly-refine-pci-assignable-add-remove-handling.patch
+Patch42:        55f9345b-x86-MSI-fail-if-no-hardware-support.patch
+Patch43:        5604f239-x86-PV-properly-populate-descriptor-tables.patch
+Patch44:        5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch
+Patch45:        560a4af9-x86-EPT-tighten-conditions-of-IOMMU-mapping-updates.patch
+Patch46:        560a7c36-x86-p2m-pt-delay-freeing-of-intermediate-page-tables.patch
+Patch47:        560a7c53-x86-p2m-pt-ignore-pt-share-flag-for-shadow-mode-guests.patch
+Patch48:        560bd926-credit1-fix-tickling-when-it-happens-from-a-remote-pCPU.patch
+Patch49:        560e6d34-x86-p2m-pt-tighten-conditions-of-IOMMU-mapping-updates.patch
+Patch50:        561bbc8b-VT-d-don-t-suppress-invalidation-address-write-when-0.patch
+Patch51:        561d2046-VT-d-use-proper-error-codes-in-iommu_enable_x2apic_IR.patch
+Patch52:        561d20a0-x86-hide-MWAITX-from-PV-domains.patch
+Patch53:        561e3283-x86-NUMA-fix-SRAT-table-processor-entry-handling.patch
 Patch131:       CVE-2015-4106-xsa131-9.patch
 Patch137:       CVE-2015-3259-xsa137.patch
 Patch139:       xsa139-qemuu.patch
@@ -258,6 +272,11 @@
 Patch14016:     xsa140-qemut-6.patch
 Patch14017:     xsa140-qemut-7.patch
 Patch142:       CVE-2015-7311-xsa142.patch
+Patch148:       CVE-2015-7835-xsa148.patch
+Patch149:       xsa149.patch
+Patch151:       xsa151.patch
+Patch152:       xsa152.patch
+Patch153:       xsa153-libxl.patch
 # Upstream qemu
 Patch250:       VNC-Support-for-ExtendedKeyEvent-client-message.patch
 Patch251:       0001-net-move-the-tap-buffer-into-TAPState.patch
@@ -278,6 +297,10 @@
 Patch266:       CVE-2015-6815-qemut-e1000-fix-infinite-loop.patch
 Patch267:       CVE-2015-5239-qemuu-limit-client_cut_text-msg-payload-size.patch
 Patch268:       CVE-2015-5239-qemut-limit-client_cut_text-msg-payload-size.patch
+Patch269:       CVE-2015-4037-qemuu-smb-config-dir-name.patch
+Patch270:       CVE-2015-4037-qemut-smb-config-dir-name.patch
+Patch271:       CVE-2014-0222-qemuu-qcow1-validate-l2-table-size.patch
+Patch272:       CVE-2014-0222-qemut-qcow1-validate-l2-table-size.patch
 # Our platform specific patches
 Patch301:       xen-destdir.patch
 Patch302:       vif-bridge-no-iptables.patch
@@ -363,18 +386,6 @@
 Patch606:       xen.build-compare.seabios.patch
 Patch607:       xen.build-compare.man.patch
 Patch608:       ipxe-no-error-logical-not-parentheses.patch
-# MSI issues (bsc#907514 bsc#910258 bsc#918984 bsc#923967)
-Patch700:       552d293b-x86-vMSI-X-honor-all-mask-requests.patch
-Patch701:       552d2966-x86-vMSI-X-add-valid-bits-for-read-acceleration.patch
-Patch702:       5576f143-x86-adjust-PV-I-O-emulation-functions-types.patch
-Patch703:       55795a52-x86-vMSI-X-support-qword-MMIO-access.patch
-Patch704:       5583d9c5-x86-MSI-X-cleanup.patch
-Patch705:       5583da09-x86-MSI-track-host-and-guest-masking-separately.patch
-Patch706:       55b0a218-x86-PCI-CFG-write-intercept.patch
-Patch707:       55b0a255-x86-MSI-X-maskall.patch
-Patch708:       55b0a283-x86-MSI-X-teardown.patch
-Patch709:       55b0a2ab-x86-MSI-X-enable.patch
-Patch710:       55b0a2db-x86-MSI-track-guest-masking.patch
 # grant table performance improvements
 Patch715:       54c2553c-grant-table-use-uint16_t-consistently-for-offset-and-length.patch
 Patch716:       54ca33bc-grant-table-refactor-grant-copy-to-reduce-duplicate-code.patch
@@ -648,6 +659,20 @@
 %patch37 -p1
 %patch38 -p1
 %patch39 -p1
+%patch40 -p1
+%patch41 -p1
+%patch42 -p1
+%patch43 -p1
+%patch44 -p1
+%patch45 -p1
+%patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
+%patch52 -p1
+%patch53 -p1
 %patch131 -p1
 %patch137 -p1
 %patch139 -p1
@@ -666,6 +691,11 @@
 %patch14016 -p1
 %patch14017 -p1
 %patch142 -p1
+%patch148 -p1
+%patch149 -p1
+%patch151 -p1
+%patch152 -p1
+%patch153 -p1
 # Upstream qemu patches
 %patch250 -p1
 %patch251 -p1
@@ -686,6 +716,10 @@
 %patch266 -p1
 %patch267 -p1
 %patch268 -p1
+%patch269 -p1
+%patch270 -p1
+%patch271 -p1
+%patch272 -p1
 # Our platform specific patches
 %patch301 -p1
 %patch302 -p1
@@ -770,18 +804,6 @@
 %patch606 -p1
 %patch607 -p1
 %patch608 -p1
-# MSI issues (bsc#907514 bsc#910258 bsc#918984 bsc#923967)
-%patch700 -p1
-%patch701 -p1
-%patch702 -p1
-%patch703 -p1
-%patch704 -p1
-%patch705 -p1
-%patch706 -p1
-%patch707 -p1
-%patch708 -p1
-%patch709 -p1
-%patch710 -p1
 # grant table performance improvements
 %patch715 -p1
 %patch716 -p1

++++++ 557eb620-gnttab-make-the-grant-table-lock-a-read-write-lock.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -113,7 +113,7 @@
          if ( idx != 0 )
 --- a/xen/arch/x86/mm.c
 +++ b/xen/arch/x86/mm.c
-@@ -4592,7 +4592,7 @@ int xenmem_add_to_physmap_one(
+@@ -4595,7 +4595,7 @@ int xenmem_add_to_physmap_one(
                  mfn = virt_to_mfn(d->shared_info);
              break;
          case XENMAPSPACE_grant_table:
@@ -122,7 +122,7 @@
  
              if ( d->grant_table->gt_version == 0 )
                  d->grant_table->gt_version = 1;
-@@ -4614,7 +4614,7 @@ int xenmem_add_to_physmap_one(
+@@ -4617,7 +4617,7 @@ int xenmem_add_to_physmap_one(
                      mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
              }
  
@@ -133,7 +133,7 @@
          case XENMAPSPACE_gmfn:
 --- a/xen/common/grant_table.c
 +++ b/xen/common/grant_table.c
-@@ -196,7 +196,7 @@ active_entry_acquire(struct grant_table 
+@@ -196,7 +196,7 @@ active_entry_acquire(struct grant_table
  {
      struct active_grant_entry *act;
  

++++++ 5583d9c5-x86-MSI-X-cleanup.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -104,7 +104,7 @@
              u32 mask_bits;
              u16 seg = entry->dev->seg;
              u8 bus = entry->dev->bus;
-@@ -703,13 +707,14 @@ static u64 read_pci_mem_bar(u16 seg, u8 
+@@ -701,13 +705,14 @@ static u64 read_pci_mem_bar(u16 seg, u8
   * requested MSI-X entries with allocated irqs or non-zero for otherwise.
   **/
  static int msix_capability_init(struct pci_dev *dev,
@@ -120,7 +120,7 @@
      u16 control;
      u64 table_paddr;
      u32 table_offset;
-@@ -721,7 +726,6 @@ static int msix_capability_init(struct p
+@@ -719,7 +724,6 @@ static int msix_capability_init(struct p
  
      ASSERT(spin_is_locked(&pcidevs_lock));
  
@@ -128,7 +128,7 @@
      control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
      msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
  
-@@ -886,10 +890,9 @@ static int __pci_enable_msi(struct msi_i
+@@ -884,10 +888,9 @@ static int __pci_enable_msi(struct msi_i
      old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
      if ( old_desc )
      {
@@ -142,7 +142,7 @@
          *desc = old_desc;
          return 0;
      }
-@@ -897,10 +900,10 @@ static int __pci_enable_msi(struct msi_i
+@@ -895,10 +898,10 @@ static int __pci_enable_msi(struct msi_i
      old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
      if ( old_desc )
      {
@@ -157,7 +157,7 @@
      }
  
      return msi_capability_init(pdev, msi->irq, desc, msi->entry_nr);
-@@ -914,7 +917,6 @@ static void __pci_disable_msi(struct msi
+@@ -912,7 +915,6 @@ static void __pci_disable_msi(struct msi
      msi_set_enable(dev, 0);
  
      BUG_ON(list_empty(&dev->msi_list));
@@ -165,7 +165,7 @@
  }
  
  /**
-@@ -934,7 +936,7 @@ static void __pci_disable_msi(struct msi
+@@ -932,7 +934,7 @@ static void __pci_disable_msi(struct msi
   **/
  static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
  {
@@ -174,7 +174,7 @@
      struct pci_dev *pdev;
      u16 control;
      u8 slot = PCI_SLOT(msi->devfn);
-@@ -943,23 +945,22 @@ static int __pci_enable_msix(struct msi_
+@@ -941,23 +943,22 @@ static int __pci_enable_msix(struct msi_
  
      ASSERT(spin_is_locked(&pcidevs_lock));
      pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
@@ -204,7 +204,7 @@
          *desc = old_desc;
          return 0;
      }
-@@ -967,15 +968,13 @@ static int __pci_enable_msix(struct msi_
+@@ -965,15 +966,13 @@ static int __pci_enable_msix(struct msi_
      old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
      if ( old_desc )
      {
@@ -225,7 +225,7 @@
  }
  
  static void _pci_cleanup_msix(struct arch_msix *msix)
-@@ -993,19 +992,16 @@ static void _pci_cleanup_msix(struct arc
+@@ -991,19 +990,16 @@ static void _pci_cleanup_msix(struct arc
  
  static void __pci_disable_msix(struct msi_desc *entry)
  {
@@ -254,7 +254,7 @@
      msix_set_enable(dev, 0);
  
      BUG_ON(list_empty(&dev->msi_list));
-@@ -1047,7 +1043,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
+@@ -1045,7 +1041,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8
          u16 control = pci_conf_read16(seg, bus, slot, func,
                                        msix_control_reg(pos));
  
@@ -263,7 +263,7 @@
                                    multi_msix_capable(control));
      }
      spin_unlock(&pcidevs_lock);
-@@ -1066,8 +1062,8 @@ int pci_enable_msi(struct msi_info *msi,
+@@ -1064,8 +1060,8 @@ int pci_enable_msi(struct msi_info *msi,
      if ( !use_msi )
          return -EPERM;
  
@@ -274,7 +274,7 @@
  }
  
  /*
-@@ -1117,7 +1113,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1115,7 +1111,9 @@ int pci_restore_msi_state(struct pci_dev
      if ( !pdev )
          return -EINVAL;
  

++++++ 5583da09-x86-MSI-track-host-and-guest-masking-separately.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -137,7 +137,7 @@
      spin_unlock_irqrestore(&desc->lock, flags);
 --- a/xen/arch/x86/irq.c
 +++ b/xen/arch/x86/irq.c
-@@ -2503,6 +2503,25 @@ int unmap_domain_pirq_emuirq(struct doma
+@@ -2502,6 +2502,25 @@ int unmap_domain_pirq_emuirq(struct doma
      return ret;
  }
  
@@ -230,7 +230,7 @@
      .enable       = unmask_msi_irq,
      .disable      = mask_msi_irq,
      .ack          = ack_maskable_msi_irq,
-@@ -593,7 +605,8 @@ static int msi_capability_init(struct pc
+@@ -591,7 +603,8 @@ static int msi_capability_init(struct pc
          entry[i].msi_attrib.is_64 = is_64bit_address(control);
          entry[i].msi_attrib.entry_nr = i;
          entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
@@ -240,7 +240,7 @@
          entry[i].msi_attrib.pos = pos;
          if ( entry[i].msi_attrib.maskbit )
              entry[i].msi.mpos = mpos;
-@@ -819,7 +832,8 @@ static int msix_capability_init(struct p
+@@ -817,7 +830,8 @@ static int msix_capability_init(struct p
          entry->msi_attrib.is_64 = 1;
          entry->msi_attrib.entry_nr = msi->entry_nr;
          entry->msi_attrib.maskbit = 1;
@@ -250,7 +250,7 @@
          entry->msi_attrib.pos = pos;
          entry->irq = msi->irq;
          entry->dev = dev;
-@@ -1154,7 +1168,8 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1152,7 +1166,8 @@ int pci_restore_msi_state(struct pci_dev
  
          for ( i = 0; ; )
          {
@@ -260,7 +260,7 @@
  
              if ( !--nr )
                  break;
-@@ -1306,7 +1321,7 @@ static void dump_msi(unsigned char key)
+@@ -1304,7 +1319,7 @@ static void dump_msi(unsigned char key)
          else
              mask = '?';
          printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
@@ -269,7 +269,7 @@
                 type, irq,
                 (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
                 data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
-@@ -1314,7 +1329,10 @@ static void dump_msi(unsigned char key)
+@@ -1312,7 +1327,10 @@ static void dump_msi(unsigned char key)
                 data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
                 addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
                 addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
@@ -317,18 +317,18 @@
  static unsigned int iommu_msi_startup(struct irq_desc *desc)
 --- a/xen/drivers/passthrough/vtd/iommu.c
 +++ b/xen/drivers/passthrough/vtd/iommu.c
-@@ -999,7 +999,7 @@ static void dma_msi_unmask(struct irq_de
-     sts &= ~DMA_FECTL_IM;
-     dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
+@@ -996,7 +996,7 @@ static void dma_msi_unmask(struct irq_de
+     spin_lock_irqsave(&iommu->register_lock, flags);
+     dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
 -    iommu->msi.msi_attrib.masked = 0;
 +    iommu->msi.msi_attrib.host_masked = 0;
  }
  
  static void dma_msi_mask(struct irq_desc *desc)
-@@ -1014,7 +1014,7 @@ static void dma_msi_mask(struct irq_desc
-     sts |= DMA_FECTL_IM;
-     dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
+@@ -1008,7 +1008,7 @@ static void dma_msi_mask(struct irq_desc
+     spin_lock_irqsave(&iommu->register_lock, flags);
+     dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
 -    iommu->msi.msi_attrib.masked = 1;
 +    iommu->msi.msi_attrib.host_masked = 1;

++++++ 55b0a218-x86-PCI-CFG-write-intercept.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -14,7 +14,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -1110,6 +1110,12 @@ void pci_cleanup_msi(struct pci_dev *pde
+@@ -1108,6 +1108,12 @@ void pci_cleanup_msi(struct pci_dev *pde
      msi_free_irqs(pdev);
  }
  

++++++ 55b0a255-x86-MSI-X-maskall.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -15,7 +15,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -845,6 +845,12 @@ static int msix_capability_init(struct p
+@@ -843,6 +843,12 @@ static int msix_capability_init(struct p
  
      if ( !msix->used_entries )
      {
@@ -28,7 +28,7 @@
          if ( rangeset_add_range(mmio_ro_ranges, msix->table.first,
                                  msix->table.last) )
              WARN();
-@@ -1113,6 +1119,34 @@ void pci_cleanup_msi(struct pci_dev *pde
+@@ -1111,6 +1117,34 @@ void pci_cleanup_msi(struct pci_dev *pde
  int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg,
                                   unsigned int size, uint32_t *data)
  {

++++++ 55b0a283-x86-MSI-X-teardown.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -265,7 +265,7 @@
  }
  
  void guest_mask_msi_irq(struct irq_desc *desc, bool_t mask)
-@@ -422,13 +489,15 @@ void guest_mask_msi_irq(struct irq_desc 
+@@ -422,13 +489,15 @@ void guest_mask_msi_irq(struct irq_desc
  
  static unsigned int startup_msi_irq(struct irq_desc *desc)
  {
@@ -283,7 +283,7 @@
  }
  
  void ack_nonmaskable_msi_irq(struct irq_desc *desc)
-@@ -742,6 +811,9 @@ static int msix_capability_init(struct p
+@@ -740,6 +809,9 @@ static int msix_capability_init(struct p
      control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
      msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
  
@@ -293,7 +293,7 @@
      if ( desc )
      {
          entry = alloc_msi_entry(1);
-@@ -881,7 +953,8 @@ static int msix_capability_init(struct p
+@@ -879,7 +951,8 @@ static int msix_capability_init(struct p
      ++msix->used_entries;
  
      /* Restore MSI-X enabled bits */
@@ -303,7 +303,7 @@
  
      return 0;
  }
-@@ -1026,8 +1099,16 @@ static void __pci_disable_msix(struct ms
+@@ -1024,8 +1097,16 @@ static void __pci_disable_msix(struct ms
  
      BUG_ON(list_empty(&dev->msi_list));
  
@@ -322,7 +322,7 @@
      pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
  
      _pci_cleanup_msix(dev->msix);
-@@ -1201,15 +1282,24 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1199,15 +1280,24 @@ int pci_restore_msi_state(struct pci_dev
              nr = entry->msi.nvec;
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )

++++++ 55b0a2ab-x86-MSI-X-enable.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -171,7 +171,7 @@
  }
  
  int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
-@@ -805,20 +850,38 @@ static int msix_capability_init(struct p
+@@ -803,20 +848,38 @@ static int msix_capability_init(struct p
      u8 bus = dev->bus;
      u8 slot = PCI_SLOT(dev->devfn);
      u8 func = PCI_FUNC(dev->devfn);
@@ -211,7 +211,7 @@
          ASSERT(msi);
      }
  
-@@ -849,6 +912,8 @@ static int msix_capability_init(struct p
+@@ -847,6 +910,8 @@ static int msix_capability_init(struct p
      {
          if ( !msi || !msi->table_base )
          {
@@ -220,7 +220,7 @@
              xfree(entry);
              return -ENXIO;
          }
-@@ -891,6 +956,8 @@ static int msix_capability_init(struct p
+@@ -889,6 +954,8 @@ static int msix_capability_init(struct p
  
          if ( idx < 0 )
          {
@@ -229,7 +229,7 @@
              xfree(entry);
              return idx;
          }
-@@ -917,7 +984,7 @@ static int msix_capability_init(struct p
+@@ -915,7 +982,7 @@ static int msix_capability_init(struct p
  
      if ( !msix->used_entries )
      {
@@ -238,7 +238,7 @@
          if ( !msix->guest_maskall )
              control &= ~PCI_MSIX_FLAGS_MASKALL;
          else
-@@ -953,8 +1020,8 @@ static int msix_capability_init(struct p
+@@ -951,8 +1018,8 @@ static int msix_capability_init(struct p
      ++msix->used_entries;
  
      /* Restore MSI-X enabled bits */
@@ -249,7 +249,7 @@
  
      return 0;
  }
-@@ -1094,8 +1161,15 @@ static void __pci_disable_msix(struct ms
+@@ -1092,8 +1159,15 @@ static void __pci_disable_msix(struct ms
                                             PCI_CAP_ID_MSIX);
      u16 control = pci_conf_read16(seg, bus, slot, func,
                                    msix_control_reg(entry->msi_attrib.pos));
@@ -266,7 +266,7 @@
  
      BUG_ON(list_empty(&dev->msi_list));
  
-@@ -1107,8 +1181,11 @@ static void __pci_disable_msix(struct ms
+@@ -1105,8 +1179,11 @@ static void __pci_disable_msix(struct ms
                 "cannot disable IRQ %d: masking MSI-X on %04x:%02x:%02x.%u\n",
                 entry->irq, dev->seg, dev->bus,
                 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
@@ -279,7 +279,7 @@
      pci_conf_write16(seg, bus, slot, func, msix_control_reg(pos), control);
  
      _pci_cleanup_msix(dev->msix);
-@@ -1257,6 +1334,8 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1255,6 +1332,8 @@ int pci_restore_msi_state(struct pci_dev
      list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
      {
          unsigned int i = 0, nr = 1;
@@ -288,7 +288,7 @@
  
          irq = entry->irq;
          desc = &irq_desc[irq];
-@@ -1283,10 +1362,18 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1281,10 +1360,18 @@ int pci_restore_msi_state(struct pci_dev
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
          {
@@ -308,7 +308,7 @@
                  return -ENXIO;
              }
          }
-@@ -1316,11 +1403,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1314,11 +1401,9 @@ int pci_restore_msi_state(struct pci_dev
          if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
          {
              unsigned int cpos = msi_control_reg(entry->msi_attrib.pos);
@@ -322,7 +322,7 @@
              multi_msi_enable(control, entry->msi.nvec);
              pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                               PCI_FUNC(pdev->devfn), cpos, control);
-@@ -1328,7 +1413,9 @@ int pci_restore_msi_state(struct pci_dev
+@@ -1326,7 +1411,9 @@ int pci_restore_msi_state(struct pci_dev
              msi_set_enable(pdev, 1);
          }
          else if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )

++++++ 55b0a2db-x86-MSI-track-guest-masking.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -15,7 +15,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -1305,6 +1305,37 @@ int pci_msi_conf_write_intercept(struct 
+@@ -1303,6 +1303,37 @@ int pci_msi_conf_write_intercept(struct
          return 1;
      }
  

++++++ 55f9345b-x86-MSI-fail-if-no-hardware-support.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -12,7 +12,7 @@
 
 --- a/xen/arch/x86/msi.c
 +++ b/xen/arch/x86/msi.c
-@@ -566,6 +566,8 @@ static int msi_capability_init(struct pc
+@@ -696,6 +696,8 @@ static int msi_capability_init(struct pc
  
      ASSERT(spin_is_locked(&pcidevs_lock));
      pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);

++++++ 5604f239-x86-PV-properly-populate-descriptor-tables.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -18,9 +18,25 @@
 Signed-off-by: Jan Beulich <[email protected]>
 Reviewed-by: Andrew Cooper <[email protected]>
 
+# Commit 61031e64d3dafd2fb1953436444bf02eccb9b146
+# Date 2015-10-27 14:46:12 +0100
+# Author Jan Beulich <[email protected]>
+# Committer Jan Beulich <[email protected]>
+x86/PV: don't zero-map LDT
+
+This effectively reverts the LDT related part of commit cf6d39f819
+("x86/PV: properly populate descriptor tables"), which broke demand
+paged LDT handling in guests.
+
+Reported-by: David Vrabel <[email protected]>
+Diagnosed-by: Andrew Cooper <[email protected]>
+Signed-off-by: Jan Beulich <[email protected]>
+Tested-by: David Vrabel <[email protected]>
+Reviewed-by: Andrew Cooper <[email protected]>
+
 --- a/xen/arch/x86/mm.c
 +++ b/xen/arch/x86/mm.c
-@@ -505,12 +505,13 @@ void update_cr3(struct vcpu *v)
+@@ -505,12 +505,12 @@ void update_cr3(struct vcpu *v)
      make_cr3(v, cr3_mfn);
  }
  
@@ -32,24 +48,24 @@
 -    int i;
 -    unsigned long pfn;
 +    unsigned int i;
-+    unsigned long pfn, zero_pfn = PFN_DOWN(__pa(zero_page));
      struct page_info *page;
  
      BUG_ON(unlikely(in_irq()));
-@@ -526,8 +527,10 @@ static void invalidate_shadow_ldt(struct
+@@ -525,10 +525,10 @@ static void invalidate_shadow_ldt(struct
+ 
      for ( i = 16; i < 32; i++ )
      {
-         pfn = l1e_get_pfn(pl1e[i]);
+-        pfn = l1e_get_pfn(pl1e[i]);
 -        if ( pfn == 0 ) continue;
--        l1e_write(&pl1e[i], l1e_empty());
-+        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) || pfn == zero_pfn )
++        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
 +            continue;
-+        l1e_write(&pl1e[i],
-+                  l1e_from_pfn(zero_pfn, __PAGE_HYPERVISOR & ~_PAGE_RW));
-         page = mfn_to_page(pfn);
++        page = l1e_get_page(pl1e[i]);
+         l1e_write(&pl1e[i], l1e_empty());
+-        page = mfn_to_page(pfn);
          ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
          ASSERT_PAGE_IS_DOMAIN(page, v->domain);
-@@ -4360,16 +4363,18 @@ long do_update_va_mapping_otherdomain(un
+         put_page_and_type(page);
+@@ -4360,16 +4360,18 @@ long do_update_va_mapping_otherdomain(un
  void destroy_gdt(struct vcpu *v)
  {
      l1_pgentry_t *pl1e;
@@ -72,7 +88,7 @@
          v->arch.pv_vcpu.gdt_frames[i] = 0;
      }
  }
-@@ -4382,7 +4387,7 @@ long set_gdt(struct vcpu *v, 
+@@ -4382,7 +4384,7 @@ long set_gdt(struct vcpu *v,
      struct domain *d = v->domain;
      l1_pgentry_t *pl1e;
      /* NB. There are 512 8-byte entries per GDT page. */

++++++ 5604f2e6-vt-d-fix-IM-bit-mask-and-unmask-of-FECTL_REG.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -39,7 +39,7 @@
 +    sts &= ~DMA_FECTL_IM;
 +    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
-     iommu->msi.msi_attrib.masked = 0;
+     iommu->msi.msi_attrib.host_masked = 0;
  }
 @@ -1003,10 +1006,13 @@ static void dma_msi_mask(struct irq_desc
  {
@@ -54,7 +54,7 @@
 +    sts |= DMA_FECTL_IM;
 +    dmar_writel(iommu->reg, DMAR_FECTL_REG, sts);
      spin_unlock_irqrestore(&iommu->register_lock, flags);
-     iommu->msi.msi_attrib.masked = 1;
+     iommu->msi.msi_attrib.host_masked = 1;
  }
 @@ -2002,6 +2008,7 @@ static int init_vtd_hw(void)
      struct iommu_flush *flush = NULL;

++++++ 561bbc8b-VT-d-don-t-suppress-invalidation-address-write-when-0.patch ++++++
# Commit 710942e57fb42ff8f344ca82f6b678f67e38ae63
# Date 2015-10-12 15:58:35 +0200
# Author Jan Beulich <[email protected]>
# Committer Jan Beulich <[email protected]>
VT-d: don't suppress invalidation address write when it is zero

GFN zero is a valid address, and hence may need invalidation done for
it just like for any other GFN.

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>
Acked-by: Yang Zhang <[email protected]>

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -414,7 +414,7 @@ static int flush_iotlb_reg(void *_iommu,
 {
     struct iommu *iommu = (struct iommu *) _iommu;
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
-    u64 val = 0, val_iva = 0;
+    u64 val = 0;
     unsigned long flags;
 
     /*
@@ -435,7 +435,6 @@ static int flush_iotlb_reg(void *_iommu,
     switch ( type )
     {
     case DMA_TLB_GLOBAL_FLUSH:
-        /* global flush doesn't need set IVA_REG */
         val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
         break;
     case DMA_TLB_DSI_FLUSH:
@@ -443,8 +442,6 @@ static int flush_iotlb_reg(void *_iommu,
         break;
     case DMA_TLB_PSI_FLUSH:
         val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
-        /* Note: always flush non-leaf currently */
-        val_iva = size_order | addr;
         break;
     default:
         BUG();
@@ -457,8 +454,11 @@ static int flush_iotlb_reg(void *_iommu,
 
     spin_lock_irqsave(&iommu->register_lock, flags);
     /* Note: Only uses first TLB reg currently */
-    if ( val_iva )
-        dmar_writeq(iommu->reg, tlb_offset, val_iva);
+    if ( type == DMA_TLB_PSI_FLUSH )
+    {
+        /* Note: always flush non-leaf currently. */
+        dmar_writeq(iommu->reg, tlb_offset, size_order | addr);
+    }
     dmar_writeq(iommu->reg, tlb_offset + 8, val);
 
     /* Make sure hardware complete it */
++++++ 561d2046-VT-d-use-proper-error-codes-in-iommu_enable_x2apic_IR.patch ++++++
# Commit 6851e979874ebc05d270ea94360c49d920d3eaf4
# Date 2015-10-13 17:16:22 +0200
# Author Jan Beulich <[email protected]>
# Committer Jan Beulich <[email protected]>
VT-d: use proper error codes in iommu_enable_x2apic_IR()

... allowing to suppress a confusing message combination: When
ACPI_DMAR_X2APIC_OPT_OUT is set, so far we first logged a message
that IR could not be enabled (hence not using x2APIC), followed by
one indicating successful initialization of IR (if no other problems
prevented that).

Also adjust the return type of iommu_supports_eim() and fix some
broken indentation in the function.

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>
Acked-by: Yang Zhang <[email protected]>

--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -946,8 +946,18 @@ void __init x2apic_bsp_setup(void)
     mask_8259A();
     mask_IO_APIC_setup(ioapic_entries);
 
-    if ( iommu_enable_x2apic_IR() )
+    switch ( iommu_enable_x2apic_IR() )
     {
+    case 0:
+        break;
+    case -ENXIO: /* ACPI_DMAR_X2APIC_OPT_OUT set */
+        if ( !x2apic_enabled )
+        {
+            printk("Not enabling x2APIC (upon firmware request)\n");
+            goto restore_out;
+        }
+        /* fall through */
+    default:
         if ( x2apic_enabled )
             panic("Interrupt remapping could not be enabled while "
                   "x2APIC is already enabled by BIOS");
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -144,10 +144,10 @@ static void set_hpet_source_id(unsigned
     set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, hpetid_to_bdf(id));
 }
 
-int iommu_supports_eim(void)
+bool_t iommu_supports_eim(void)
 {
     struct acpi_drhd_unit *drhd;
-    int apic;
+    unsigned int apic;
 
     if ( !iommu_qinval || !iommu_intremap || list_empty(&acpi_drhd_units) )
         return 0;
@@ -155,12 +155,12 @@ int iommu_supports_eim(void)
     /* We MUST have a DRHD unit for each IOAPIC. */
     for ( apic = 0; apic < nr_ioapics; apic++ )
         if ( !ioapic_to_drhd(IO_APIC_ID(apic)) )
-    {
+        {
             dprintk(XENLOG_WARNING VTDPREFIX,
                     "There is not a DRHD for IOAPIC %#x (id: %#x)!\n",
                     apic, IO_APIC_ID(apic));
             return 0;
-    }
+        }
 
     for_each_drhd_unit ( drhd )
         if ( !ecap_queued_inval(drhd->iommu->ecap) ||
@@ -834,10 +834,10 @@ int iommu_enable_x2apic_IR(void)
     struct iommu *iommu;
 
     if ( !iommu_supports_eim() )
-        return -1;
+        return -EOPNOTSUPP;
 
     if ( !platform_supports_x2apic() )
-        return -1;
+        return -ENXIO;
 
     for_each_drhd_unit ( drhd )
     {
@@ -862,7 +862,7 @@ int iommu_enable_x2apic_IR(void)
         {
             dprintk(XENLOG_INFO VTDPREFIX,
                     "Failed to enable Queued Invalidation!\n");
-            return -1;
+            return -EIO;
         }
     }
 
@@ -874,7 +874,7 @@ int iommu_enable_x2apic_IR(void)
         {
             dprintk(XENLOG_INFO VTDPREFIX,
                     "Failed to enable Interrupt Remapping!\n");
-            return -1;
+            return -EIO;
         }
     }
 
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -28,7 +28,7 @@ int iommu_setup_hpet_msi(struct msi_desc
 /* While VT-d specific, this must get declared in a generic header. */
 int adjust_vtd_irq_affinities(void);
 void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
-int iommu_supports_eim(void);
+bool_t iommu_supports_eim(void);
 int iommu_enable_x2apic_IR(void);
 void iommu_disable_x2apic_IR(void);
 
++++++ 561d20a0-x86-hide-MWAITX-from-PV-domains.patch ++++++
# Commit 941cd44324db7eddc46cba4596fa13d505066ccf
# Date 2015-10-13 17:17:52 +0200
# Author Jan Beulich <[email protected]>
# Committer Jan Beulich <[email protected]>
x86: hide MWAITX from PV domains

Since MWAIT is hidden too. (Linux starting with 4.3 is making use of
that feature, and is checking for it without looking at the MWAIT one.)

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -904,6 +904,7 @@ void pv_cpuid(struct cpu_user_regs *regs
         __clear_bit(X86_FEATURE_LWP % 32, &c);
         __clear_bit(X86_FEATURE_NODEID_MSR % 32, &c);
         __clear_bit(X86_FEATURE_TOPOEXT % 32, &c);
+        __clear_bit(X86_FEATURE_MWAITX % 32, &c);
         break;
 
     case 0x00000005: /* MONITOR/MWAIT */
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -137,6 +137,7 @@
 #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_DBEXT       (6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_MWAITX      (6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
 #define X86_FEATURE_FSGSBASE   (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
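
For context, the bit the hunk above clears corresponds to CPUID leaf 0x80000001, ECX bit 29
(MONITORX/MWAITX), so a PV guest querying CPUID after this change sees it as absent, just like
MWAIT. The following is a minimal, hypothetical user-space sketch (not part of the patch) for
inspecting that bit with GCC's <cpuid.h>:

/* Hypothetical standalone sketch, not part of the patch: report whether
 * CPUID.80000001H:ECX[29] (MONITORX/MWAITX) is visible in this environment. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) )
    {
        puts("CPUID leaf 0x80000001 not available");
        return 1;
    }
    /* Bit 29 of ECX is X86_FEATURE_MWAITX, i.e. (6*32+29) in Xen's numbering. */
    printf("MWAITX: %s\n", (ecx & (1u << 29)) ? "advertised" : "hidden");
    return 0;
}
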
++++++ 561e3283-x86-NUMA-fix-SRAT-table-processor-entry-handling.patch ++++++
# Commit 83281fc9b31396e94c0bfb6550b75c165037a0ad
# Date 2015-10-14 12:46:27 +0200
# Author Jan Beulich <[email protected]>
# Committer Jan Beulich <[email protected]>
x86/NUMA: fix SRAT table processor entry parsing and consumption

- don't overrun apicid_to_node[] (possible in the x2APIC case)
- don't limit number of processor related SRAT entries we can consume
- make acpi_numa_{processor,x2apic}_affinity_init() as similar to one
  another as possible
- print APIC IDs in hex (to ease matching with other log messages), at
  once making legacy and x2APIC ones distinguishable (by width)

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Andrew Cooper <[email protected]>

--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -347,7 +347,7 @@ void __init init_cpu_to_node(void)
         u32 apicid = x86_cpu_to_apicid[i];
         if ( apicid == BAD_APICID )
             continue;
-        node = apicid_to_node[apicid];
+        node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE;
         if ( node == NUMA_NO_NODE || !node_online(node) )
             node = 0;
         numa_set_node(i, node);
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -191,7 +191,7 @@ void __devinit srat_detect_node(int cpu)
     unsigned node;
     u32 apicid = x86_cpu_to_apicid[cpu];
 
-    node = apicid_to_node[apicid];
+    node = apicid < MAX_LOCAL_APIC ? apicid_to_node[apicid] : NUMA_NO_NODE;
     if ( node == NUMA_NO_NODE )
         node = 0;
 
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -885,7 +885,8 @@ int cpu_add(uint32_t apic_id, uint32_t a
             cpu = node;
             goto out;
         }
-        apicid_to_node[apic_id] = node;
+        if ( apic_id < MAX_LOCAL_APIC )
+             apicid_to_node[apic_id] = node;
     }
 
     /* Physically added CPUs do not have synchronised TSC. */
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -170,7 +170,6 @@ void __init
 acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 {
        int pxm, node;
-       int apic_id;
 
        if (srat_disabled())
                return;
@@ -178,8 +177,13 @@ acpi_numa_x2apic_affinity_init(struct ac
                bad_srat();
                return;
        }
-       if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+       if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
+               return;
+       if (pa->apic_id >= MAX_LOCAL_APIC) {
+               printk(KERN_INFO "SRAT: APIC %08x ignored\n", pa->apic_id);
                return;
+       }
+
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
@@ -187,11 +191,11 @@ acpi_numa_x2apic_affinity_init(struct ac
                return;
        }
 
-       apic_id = pa->apic_id;
-       apicid_to_node[apic_id] = node;
+       apicid_to_node[pa->apic_id] = node;
+       node_set(node, processor_nodes_parsed);
        acpi_numa = 1;
-       printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
-              pxm, apic_id, node);
+       printk(KERN_INFO "SRAT: PXM %u -> APIC %08x -> Node %u\n",
+              pxm, pa->apic_id, node);
 }
 
 /* Callback for Proximity Domain -> LAPIC mapping */
@@ -221,7 +225,7 @@ acpi_numa_processor_affinity_init(struct
        apicid_to_node[pa->apic_id] = node;
        node_set(node, processor_nodes_parsed);
        acpi_numa = 1;
-       printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+       printk(KERN_INFO "SRAT: PXM %u -> APIC %02x -> Node %u\n",
               pxm, pa->apic_id, node);
 }
 
--- a/xen/drivers/acpi/numa.c
+++ b/xen/drivers/acpi/numa.c
@@ -199,9 +199,9 @@ int __init acpi_numa_init(void)
        /* SRAT: Static Resource Affinity Table */
        if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
                acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-                                     acpi_parse_x2apic_affinity, NR_CPUS);
+                                     acpi_parse_x2apic_affinity, 0);
                acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-                                     acpi_parse_processor_affinity, NR_CPUS);
+                                     acpi_parse_processor_affinity, 0);
                acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                      acpi_parse_memory_affinity,
                                      NR_NODE_MEMBLKS);
++++++ CVE-2014-0222-qemut-qcow1-validate-l2-table-size.patch ++++++
References: bsc#877642

Subject: qcow1: Validate L2 table size (CVE-2014-0222)
From: Kevin Wolf [email protected] Thu May 15 16:10:11 2014 +0200
Date: Mon May 19 11:36:49 2014 +0200:
Git: 42eb58179b3b215bb507da3262b682b8a2ec10b5

Too large L2 table sizes cause unbounded allocations. Images actually
created by qemu-img only have 512 byte or 4k L2 tables.

To keep things consistent with cluster sizes, allow ranges between 512
bytes and 64k (in fact, down to 1 entry = 8 bytes is technically
working, but L2 table sizes smaller than a cluster don't make a lot of
sense).

This also means that the number of bytes on the virtual disk that are
described by the same L2 table is limited to at most 8k * 64k or 2^29,
preventively avoiding any integer overflows.

Cc: [email protected]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Benoit Canet <[email protected]>

Index: xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/block-qcow.c
===================================================================
--- xen-4.5.1-testing.orig/tools/qemu-xen-traditional-dir-remote/block-qcow.c
+++ xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/block-qcow.c
@@ -126,6 +126,10 @@ static int qcow_open(BlockDriverState *b
         goto fail;
     if (header.size <= 1 || header.cluster_bits < 9)
         goto fail;
+    /* l2_bits specifies number of entries; storing a uint64_t in each entry,
+     * so bytes = num_entries << 3. */
+    if (header.l2_bits < 9 - 3 || header.l2_bits > 16 - 3)
+        goto fail;
     if (header.crypt_method > QCOW_CRYPT_AES)
         goto fail;
     s->crypt_method_header = header.crypt_method;
++++++ CVE-2014-0222-qemuu-qcow1-validate-l2-table-size.patch ++++++
References: bsc#877642

Subject: qcow1: Validate L2 table size (CVE-2014-0222)
From: Kevin Wolf [email protected] Thu May 15 16:10:11 2014 +0200
Date: Mon May 19 11:36:49 2014 +0200:
Git: 42eb58179b3b215bb507da3262b682b8a2ec10b5

Too large L2 table sizes cause unbounded allocations. Images actually
created by qemu-img only have 512 byte or 4k L2 tables.

To keep things consistent with cluster sizes, allow ranges between 512
bytes and 64k (in fact, down to 1 entry = 8 bytes is technically
working, but L2 table sizes smaller than a cluster don't make a lot of
sense).

This also means that the number of bytes on the virtual disk that are
described by the same L2 table is limited to at most 8k * 64k or 2^29,
preventively avoiding any integer overflows.

Cc: [email protected]
Signed-off-by: Kevin Wolf <[email protected]>
Reviewed-by: Benoit Canet <[email protected]>

Index: xen-4.5.1-testing/tools/qemu-xen-dir-remote/block/qcow.c
===================================================================
--- xen-4.5.1-testing.orig/tools/qemu-xen-dir-remote/block/qcow.c
+++ xen-4.5.1-testing/tools/qemu-xen-dir-remote/block/qcow.c
@@ -147,6 +147,14 @@ static int qcow_open(BlockDriverState *b
         goto fail;
     }
 
+    /* l2_bits specifies number of entries; storing a uint64_t in each entry,
+     * so bytes = num_entries << 3. */
+    if (header.l2_bits < 9 - 3 || header.l2_bits > 16 - 3) {
+        error_setg(errp, "L2 table size must be between 512 and 64k");
+        ret = -EINVAL;
+        goto fail;
+    }
+
     if (header.crypt_method > QCOW_CRYPT_AES) {
         error_setg(errp, "invalid encryption method in qcow header");
         ret = -EINVAL;
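
The limits in the two qcow1 hunks above follow directly from the arithmetic in the quoted commit
message: each L2 entry is a uint64_t (8 bytes), so restricting l2_bits to 9 - 3 .. 16 - 3 allows
64 to 8192 entries, i.e. L2 tables between 512 bytes and 64k; with clusters of at most 64k, one
L2 table then describes at most 8192 * 65536 = 2^29 bytes. A small, hypothetical standalone
program (not part of either patch) reproducing those numbers:

/* Hypothetical sketch, not part of either patch: reproduce the L2 table
 * size bounds quoted in the CVE-2014-0222 commit message. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    for (unsigned int l2_bits = 9 - 3; l2_bits <= 16 - 3; l2_bits++) {
        uint64_t entries     = UINT64_C(1) << l2_bits; /* 64 .. 8192           */
        uint64_t table_bytes = entries << 3;           /* 512 bytes .. 64k     */
        uint64_t max_mapped  = entries << 16;          /* with 64k clusters    */

        printf("l2_bits=%2u  table=%5" PRIu64 " B  maps up to %9" PRIu64 " B\n",
               l2_bits, table_bytes, max_mapped);
    }
    /* The last iteration prints 536870912 B, i.e. the 2^29 bound above. */
    return 0;
}
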
++++++ CVE-2015-4037-qemut-smb-config-dir-name.patch ++++++
References: bsc#932267

Subject: slirp: use less predictable directory name in /tmp for smb config (CVE-2015-4037)
From: Michael Tokarev [email protected] Thu May 28 14:12:26 2015 +0300
Date: Wed Jun 3 14:21:45 2015 +0300:
Git: 8b8f1c7e9ddb2e88a144638f6527bf70e32343e3

In this version I used mkdtemp(3) which is:

        _BSD_SOURCE
        || /* Since glibc 2.10: */
            (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)

(POSIX.1-2008), so should be available on systems we care about.

While at it, reset the resulting directory name within smb structure
on error so cleanup function won't try to remove directory which we
failed to create.

Signed-off-by: Michael Tokarev <[email protected]>
Reviewed-by: Markus Armbruster <[email protected]>

Index: xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.c
===================================================================
--- xen-4.5.1-testing.orig/tools/qemu-xen-traditional-dir-remote/net.c
+++ xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.c
@@ -624,9 +624,10 @@ void net_slirp_smb(const char *exported_
     }
 
     /* XXX: better tmp dir construction */
-    snprintf(smb_dir, sizeof(smb_dir), "/tmp/qemu-smb.%d", getpid());
-    if (mkdir(smb_dir, 0700) < 0) {
+    snprintf(smb_dir, sizeof(smb_dir), "/tmp/qemu-smb.XXXXXX");
+    if (!mkdtemp(smb_dir)) {
         fprintf(stderr, "qemu: could not create samba server dir '%s'\n", smb_dir);
+        smb_dir[0] = 0;
         exit(1);
     }
     snprintf(smb_conf, sizeof(smb_conf), "%s/%s", smb_dir, "smb.conf");
++++++ CVE-2015-4037-qemuu-smb-config-dir-name.patch ++++++
References: bsc#932267

Subject: slirp: use less predictable directory name in /tmp for smb config (CVE-2015-4037)
From: Michael Tokarev [email protected] Thu May 28 14:12:26 2015 +0300
Date: Wed Jun 3 14:21:45 2015 +0300:
Git: 8b8f1c7e9ddb2e88a144638f6527bf70e32343e3

In this version I used mkdtemp(3) which is:

        _BSD_SOURCE
        || /* Since glibc 2.10: */
            (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)

(POSIX.1-2008), so should be available on systems we care about.

While at it, reset the resulting directory name within smb structure
on error so cleanup function won't try to remove directory which we
failed to create.

Signed-off-by: Michael Tokarev <[email protected]>
Reviewed-by: Markus Armbruster <[email protected]>

Index: xen-4.5.1-testing/tools/qemu-xen-dir-remote/net/slirp.c
===================================================================
--- xen-4.5.1-testing.orig/tools/qemu-xen-dir-remote/net/slirp.c
+++ xen-4.5.1-testing/tools/qemu-xen-dir-remote/net/slirp.c
@@ -481,7 +481,6 @@ static void slirp_smb_cleanup(SlirpState
 static int slirp_smb(SlirpState* s, const char *exported_dir,
                      struct in_addr vserver_addr)
 {
-    static int instance;
     char smb_conf[128];
     char smb_cmdline[128];
     struct passwd *passwd;
@@ -505,10 +504,10 @@ static int slirp_smb(SlirpState* s, cons
         return -1;
     }
 
-    snprintf(s->smb_dir, sizeof(s->smb_dir), "/tmp/qemu-smb.%ld-%d",
-             (long)getpid(), instance++);
-    if (mkdir(s->smb_dir, 0700) < 0) {
+    snprintf(s->smb_dir, sizeof(s->smb_dir), "/tmp/qemu-smb.XXXXXX");
+    if (!mkdtemp(s->smb_dir)) {
         error_report("could not create samba server dir '%s'", s->smb_dir);
+        s->smb_dir[0] = 0;
         return -1;
     }
     snprintf(smb_conf, sizeof(smb_conf), "%s/%s", s->smb_dir, "smb.conf");
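
Both CVE-2015-4037 hunks above replace a predictable /tmp/qemu-smb.<pid> name with the POSIX
mkdtemp(3) idiom: hand mkdtemp() a template ending in XXXXXX, let it create the directory with
mode 0700 under an unpredictable name, and blank the stored path on failure so later cleanup
code does not touch a directory that was never created. A minimal, hypothetical sketch of that
idiom outside of qemu:

/* Hypothetical sketch, not qemu code: the mkdtemp(3) idiom used by the
 * two CVE-2015-4037 hunks above. */
#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char smb_dir[128];

    snprintf(smb_dir, sizeof(smb_dir), "/tmp/qemu-smb.XXXXXX");
    if (!mkdtemp(smb_dir)) {       /* creates the directory with mode 0700 */
        perror("mkdtemp");
        smb_dir[0] = 0;            /* keep cleanup code from touching a bogus path */
        return 1;
    }
    printf("created %s\n", smb_dir);
    /* ... run the service, then remove the directory during cleanup ... */
    return 0;
}
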
++++++ CVE-2015-7835-xsa148.patch ++++++
References: bsc#950367 CVE-2015-7835 XSA-148

x86: guard against undue super page PTE creation

When optional super page support got added (commit bd1cd81d64 "x86: PV
support for hugepages"), two adjustments were missed: mod_l2_entry()
needs to consider the PSE and RW bits when deciding whether to use the
fast path, and the PSE bit must not be removed from L2_DISALLOW_MASK
unconditionally.

This is CVE-2015-7835 / XSA-148.

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Tim Deegan <[email protected]>

Index: xen-4.5.1-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.5.1-testing.orig/xen/arch/x86/mm.c
+++ xen-4.5.1-testing/xen/arch/x86/mm.c
@@ -162,7 +162,10 @@ static void put_superpage(unsigned long
 static uint32_t base_disallow_mask;
 /* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
 #define L1_DISALLOW_MASK ((base_disallow_mask | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
-#define L2_DISALLOW_MASK (base_disallow_mask & ~_PAGE_PSE)
+
+#define L2_DISALLOW_MASK (unlikely(opt_allow_superpage) \
+                          ? base_disallow_mask & ~_PAGE_PSE \
+                          : base_disallow_mask)
 
 #define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
                              base_disallow_mask :       \
@@ -1790,7 +1793,10 @@ static int mod_l2_entry(l2_pgentry_t *pl
         }
 
         /* Fast path for identical mapping and presence. */
-        if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
+        if ( !l2e_has_changed(ol2e, nl2e,
+                              unlikely(opt_allow_superpage)
+                              ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
+                              : _PAGE_PRESENT) )
         {
             adjust_guest_l2e(nl2e, d);
             if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
++++++ libxl.set-migration-constraints-from-cmdline.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -323,7 +323,7 @@
 ===================================================================
 --- xen-4.5.1-testing.orig/tools/libxl/libxl_dom.c
 +++ xen-4.5.1-testing/tools/libxl/libxl_dom.c
-@@ -1808,6 +1808,7 @@ void libxl__domain_suspend(libxl__egc *e
+@@ -1815,6 +1815,7 @@ void libxl__domain_suspend(libxl__egc *e
  
      dss->xcflags = (live ? XCFLAGS_LIVE : 0)
            | (debug ? XCFLAGS_DEBUG : 0)

++++++ xen-hvm-default-bridge.patch ++++++
--- /var/tmp/diff_new_pack.Do0HSP/_old  2015-11-02 12:54:48.000000000 +0100
+++ /var/tmp/diff_new_pack.Do0HSP/_new  2015-11-02 12:54:48.000000000 +0100
@@ -1,7 +1,7 @@
-Index: xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/net.h
+Index: xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.h
 ===================================================================
---- xen-4.2.3-testing.orig/tools/qemu-xen-traditional-dir-remote/net.h
-+++ xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/net.h
+--- xen-4.5.1-testing.orig/tools/qemu-xen-traditional-dir-remote/net.h
++++ xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.h
 @@ -107,8 +107,8 @@ void net_host_device_add(const char *dev
  void net_host_device_remove(int vlan_id, const char *device);
  
@@ -13,11 +13,11 @@
  #endif
  #ifdef __sun__
  #define SMBD_COMMAND "/usr/sfw/sbin/smbd"
-Index: xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/net.c
+Index: xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.c
 ===================================================================
---- xen-4.2.3-testing.orig/tools/qemu-xen-traditional-dir-remote/net.c
-+++ xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/net.c
-@@ -1764,9 +1764,10 @@ int net_client_init(const char *device,
+--- xen-4.5.1-testing.orig/tools/qemu-xen-traditional-dir-remote/net.c
++++ xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/net.c
+@@ -1765,9 +1765,10 @@ int net_client_init(const char *device,
              }
             if (get_param_value(script_arg, sizeof(script_arg), "scriptarg", p) == 0 &&
                 get_param_value(script_arg, sizeof(script_arg), "bridge", p) == 0) { /* deprecated; for xend compatibility */
@@ -30,10 +30,10 @@
          }
      } else
  #endif
-Index: xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
+Index: xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
 ===================================================================
---- xen-4.2.3-testing.orig/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
-+++ xen-4.2.3-testing/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
+--- xen-4.5.1-testing.orig/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
++++ xen-4.5.1-testing/tools/qemu-xen-traditional-dir-remote/i386-dm/qemu-ifup-Linux
 @@ -1,36 +1,22 @@
  #!/bin/sh
  


++++++ xsa149.patch ++++++
xen: free domain's vcpu array

This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
guest").

This is XSA-149.

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Ian Campbell <[email protected]>

Index: xen-4.5.1-testing/xen/common/domain.c
===================================================================
--- xen-4.5.1-testing.orig/xen/common/domain.c
+++ xen-4.5.1-testing/xen/common/domain.c
@@ -831,6 +831,7 @@ static void complete_domain_destroy(stru
 
     xsm_free_security_domain(d);
     free_cpumask_var(d->domain_dirty_cpumask);
+    xfree(d->vcpu);
     free_domain_struct(d);
 
     send_global_virq(VIRQ_DOM_EXC);
++++++ xsa151.patch ++++++
xenoprof: free domain's vcpu array

This was overlooked in fb442e2171 ("x86_64: allow more vCPU-s per
guest").

This is XSA-151.

Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Ian Campbell <[email protected]>

Index: xen-4.5.1-testing/xen/common/xenoprof.c
===================================================================
--- xen-4.5.1-testing.orig/xen/common/xenoprof.c
+++ xen-4.5.1-testing/xen/common/xenoprof.c
@@ -239,6 +239,7 @@ static int alloc_xenoprof_struct(
     d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
     if ( d->xenoprof->rawbuf == NULL )
     {
+        xfree(d->xenoprof->vcpu);
         xfree(d->xenoprof);
         d->xenoprof = NULL;
         return -ENOMEM;
@@ -286,6 +287,7 @@ void free_xenoprof_pages(struct domain *
         free_xenheap_pages(x->rawbuf, order);
     }
 
+    xfree(x->vcpu);
     xfree(x);
     d->xenoprof = NULL;
 }
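xsa149.patch and xsa151.patch fix the same class of leak: a per-domain vcpu pointer array allocated during setup was never released. A small illustrative sketch of the pattern follows, in plain C with placeholder names rather than the Xen allocator API; it shows both the error path fixed by xsa151.patch and the teardown path fixed by xsa149.patch.

#include <errno.h>
#include <stdlib.h>

struct prof_state {
    void **vcpu;      /* per-vcpu bookkeeping, allocated first */
    void  *rawbuf;    /* large sample buffer, allocated second */
};

/* Error path: if the second allocation fails, the first must be freed
 * before returning (the xfree(d->xenoprof->vcpu) added by xsa151.patch). */
int prof_alloc(struct prof_state **out, size_t nvcpu, size_t bufsz)
{
    struct prof_state *p = calloc(1, sizeof(*p));
    if (!p)
        return -ENOMEM;
    p->vcpu = calloc(nvcpu, sizeof(*p->vcpu));
    if (!p->vcpu) {
        free(p);
        return -ENOMEM;
    }
    p->rawbuf = malloc(bufsz);
    if (!p->rawbuf) {
        free(p->vcpu);          /* the previously missing cleanup */
        free(p);
        return -ENOMEM;
    }
    *out = p;
    return 0;
}

/* Teardown path: everything allocated at setup is released again
 * (analogous to the xfree(d->vcpu) added by xsa149.patch). */
void prof_free(struct prof_state *p)
{
    if (!p)
        return;
    free(p->rawbuf);
    free(p->vcpu);
    free(p);
}
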
++++++ xsa152.patch ++++++
x86: rate-limit logging in do_xen{oprof,pmu}_op()

Some of the sub-ops are accessible to all guests, and hence should be
rate-limited. In the xenoprof case, just like for XSA-146, include them
only in debug builds. Since the vPMU code is rather new, allow them to
be always present, but downgrade them to (rate limited) guest messages.

This is XSA-152.

Signed-off-by: Jan Beulich <[email protected]>

Index: xen-4.5.1-testing/xen/common/xenoprof.c
===================================================================
--- xen-4.5.1-testing.orig/xen/common/xenoprof.c
+++ xen-4.5.1-testing/xen/common/xenoprof.c
@@ -676,15 +676,13 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
     
     if ( (op < 0) || (op > XENOPROF_last_op) )
     {
-        printk("xenoprof: invalid operation %d for domain %d\n",
-               op, current->domain->domain_id);
+        gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op);
         return -EINVAL;
     }
 
     if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
     {
-        printk("xenoprof: dom %d denied privileged operation %d\n",
-               current->domain->domain_id, op);
+        gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op);
         return -EPERM;
     }
 
@@ -907,8 +905,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
     spin_unlock(&xenoprof_lock);
 
     if ( ret < 0 )
-        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
-               op, current->domain->domain_id, ret);
+        gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret);
 
     return ret;
 }
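The point of the change above is that these log paths are reachable by unprivileged guests, so an unlimited printk() lets a guest flood the hypervisor log. A toy sketch of the rate-limiting idea — a fixed per-second message budget — is shown below; it is illustrative only and does not reflect how Xen's gdprintk()/guest log-level machinery is actually implemented.

#include <stdio.h>
#include <time.h>

#define LOG_BURST 10                 /* messages allowed per window */

/* Allow at most LOG_BURST messages per one-second window; drop the rest. */
static void guest_log(int op, const char *msg)
{
    static time_t window;
    static int budget;
    time_t now = time(NULL);

    if (now != window) {             /* new window: refill the budget */
        window = now;
        budget = LOG_BURST;
    }
    if (budget > 0) {
        budget--;
        fprintf(stderr, "guest op %d: %s\n", op, msg);
    }
}

int main(void)
{
    for (int i = 0; i < 1000; i++)   /* only the first LOG_BURST lines appear */
        guest_log(i, "denied privileged operation");
    return 0;
}
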
++++++ xsa153-libxl.patch ++++++
From 27593ec62bdad8621df910931349d964a6dbaa8c Mon Sep 17 00:00:00 2001
From: Ian Jackson <[email protected]>
Date: Wed, 21 Oct 2015 16:18:30 +0100
Subject: [PATCH XSA-153 v3] libxl: adjust PoD target by memory fudge, too

PoD guests need to balloon at least as far as required by PoD, or risk
crashing.  Currently they don't necessarily know what the right value
is, because our memory accounting is (at the very least) confusing.

Apply the memory limit fudge factor to the in-hypervisor PoD memory
target, too.  This will increase the size of the guest's PoD cache by
the fudge factor LIBXL_MAXMEM_CONSTANT (currently 1Mby).  This ensures
that even with a slightly-off balloon driver, the guest will be
stable even under memory pressure.

There are two call sites of xc_domain_set_pod_target that need fixing:

The one in libxl_set_memory_target is straightforward.

The one in xc_hvm_build_x86.c:setup_guest is more awkward.  Simply
setting the PoD target differently does not work because the various
amounts of memory during domain construction no longer match up.
Instead, we adjust the guest memory target in xenstore (but only for
PoD guests).

This introduces a 1Mby discrepancy between the balloon target of a PoD
guest at boot, and the target set by an apparently-equivalent `xl
mem-set' (or similar) later.  This approach is low-risk for a security
fix but we need to fix this up properly in xen.git#staging and
probably also in stable trees.

This is XSA-153.

Signed-off-by: Ian Jackson <[email protected]>
---
 tools/libxl/libxl.c     |    2 +-
 tools/libxl/libxl_dom.c |    9 ++++++++-
 2 files changed, 9 insertions(+), 2 deletions(-)

Index: xen-4.5.1-testing/tools/libxl/libxl.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl.c
+++ xen-4.5.1-testing/tools/libxl/libxl.c
@@ -4859,7 +4859,7 @@ retry_transaction:
 
     new_target_memkb -= videoram;
     rc = xc_domain_set_pod_target(ctx->xch, domid,
-            new_target_memkb / 4, NULL, NULL, NULL);
+            (new_target_memkb + LIBXL_MAXMEM_CONSTANT) / 4, NULL, NULL, NULL);
     if (rc != 0) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
                 "xc_domain_set_pod_target domid=%d, memkb=%d "
Index: xen-4.5.1-testing/tools/libxl/libxl_dom.c
===================================================================
--- xen-4.5.1-testing.orig/tools/libxl/libxl_dom.c
+++ xen-4.5.1-testing/tools/libxl/libxl_dom.c
@@ -446,6 +446,7 @@ int libxl__build_post(libxl__gc *gc, uin
     xs_transaction_t t;
     char **ents;
     int i, rc;
+    int64_t mem_target_fudge;
 
     rc = libxl_domain_sched_params_set(CTX, domid, &info->sched_params);
     if (rc)
@@ -472,11 +473,17 @@ int libxl__build_post(libxl__gc *gc, uin
         }
     }
 
+    mem_target_fudge =
+        (info->type == LIBXL_DOMAIN_TYPE_HVM &&
+         info->max_memkb > info->target_memkb)
+        ? LIBXL_MAXMEM_CONSTANT : 0;
+
     ents = libxl__calloc(gc, 12 + (info->max_vcpus * 2) + 2, sizeof(char *));
     ents[0] = "memory/static-max";
     ents[1] = GCSPRINTF("%"PRId64, info->max_memkb);
     ents[2] = "memory/target";
-    ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb);
+    ents[3] = GCSPRINTF("%"PRId64, info->target_memkb - info->video_memkb
+                        - mem_target_fudge);
     ents[4] = "memory/videoram";
     ents[5] = GCSPRINTF("%"PRId64, info->video_memkb);
     ents[6] = "domid";
