Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package virt-manager for openSUSE:Factory
checked in at 2022-11-08 10:53:34

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/virt-manager (Old)
 and      /work/SRC/openSUSE:Factory/.virt-manager.new.1597 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "virt-manager" Tue Nov 8 10:53:34 2022 rev:237 rq:1034192 version:4.1.0 Changes: -------- --- /work/SRC/openSUSE:Factory/virt-manager/virt-manager.changes 2022-10-28 19:29:51.326729153 +0200 +++ /work/SRC/openSUSE:Factory/.virt-manager.new.1597/virt-manager.changes 2022-11-08 10:53:40.049497733 +0100 @@ -1,0 +2,19 @@ +Mon Nov 7 05:32:57 MST 2022 - [email protected] + +- Refresh test skips +- Drop the very old "Obsoletes: python-virtinst <= 0.600.4" + virt-manager.spec + +------------------------------------------------------------------- +Fri Nov 4 11:01:28 MDT 2022 - [email protected] + +- Upstream bug fixes (bsc#1027942) + 11a887ec-cli-disk-Add-driver.metadata_cache-options.patch + 7295ebfb-tests-cli-Fix-test-output-after-previous-commit.patch + 58f5e36d-fsdetails-Fix-an-error-with-source.socket-of-virtiofs.patch + c22a876e-tests-Add-a-compat-check-for-linux2020-in-amd-sev-test-case.patch + fbdf0516-cli-cpu-Add-maxphysaddr.mode-bits-options.patch + b0d05167-cloner-Sync-uuid-and-sysinfo-system-uuid.patch + 999ccb85-virt-install-unattended-and-cloud-init-conflict.patch + +------------------------------------------------------------------- New: ---- 11a887ec-cli-disk-Add-driver.metadata_cache-options.patch 58f5e36d-fsdetails-Fix-an-error-with-source.socket-of-virtiofs.patch 7295ebfb-tests-cli-Fix-test-output-after-previous-commit.patch 999ccb85-virt-install-unattended-and-cloud-init-conflict.patch b0d05167-cloner-Sync-uuid-and-sysinfo-system-uuid.patch c22a876e-tests-Add-a-compat-check-for-linux2020-in-amd-sev-test-case.patch fbdf0516-cli-cpu-Add-maxphysaddr.mode-bits-options.patch ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ virt-manager.spec ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:41.925508897 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:41.929508921 +0100 @@ -42,6 +42,13 @@ Source3: virt-manager-supportconfig # Upstream Patches Patch1: revert-363fca41-virt-install-Require-osinfo-for-non-x86-HVM-case-too.patch +Patch2: 11a887ec-cli-disk-Add-driver.metadata_cache-options.patch +Patch3: 7295ebfb-tests-cli-Fix-test-output-after-previous-commit.patch +Patch4: 58f5e36d-fsdetails-Fix-an-error-with-source.socket-of-virtiofs.patch +Patch5: c22a876e-tests-Add-a-compat-check-for-linux2020-in-amd-sev-test-case.patch +Patch6: fbdf0516-cli-cpu-Add-maxphysaddr.mode-bits-options.patch +Patch7: b0d05167-cloner-Sync-uuid-and-sysinfo-system-uuid.patch +Patch8: 999ccb85-virt-install-unattended-and-cloud-init-conflict.patch # SUSE Only Patch70: virtman-desktop.patch Patch71: virtman-kvm.patch @@ -162,7 +169,6 @@ Requires: python3-requests Provides: python3-virtinst Provides: virt-clone -Obsoletes: python-virtinst <= 0.600.4 Supplements: virt-manager %description -n virt-install @@ -210,23 +216,32 @@ donttest="$donttest or testCLI0001virt_install_many_devices" donttest="$donttest or testCLI0110virt_install_reinstall_cdrom" donttest="$donttest or testCLI0284virt_xml_build_pool_logical_disk" +donttest="$donttest or testCLI0285virt_xml_build_pool_logical_disk" donttest="$donttest or testCLI0276virt_xml_build_disk_domain" +donttest="$donttest or testCLI0277virt_xml_build_disk_domain" donttest="$donttest or testCLI0371virt_xml_add_disk_create_storage_start" +donttest="$donttest or testCLI0372virt_xml_add_disk_create_storage_start" # depends on osc/obs host cpu? 
donttest="$donttest or testCLI0003virt_install_singleton_config_2" donttest="$donttest or testCLI0283virt_xml_edit_cpu_host_copy" +donttest="$donttest or testCLI0284virt_xml_edit_cpu_host_copy" # RuntimeError: SEV launch security requires a Q35 machine -- due to patch for bsc#1196806, jsc#SLE-18834 ? donttest="$donttest or testCLI0162virt_install" # Expectsion <video> element donttest="$donttest or testCLI0168virt_install_s390x_cdrom" +donttest="$donttest or testCLI0169virt_install_s390x_cdrom" # missing <boot> element, extra <kernel> element donttest="$donttest or testCLI0189virt_install_xen_default" +donttest="$donttest or testCLI0190virt_install_xen_default" donttest="$donttest or testCLI0190virt_install_xen_pv" +donttest="$donttest or testCLI0191virt_install_xen_pv" # different <emulator> additional <model> in <interface> donttest="$donttest or testCLI0191virt_install_xen_hvm" donttest="$donttest or testCLI0192virt_install_xen_hvm" +donttest="$donttest or testCLI0193virt_install_xen_hvm" # different source image format donttest="$donttest or testCLI0199virt_install_bhyve_default_f27" +donttest="$donttest or testCLI0200virt_install_bhyve_default_f27" # Due to the above skips: # "there are XML properties that are untested in the test suite" donttest="$donttest or testCheckXMLBuilderProps" ++++++ 11a887ec-cli-disk-Add-driver.metadata_cache-options.patch ++++++ Subject: cli: --disk: Add driver.metadata_cache options From: Lin Ma [email protected] Tue Aug 16 12:59:57 2022 +0800 Date: Wed Aug 17 09:57:29 2022 -0400: Git: 11a887ece5b5bab287ff77b09337dc44c4e6e976 Properly setting the metadata cache size can provide better performance in case of using big qcow2 images. This patch introduces two driver options: * driver.metadata_cache.max_size * driver.metadata_cache.max_size.unit E.g. --disk ...,driver.type=qcow2,\ driver.metadata_cache.max_size=2,\ driver.metadata_cache.max_size.unit=MiB BTW, Metadata cache size control is currently supported only for qcow2. Regarding how to properly caluclate the cache size of qcow2, Please refer to qemu's documentation. 
Signed-off-by: Lin Ma <[email protected]> diff --git a/tests/data/cli/compare/virt-install-many-devices.xml b/tests/data/cli/compare/virt-install-many-devices.xml index a73343a9..a33dc16a 100644 --- a/tests/data/cli/compare/virt-install-many-devices.xml +++ b/tests/data/cli/compare/virt-install-many-devices.xml @@ -423,6 +423,15 @@ </source> <target dev="vdu" bus="virtio"/> </disk> + <disk type="file" device="disk"> + <driver name="qemu" type="qcow2"> + <metadata_cache> + <max_size unit="KiB">2048</max_size> + </metadata_cache> + </driver> + <source file="/tmp/disk1.qcow2"/> + <target dev="vdv" bus="virtio"/> + </disk> <controller type="usb" index="0" model="ich9-ehci1"> <address type="pci" domain="0" bus="0" slot="4" function="7"/> </controller> diff --git a/tests/test_cli.py b/tests/test_cli.py index 774db098..259ac78c 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -605,6 +605,7 @@ source.reservations.managed=no,source.reservations.source.type=unix,source.reser --disk path=/fooroot.img,size=.0001,transient=on --disk source.dir=/ --disk type=nvme,source.type=pci,source.managed=no,source.namespace=2,source.address.domain=0x0001,source.address.bus=0x02,source.address.slot=0x00,source.address.function=0x0 +--disk /tmp/disk1.qcow2,size=16,driver.type=qcow2,driver.metadata_cache.max_size=2048,driver.metadata_cache.max_size.unit=KiB --network user,mac=12:34:56:78:11:22,portgroup=foo,link_state=down,rom_bar=on,rom_file=/tmp/foo diff --git a/virtinst/cli.py b/virtinst/cli.py index c4dffd34..042b500f 100644 --- a/virtinst/cli.py +++ b/virtinst/cli.py @@ -3497,6 +3497,8 @@ class ParserDisk(VirtCLIParser): "driver.io": "io", "driver.name": "driver_name", "driver.type": "driver_type", + "driver.metadata_cache.max_size": "metadata_cache.max_size", + "driver.metadata_cache.max_size.unit": "metadata_cache.max_size.unit", } def _add_advertised_aliases(self): @@ -3696,6 +3698,11 @@ class ParserDisk(VirtCLIParser): cls.add_arg("driver.queues", "driver_queues") cls.add_arg("driver.error_policy", "error_policy") + cls.add_arg("driver.metadata_cache.max_size", + "driver_metadata_cache_max_size") + cls.add_arg("driver.metadata_cache.max_size.unit", + "driver_metadata_cache_max_size_unit") + cls.add_arg("iotune.read_bytes_sec", "iotune_rbs") cls.add_arg("iotune.write_bytes_sec", "iotune_wbs") cls.add_arg("iotune.total_bytes_sec", "iotune_tbs") diff --git a/virtinst/devices/disk.py b/virtinst/devices/disk.py index dc59fd13..9609ebac 100644 --- a/virtinst/devices/disk.py +++ b/virtinst/devices/disk.py @@ -481,6 +481,11 @@ class DeviceDisk(Device): driver_iothread = XMLProperty("./driver/@iothread", is_int=True) driver_queues = XMLProperty("./driver/@queues", is_int=True) + driver_metadata_cache_max_size = XMLProperty( + "./driver/metadata_cache/max_size", is_int=True) + driver_metadata_cache_max_size_unit = XMLProperty( + "./driver/metadata_cache/max_size/@unit") + error_policy = XMLProperty("./driver/@error_policy") serial = XMLProperty("./serial") wwn = XMLProperty("./wwn") ++++++ 58f5e36d-fsdetails-Fix-an-error-with-source.socket-of-virtiofs.patch ++++++ Subject: fsdetails: Fix an error with source.socket of virtiofs From: Lin Ma [email protected] Tue Aug 16 12:59:36 2022 +0800 Date: Wed Aug 17 10:24:10 2022 -0400: Git: 58f5e36da76277bfc7fb4d87293be60ef6e0cbc1 Using the source.socket of virtiofs needs a virtiofsd daemon launched outside of libvirtd, So the filesystem UI doesn't support it yet. If users need it they can set it manually in the XML editor. 
But if we view the filesystem info of such a VM on the details page, It fails with this error message: Traceback (most recent call last): File "/usr/share/virt-manager/virtManager/details/details.py", line 1713, in _refresh_page self._refresh_filesystem_page(dev) File "/usr/share/virt-manager/virtManager/details/details.py", line 2241, in _refresh_filesystem_page self.fsDetails.set_dev(dev) File "/usr/share/virt-manager/virtManager/device/fsdetails.py", line 193, in set_dev self.widget("fs-source").set_text(dev.source) TypeError: Argument 1 does not allow None as a value This patch fixes above issue by leaving the 'source path' info blank in case of source.socket. In this case, Considering that showing 'target path' info without source info is kind of meaningless, So this patch leaves the 'target path' info blank as well. Signed-off-by: Lin Ma <[email protected]> diff --git a/virtManager/device/fsdetails.py b/virtManager/device/fsdetails.py index 40868d1c..b9956e1d 100644 --- a/virtManager/device/fsdetails.py +++ b/virtManager/device/fsdetails.py @@ -190,7 +190,7 @@ class vmmFSDetails(vmmGObjectUI): self.widget("fs-format-combo"), dev.driver_format) if dev.type != DeviceFilesystem.TYPE_RAM: - self.widget("fs-source").set_text(dev.source) + self.widget("fs-source").set_text(dev.source or "") else: self.widget("fs-ram-source-spin").set_value(int(dev.source) // 1024) self.widget("fs-target").set_text(dev.target or "") ++++++ 7295ebfb-tests-cli-Fix-test-output-after-previous-commit.patch ++++++ Subject: tests: cli: Fix test output after previous commit From: Cole Robinson [email protected] Wed Aug 17 10:21:31 2022 -0400 Date: Wed Aug 17 10:21:31 2022 -0400: Git: 7295ebfb02e1a6ebcc1fc94c4aecfe8e21a0e567 Signed-off-by: Cole Robinson <[email protected]> diff --git a/tests/data/cli/compare/virt-install-many-devices.xml b/tests/data/cli/compare/virt-install-many-devices.xml index a33dc16a..c27512d1 100644 --- a/tests/data/cli/compare/virt-install-many-devices.xml +++ b/tests/data/cli/compare/virt-install-many-devices.xml @@ -424,7 +424,7 @@ <target dev="vdu" bus="virtio"/> </disk> <disk type="file" device="disk"> - <driver name="qemu" type="qcow2"> + <driver name="qemu" type="qcow2" discard="unmap"> <metadata_cache> <max_size unit="KiB">2048</max_size> </metadata_cache> ++++++ 999ccb85-virt-install-unattended-and-cloud-init-conflict.patch ++++++ Subject: virt-install: --unattended and --cloud-init conflict From: Cole Robinson [email protected] Sun Aug 21 16:47:26 2022 -0400 Date: Sun Aug 21 16:47:26 2022 -0400: Git: 999ccb85e3e4189386786256cdf70cf5238cf785 Make it an explicit error, otherwise unattended is preferred and cloud-init is ignored https://bugzilla.redhat.com/show_bug.cgi?id=2117157 Signed-off-by: Cole Robinson <[email protected]> --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1113,6 +1113,7 @@ c.add_invalid("--disk none --boot networ c.add_invalid("--nodisks --boot network --arch mips --virt-type kvm", grep="any virtualization options for architecture 'mips'") c.add_invalid("--nodisks --boot network --paravirt --arch mips", grep=" 'xen' for architecture 'mips'") c.add_invalid("--osinfo generic --launchSecurity sev --connect " + utils.URIs.kvm_amd_sev, grep="SEV launch security requires a Q35 UEFI machine") +c.add_invalid("--disk none --cloud-init --unattended --install fedora30", grep="--unattended and --cloud-init can not") --- a/virtinst/virtinstall.py +++ b/virtinst/virtinstall.py @@ -407,6 +407,9 @@ def build_installer(options, guest, inst else: extra_args = 
[installdata.kernel_args] + if options.unattended and options.cloud_init: + fail("--unattended and --cloud-init can not be specified together.") + if options.unattended: unattended_data = cli.parse_unattended(options.unattended) ++++++ b0d05167-cloner-Sync-uuid-and-sysinfo-system-uuid.patch ++++++ Subject: cloner: Sync <uuid> and <sysinfo> system uuid From: Cole Robinson [email protected] Sun Aug 21 16:21:10 2022 -0400 Date: Sun Aug 21 16:21:10 2022 -0400: Git: b0d0516736320315a70f74aff3759fb35dd35d9d Otherwise libvirt errors like: ERROR UUID mismatch between <uuid> and <sysinfo> https://bugzilla.redhat.com/show_bug.cgi?id=2038040 Signed-off-by: Cole Robinson <[email protected]> diff --git a/tests/data/cli/compare/virt-clone-auto-unmanaged.xml b/tests/data/cli/compare/virt-clone-auto-unmanaged.xml index 21a9a639..f2043be2 100644 --- a/tests/data/cli/compare/virt-clone-auto-unmanaged.xml +++ b/tests/data/cli/compare/virt-clone-auto-unmanaged.xml @@ -1,6 +1,11 @@ <domain type="test"> <name>origtest-clone</name> <uuid>00000000-1111-2222-3333-444444444444</uuid> + <sysinfo type="smbios"> + <system> + <entry name="uuid">00000000-1111-2222-3333-444444444444</entry> + </system> + </sysinfo> <memory>8388608</memory> <currentMemory>2097152</currentMemory> <vcpu>2</vcpu> diff --git a/tests/data/cli/compare/virt-clone-unmanaged-preserve.xml b/tests/data/cli/compare/virt-clone-unmanaged-preserve.xml index 3bdbbbe3..c003ed3e 100644 --- a/tests/data/cli/compare/virt-clone-unmanaged-preserve.xml +++ b/tests/data/cli/compare/virt-clone-unmanaged-preserve.xml @@ -1,6 +1,11 @@ <domain type="test"> <name>clonetest</name> <uuid>00000000-1111-2222-3333-444444444444</uuid> + <sysinfo type="smbios"> + <system> + <entry name="uuid">00000000-1111-2222-3333-444444444444</entry> + </system> + </sysinfo> <memory>8388608</memory> <currentMemory>2097152</currentMemory> <vcpu>2</vcpu> diff --git a/tests/data/cli/virtclone/clone-disk.xml b/tests/data/cli/virtclone/clone-disk.xml index da1eb0a6..2f6e916d 100644 --- a/tests/data/cli/virtclone/clone-disk.xml +++ b/tests/data/cli/virtclone/clone-disk.xml @@ -1,6 +1,11 @@ <domain type='test' id='1'> <name>origtest</name> <uuid>db69fa1f-eef0-e567-3c20-3ef16f10376b</uuid> + <sysinfo type='smbios'> + <system> + <entry name='uuid'>db69fa1f-eef0-e567-3c20-3ef16f10376b</entry> + </system> + </sysinfo> <memory>8388608</memory> <currentMemory>2097152</currentMemory> <vcpu>2</vcpu> diff --git a/virtinst/cloner.py b/virtinst/cloner.py index 34a702f9..9334513c 100644 --- a/virtinst/cloner.py +++ b/virtinst/cloner.py @@ -352,8 +352,7 @@ class Cloner(object): """ self._new_guest.id = None self._new_guest.title = None - self._new_guest.uuid = None - self._new_guest.uuid = Guest.generate_uuid(self.conn) + self.set_clone_uuid(Guest.generate_uuid(self.conn)) for dev in self._new_guest.devices.graphics: if dev.port and dev.port != -1: @@ -408,6 +407,9 @@ class Cloner(object): Override the new VMs generated UUId """ self._new_guest.uuid = uuid + for sysinfo in self._new_guest.sysinfo: + if sysinfo.system_uuid: + sysinfo.system_uuid = uuid def set_replace(self, val): """ ++++++ c22a876e-tests-Add-a-compat-check-for-linux2020-in-amd-sev-test-case.patch ++++++ Subject: tests: Add a compat check for linux2020 in amd-sev test case From: Lin Ma [email protected] Fri Aug 19 18:18:09 2022 +0800 Date: Sat Aug 20 09:59:27 2022 -0400: Git: c22a876e9a63cb7114e2b008f2e24682c8bbef3e It avoids amd-sev test failure if using older osinfo-db. 
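For illustration only, a rough standalone sketch of how such an osinfo-db probe could be
written; the osinfo-query call and the function name below are assumptions and not the test
suite's actual no_osinfo_linux2020_virtio helper referenced in the hunk that follows:

    import shutil
    import subprocess

    def no_osinfo_linux2020():
        """Return a skip reason when the local osinfo-db lacks 'linux2020'."""
        if not shutil.which("osinfo-query"):
            return "osinfo-query (libosinfo tools) not installed"
        result = subprocess.run(["osinfo-query", "os"],
                                capture_output=True, text=True)
        if result.returncode != 0 or "linux2020" not in result.stdout:
            return "osinfo-db too old: no linux2020 entry"
        return None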
Signed-off-by: Lin Ma <[email protected]> --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1105,7 +1105,7 @@ c.add_compare("--connect " + utils.URIs. c.add_compare("--connect %(URI-KVM-X86)s --os-variant fedora26 --graphics spice --controller usb,model=none", "graphics-usb-disable") c.add_compare("--osinfo generic --boot uefi --disk size=1", "boot-uefi") c.add_compare("--osinfo generic --boot uefi --disk size=1 --tpm none --connect " + utils.URIs.kvm_x86_oldfirmware, "boot-uefi-oldcaps") -c.add_compare("--osinfo linux2020 --boot uefi --launchSecurity sev --connect " + utils.URIs.kvm_amd_sev, "amd-sev") +c.add_compare("--osinfo linux2020 --boot uefi --launchSecurity sev --connect " + utils.URIs.kvm_amd_sev, "amd-sev", prerun_check=no_osinfo_linux2020_virtio) c.add_invalid("--disk none --location nfs:example.com/fake --nonetworks", grep="NFS URL installs are no longer supported") c.add_invalid("--disk none --boot network --machine foobar", grep="domain type None with machine 'foobar'") ++++++ fbdf0516-cli-cpu-Add-maxphysaddr.mode-bits-options.patch ++++++ Subject: cli: --cpu: Add maxphysaddr.{mode,bits} options From: Lin Ma [email protected] Fri Aug 19 18:18:50 2022 +0800 Date: Sat Aug 20 10:03:11 2022 -0400: Git: fbdf05162606e4d70506b65d0dd647a59f229253 This commit added support for cpu physical address bits control, It's useful for VMs with huge amount of ram. E.g. --cpu Cascadelake-Server,maxphysaddr.mode=emulate,maxphysaddr.bits=46 Signed-off-by: Lin Ma <[email protected]> diff --git a/tests/data/cli/compare/virt-install-many-devices.xml b/tests/data/cli/compare/virt-install-many-devices.xml index c27512d1..e4a7da8f 100644 --- a/tests/data/cli/compare/virt-install-many-devices.xml +++ b/tests/data/cli/compare/virt-install-many-devices.xml @@ -194,6 +194,7 @@ <bandwidth initiator="0" target="2" cache="1" type="access" value="409600" unit="KiB"/> </interconnects> </numa> + <maxphysaddr mode="emulate" bits="46"/> </cpu> <clock offset="utc"> <timer name="pit" tickpolicy="catchup" present="yes"/> diff --git a/tests/data/cli/compare/virt-install-testdriver-edgecases.xml b/tests/data/cli/compare/virt-install-testdriver-edgecases.xml index f129d089..3cc385c0 100644 --- a/tests/data/cli/compare/virt-install-testdriver-edgecases.xml +++ b/tests/data/cli/compare/virt-install-testdriver-edgecases.xml @@ -17,7 +17,9 @@ <pae/> <vmport state="off"/> </features> - <cpu mode="host-passthrough" migratable="on"/> + <cpu mode="host-passthrough" migratable="on"> + <maxphysaddr mode="passthrough"/> + </cpu> <clock offset="utc"/> <pm> <suspend-to-mem enabled="no"/> diff --git a/tests/test_cli.py b/tests/test_cli.py index 9f6c3bc0..ef27276a 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -511,7 +511,8 @@ numa.interconnects.latency0.initiator=0,numa.interconnects.latency0.target=0,num numa.interconnects.latency1.initiator=0,numa.interconnects.latency1.target=2,numa.interconnects.latency1.cache=1,numa.interconnects.latency1.type=access,numa.interconnects.latency1.value=10,numa.interconnects.latency1.unit=ns,\ numa.interconnects.bandwidth0.initiator=0,numa.interconnects.bandwidth0.target=0,numa.interconnects.bandwidth0.type=access,numa.interconnects.bandwidth0.value=204800,\ numa.interconnects.bandwidth1.initiator=0,numa.interconnects.bandwidth1.target=2,numa.interconnects.bandwidth1.cache=1,numa.interconnects.bandwidth1.type=access,numa.interconnects.bandwidth1.value=409600,numa.interconnects.bandwidth1.unit=KiB,\ -cache.mode=emulate,cache.level=3 +cache.mode=emulate,cache.level=3,\ 
+maxphysaddr.mode=emulate,maxphysaddr.bits=46 --numatune 1,2,3,5-7,^6,mode=strict,\ @@ -880,7 +881,7 @@ c.add_compare("--pxe " # Hitting test driver specific output c.add_compare("--connect " + utils.URIs.test_suite + " " -"--cpu host-passthrough,migratable=on " # migratable=on is only accepted with host-passthrough +"--cpu host-passthrough,migratable=on,maxphysaddr.mode=passthrough " # migratable=on is only accepted with host-passthrough "--seclabel label=foobar.label,a1,z2,b3,relabel=yes,type=dynamic " # fills in default model=testModel "--tpm default " # --tpm default when domcaps missing "", diff --git a/virtinst/cli.py b/virtinst/cli.py index 388c5263..5ac8266b 100644 --- a/virtinst/cli.py +++ b/virtinst/cli.py @@ -2386,6 +2386,9 @@ class ParserCPU(VirtCLIParser): cls.add_arg("cache.level", "cache.level") cls.add_arg("cache.mode", "cache.mode") + cls.add_arg("maxphysaddr.mode", "maxphysaddr.mode") + cls.add_arg("maxphysaddr.bits", "maxphysaddr.bits") + # CPU features # These are handled specially in _parse cls.add_arg("force", None, lookup_cb=None, cb=cls.set_feature_cb) diff --git a/virtinst/domain/cpu.py b/virtinst/domain/cpu.py index 5de42b4e..c635932e 100644 --- a/virtinst/domain/cpu.py +++ b/virtinst/domain/cpu.py @@ -102,6 +102,17 @@ class _CPUFeature(XMLBuilder): policy = XMLProperty("./@policy") +class _CPUMaxphysaddr(XMLBuilder): + """ + Class for generating XML for <cpu> child node <maxphysaddr>. + """ + XML_NAME = "maxphysaddr" + _XML_PROP_ORDER = ["mode", "bits"] + + mode = XMLProperty("./@mode") + bits = XMLProperty("./@bits", is_int=True) + + ############## # NUMA cells # ############## @@ -211,7 +222,7 @@ class DomainCpu(XMLBuilder): _XML_PROP_ORDER = ["mode", "match", "check", "migratable", "model", "model_fallback", "model_vendor_id", "vendor", "topology", "cache", "features", - "cells", "latencies", "bandwidths"] + "cells", "latencies", "bandwidths", "maxphysaddr"] ################## @@ -242,6 +253,8 @@ class DomainCpu(XMLBuilder): latencies = XMLChildProperty(_NUMALatency, relative_xpath="./numa/interconnects") bandwidths = XMLChildProperty(_NUMABandwidth, relative_xpath="./numa/interconnects") + maxphysaddr = XMLChildProperty(_CPUMaxphysaddr, is_single=True) + ############################# # Special CPU mode handling # ++++++ revert-363fca41-virt-install-Require-osinfo-for-non-x86-HVM-case-too.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.053509658 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.053509658 +0100 @@ -29,7 +29,7 @@ VIRTINSTALL_OSINFO_DISABLE_REQUIRE=1. --- a/tests/test_cli.py +++ b/tests/test_cli.py -@@ -1162,7 +1162,6 @@ c.add_compare("--connect %(URI-KVM-ARMV7 +@@ -1169,7 +1169,6 @@ c.add_compare("--connect %(URI-KVM-ARMV7 ################# c.add_valid("--arch aarch64 --osinfo fedora19 --nodisks --pxe --connect " + utils.URIs.kvm_x86_nodomcaps, grep="Libvirt version does not support UEFI") # attempt to default to aarch64 UEFI, but it fails, but should only print warnings ++++++ virtinst-add-pvh-support.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.097509920 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.101509944 +0100 @@ -1,10 +1,10 @@ References: fate#326698 - Add pvh support to virt-manager At this time support is disabled in this patch. 
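Looking back at the fbdf0516 maxphysaddr options above, a minimal sketch (illustration only,
using xml.etree) of the <cpu> child element they generate, as seen in that patch's test data:

    import xml.etree.ElementTree as ET

    cpu = ET.Element("cpu")
    # --cpu ...,maxphysaddr.mode=emulate,maxphysaddr.bits=46
    ET.SubElement(cpu, "maxphysaddr", {"mode": "emulate", "bits": "46"})
    print(ET.tostring(cpu, encoding="unicode"))
    # In the patch's test data, mode="emulate" carries an explicit bits value,
    # while mode="passthrough" is emitted without one.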
-Index: virt-manager-4.0.0/virtManager/createvm.py +Index: virt-manager-4.1.0/virtManager/createvm.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/createvm.py -+++ virt-manager-4.0.0/virtManager/createvm.py +--- virt-manager-4.1.0.orig/virtManager/createvm.py ++++ virt-manager-4.1.0/virtManager/createvm.py @@ -844,6 +844,9 @@ class vmmCreateVM(vmmGObjectUI): break if label is None: @@ -15,10 +15,10 @@ # Determine if this is the default given by guest_lookup if (gtype == self._capsinfo.os_type and -Index: virt-manager-4.0.0/virtinst/domain/os.py +Index: virt-manager-4.1.0/virtinst/domain/os.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/domain/os.py -+++ virt-manager-4.0.0/virtinst/domain/os.py +--- virt-manager-4.1.0.orig/virtinst/domain/os.py ++++ virt-manager-4.1.0/virtinst/domain/os.py @@ -46,6 +46,8 @@ class DomainOs(XMLBuilder): return self.os_type == "hvm" def is_xenpv(self): @@ -28,11 +28,11 @@ def is_container(self): return self.os_type == "exe" -Index: virt-manager-4.0.0/virtinst/guest.py +Index: virt-manager-4.1.0/virtinst/guest.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/guest.py -+++ virt-manager-4.0.0/virtinst/guest.py -@@ -888,7 +888,7 @@ class Guest(XMLBuilder): +--- virt-manager-4.1.0.orig/virtinst/guest.py ++++ virt-manager-4.1.0/virtinst/guest.py +@@ -905,7 +905,7 @@ class Guest(XMLBuilder): usb_tablet = False usb_keyboard = False @@ -41,10 +41,10 @@ usb_tablet = True if (self.os.is_arm_machvirt() or self.os.is_riscv_virt() or -Index: virt-manager-4.0.0/virtManager/object/domain.py +Index: virt-manager-4.1.0/virtManager/object/domain.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/object/domain.py -+++ virt-manager-4.0.0/virtManager/object/domain.py +--- virt-manager-4.1.0.orig/virtManager/object/domain.py ++++ virt-manager-4.1.0/virtManager/object/domain.py @@ -1310,6 +1310,8 @@ class vmmDomain(vmmLibvirtObject): return self.get_xmlobj().os.is_xenpv() def is_hvm(self): @@ -54,10 +54,10 @@ def get_uuid(self): if self._uuid is None: -Index: virt-manager-4.0.0/virtManager/connection.py +Index: virt-manager-4.1.0/virtManager/connection.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/connection.py -+++ virt-manager-4.0.0/virtManager/connection.py +--- virt-manager-4.1.0.orig/virtManager/connection.py ++++ virt-manager-4.1.0/virtManager/connection.py @@ -211,6 +211,8 @@ class vmmConnection(vmmGObject): label = "xen (paravirt)" elif gtype == "hvm": ++++++ virtinst-dont-use-special-copy-cpu-features.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.125510087 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.129510111 +0100 @@ -12,11 +12,11 @@ and "CPUs" -> "Copy host CPU definition" also inserts 'host-model' so this change mirrors what is already done there. 
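A standalone sketch of the idea described above, not virtinst's actual code: when the
requested special mode is the host-copy one, fall back to host-model and drop any copied
model data so libvirt expands the host CPU definition itself:

    def apply_special_cpu_mode(cpu, mode):
        """Standalone sketch: treat 'host-copy' like 'host-model'."""
        cpu["mode"] = "host-model" if mode in ("host-model", "host-copy") else mode
        if cpu["mode"] == "host-model":
            # Drop any copied-out model data and let libvirt expand the host CPU.
            for key in ("model", "vendor", "features"):
                cpu.pop(key, None)
        return cpu

    print(apply_special_cpu_mode(
        {"mode": "custom", "model": "Skylake-Client", "features": ["vmx"]},
        "host-copy"))
    # -> {'mode': 'host-model'}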
-Index: virt-manager-3.3.0/virtinst/domain/cpu.py +Index: virt-manager-4.1.0/virtinst/domain/cpu.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/domain/cpu.py -+++ virt-manager-3.3.0/virtinst/domain/cpu.py -@@ -282,7 +282,8 @@ class DomainCpu(XMLBuilder): +--- virt-manager-4.1.0.orig/virtinst/domain/cpu.py ++++ virt-manager-4.1.0/virtinst/domain/cpu.py +@@ -295,7 +295,8 @@ class DomainCpu(XMLBuilder): log.debug("Using default cpu mode=%s", val) if (val == self.SPECIAL_MODE_HOST_MODEL or @@ -26,7 +26,7 @@ self.model = None self.vendor = None self.model_fallback = None -@@ -290,6 +291,9 @@ class DomainCpu(XMLBuilder): +@@ -303,6 +304,9 @@ class DomainCpu(XMLBuilder): self.check = None for f in self.features: self.remove_child(f) ++++++ virtinst-s390x-disable-graphics.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.149510230 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.149510230 +0100 @@ -1,10 +1,10 @@ Reference: bnc#869024 Disable graphics on s390x -Index: virt-manager-4.0.0/virtinst/guest.py +Index: virt-manager-4.1.0/virtinst/guest.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/guest.py -+++ virt-manager-4.0.0/virtinst/guest.py -@@ -200,7 +200,10 @@ class Guest(XMLBuilder): +--- virt-manager-4.1.0.orig/virtinst/guest.py ++++ virt-manager-4.1.0/virtinst/guest.py +@@ -208,7 +208,10 @@ class Guest(XMLBuilder): self.skip_default_channel = False self.skip_default_sound = False self.skip_default_usbredir = False @@ -16,7 +16,7 @@ self.skip_default_rng = False self.skip_default_tpm = False self.x86_cpu_default = self.cpu.SPECIAL_MODE_APP_DEFAULT -@@ -349,7 +352,7 @@ class Guest(XMLBuilder): +@@ -358,7 +361,7 @@ class Guest(XMLBuilder): if not os_support: return False @@ -25,7 +25,7 @@ return True return False # pragma: no cover -@@ -935,7 +938,7 @@ class Guest(XMLBuilder): +@@ -952,7 +955,7 @@ class Guest(XMLBuilder): self.add_device(dev) def _add_default_video_device(self): ++++++ virtinst-set-qemu-emulator.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.165510325 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.165510325 +0100 @@ -1,11 +1,11 @@ Use the correct qemu emulator based on the architecture. We want to get away from using the old qemu-dm emulator for Xen HVM guests so default to qemu-system-i386. 
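A rough sketch of the arch-to-emulator mapping this implies; the binary paths below are
illustrative assumptions, not values taken from the patch itself:

    EMULATORS = {
        "i686":    "/usr/bin/qemu-system-i386",
        "x86_64":  "/usr/bin/qemu-system-x86_64",
        "aarch64": "/usr/bin/qemu-system-aarch64",
        "s390x":   "/usr/bin/qemu-system-s390x",
    }

    def default_emulator(arch):
        """Illustrative only: map a guest arch to a qemu-system binary path."""
        return EMULATORS.get(arch, "/usr/bin/qemu-system-%s" % arch)

    print(default_emulator("i686"))   # /usr/bin/qemu-system-i386, not the old qemu-dm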
-Index: virt-manager-4.0.0/virtinst/guest.py +Index: virt-manager-4.1.0/virtinst/guest.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/guest.py -+++ virt-manager-4.0.0/virtinst/guest.py -@@ -780,6 +780,10 @@ class Guest(XMLBuilder): +--- virt-manager-4.1.0.orig/virtinst/guest.py ++++ virt-manager-4.1.0/virtinst/guest.py +@@ -797,6 +797,10 @@ class Guest(XMLBuilder): self._add_default_tpm() self.clock.set_defaults(self) ++++++ virtinst-smbios-unsupported-for-xenpv.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.173510373 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.177510397 +0100 @@ -1,10 +1,10 @@ References: bsc#1180069 -Index: virt-manager-4.0.0/virtinst/guest.py +Index: virt-manager-4.1.0/virtinst/guest.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/guest.py -+++ virt-manager-4.0.0/virtinst/guest.py -@@ -689,6 +689,8 @@ class Guest(XMLBuilder): +--- virt-manager-4.1.0.orig/virtinst/guest.py ++++ virt-manager-4.1.0/virtinst/guest.py +@@ -706,6 +706,8 @@ class Guest(XMLBuilder): self.type != "kvm"): log.warning( # pragma: no cover "KVM acceleration not available, using '%s'", self.type) ++++++ virtinst-use-qemu-for-cdrom-device.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.185510444 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.189510468 +0100 @@ -2,11 +2,11 @@ When the device added is a cdrom device (/dev/sr0), don't use "phy" as the driver name but instead use "qemu". -Index: virt-manager-3.3.0/virtinst/devices/disk.py +Index: virt-manager-4.1.0/virtinst/devices/disk.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/devices/disk.py -+++ virt-manager-3.3.0/virtinst/devices/disk.py -@@ -525,7 +525,8 @@ class DeviceDisk(Device): +--- virt-manager-4.1.0.orig/virtinst/devices/disk.py ++++ virt-manager-4.1.0/virtinst/devices/disk.py +@@ -533,7 +533,8 @@ class DeviceDisk(Device): # Recommended xen defaults from here: # https://bugzilla.redhat.com/show_bug.cgi?id=1171550#c9 # If type block, use name=phy. 
Otherwise do the same as qemu ++++++ virtinst-vol-default-nocow.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.197510515 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.201510539 +0100 @@ -4,11 +4,11 @@ Signed-off-by: Chunyan Liu <[email protected]> -Index: virt-manager-3.3.0/virtinst/storage.py +Index: virt-manager-4.1.0/virtinst/storage.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/storage.py -+++ virt-manager-3.3.0/virtinst/storage.py -@@ -570,6 +570,11 @@ class StorageVolume(_StorageObject): +--- virt-manager-4.1.0.orig/virtinst/storage.py ++++ virt-manager-4.1.0/virtinst/storage.py +@@ -568,6 +568,11 @@ class StorageVolume(_StorageObject): return self._pool_xml.get_disk_type() file_type = property(_get_vol_type) @@ -20,10 +20,10 @@ ################## # XML properties # -Index: virt-manager-3.3.0/virtinst/support.py +Index: virt-manager-4.1.0/virtinst/support.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/support.py -+++ virt-manager-3.3.0/virtinst/support.py +--- virt-manager-4.1.0.orig/virtinst/support.py ++++ virt-manager-4.1.0/virtinst/support.py @@ -269,6 +269,7 @@ class SupportCache: conn_vnc_none_auth = _make(hv_version={"qemu": "2.9.0"}) conn_device_boot_order = _make(hv_version={"qemu": 0, "test": 0}) ++++++ virtinst-xen-drive-type.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.213510611 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.217510635 +0100 @@ -2,11 +2,11 @@ Virt-manager on Xen doesn't fill in any type thereby defaulting to 'raw'. This patch will generate the correct XML on Xen. -Index: virt-manager-3.3.0/virtinst/devices/disk.py +Index: virt-manager-4.1.0/virtinst/devices/disk.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/devices/disk.py -+++ virt-manager-3.3.0/virtinst/devices/disk.py -@@ -542,6 +542,10 @@ class DeviceDisk(Device): +--- virt-manager-4.1.0.orig/virtinst/devices/disk.py ++++ virt-manager-4.1.0/virtinst/devices/disk.py +@@ -550,6 +550,10 @@ class DeviceDisk(Device): https://lists.gnu.org/archive/html/qemu-devel/2008-04/msg00675.html """ if self.driver_name != self.DRIVER_NAME_QEMU: ++++++ virtinst-xenbus-disk-index-fix.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.225510682 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.229510706 +0100 @@ -6,11 +6,11 @@ passed to qemu where it error'ed out with the disks having the same index (in this case both are 0). 
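A standalone illustration of the clash described above: 'xvda' and 'hda' both decode to
index 0, so target generation has to treat the two prefixes as one namespace. The helper
below only sketches that decoding and is not the patch's code:

    PREFIXES = ("xvd", "vd", "sd", "hd")

    def target_index(target):
        """Return the 0-based index encoded in a disk target name."""
        for prefix in PREFIXES:
            if target.startswith(prefix):
                suffix = target[len(prefix):]
                break
        else:
            raise ValueError("unknown target prefix: %s" % target)
        num = 0
        for ch in suffix:
            num = num * 26 + (ord(ch) - ord("a") + 1)
        return num - 1

    # Both decode to index 0, which is the clash qemu complained about:
    assert target_index("xvda") == target_index("hda") == 0
    assert target_index("vdaa") == 26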
-Index: virt-manager-3.3.0/virtinst/devices/disk.py +Index: virt-manager-4.1.0/virtinst/devices/disk.py =================================================================== ---- virt-manager-3.3.0.orig/virtinst/devices/disk.py -+++ virt-manager-3.3.0/virtinst/devices/disk.py -@@ -895,6 +895,17 @@ class DeviceDisk(Device): +--- virt-manager-4.1.0.orig/virtinst/devices/disk.py ++++ virt-manager-4.1.0/virtinst/devices/disk.py +@@ -903,6 +903,17 @@ class DeviceDisk(Device): :returns: generated target """ prefix, maxnode = self.get_target_prefix() @@ -28,7 +28,7 @@ skip_targets = [t for t in skip_targets if t and t.startswith(prefix)] skip_targets.sort() -@@ -902,11 +913,18 @@ class DeviceDisk(Device): +@@ -910,11 +921,18 @@ class DeviceDisk(Device): first_found = None for i in range(maxnode): ++++++ virtman-add-sev-memory-support.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.237510754 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.241510777 +0100 @@ -1,9 +1,9 @@ References: bsc#1196806, jsc#SLE-18834 -Index: virt-manager-4.0.0/ui/details.ui +Index: virt-manager-4.1.0/ui/details.ui =================================================================== ---- virt-manager-4.0.0.orig/ui/details.ui -+++ virt-manager-4.0.0/ui/details.ui +--- virt-manager-4.1.0.orig/ui/details.ui ++++ virt-manager-4.1.0/ui/details.ui @@ -1925,7 +1925,20 @@ </packing> </child> @@ -26,11 +26,11 @@ </child> </object> <packing> -Index: virt-manager-4.0.0/virtManager/details/details.py +Index: virt-manager-4.1.0/virtManager/details/details.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/details/details.py -+++ virt-manager-4.0.0/virtManager/details/details.py -@@ -50,6 +50,7 @@ from ..delete import vmmDeleteStorage +--- virt-manager-4.1.0.orig/virtManager/details/details.py ++++ virt-manager-4.1.0/virtManager/details/details.py +@@ -49,6 +49,7 @@ from ..delete import vmmDeleteStorage EDIT_MEM, EDIT_MEM_SHARED, @@ -38,7 +38,7 @@ EDIT_AUTOSTART, EDIT_BOOTORDER, -@@ -87,7 +88,7 @@ from ..delete import vmmDeleteStorage +@@ -86,7 +87,7 @@ from ..delete import vmmDeleteStorage EDIT_FS, @@ -47,7 +47,7 @@ # Columns in hw list model -@@ -440,6 +441,7 @@ class vmmDetails(vmmGObjectUI): +@@ -422,6 +423,7 @@ class vmmDetails(vmmGObjectUI): "on_mem_maxmem_changed": _e(EDIT_MEM), "on_mem_memory_changed": self._curmem_changed_cb, "on_mem_shared_access_toggled": _e(EDIT_MEM_SHARED), @@ -55,7 +55,7 @@ "on_boot_list_changed": self._boot_list_changed_cb, "on_boot_moveup_clicked": self._boot_moveup_clicked_cb, -@@ -1516,6 +1518,9 @@ class vmmDetails(vmmGObjectUI): +@@ -1498,6 +1500,9 @@ class vmmDetails(vmmGObjectUI): if self._edited(EDIT_MEM_SHARED): kwargs["mem_shared"] = self.widget("shared-memory").get_active() @@ -65,7 +65,7 @@ return self._change_config( self.vm.define_memory, kwargs, hotplug_args=hotplug_args) -@@ -2021,6 +2026,14 @@ class vmmDetails(vmmGObjectUI): +@@ -2004,6 +2009,14 @@ class vmmDetails(vmmGObjectUI): curmem.set_value(int(round(vm_cur_mem))) maxmem.set_value(int(round(vm_max_mem))) @@ -80,10 +80,10 @@ shared_mem, shared_mem_err = self.vm.has_shared_mem() self.widget("shared-memory").set_active(shared_mem) self.widget("shared-memory").set_sensitive(not bool(shared_mem_err)) -Index: virt-manager-4.0.0/virtManager/object/domain.py +Index: virt-manager-4.1.0/virtManager/object/domain.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/object/domain.py -+++ 
virt-manager-4.0.0/virtManager/object/domain.py +--- virt-manager-4.1.0.orig/virtManager/object/domain.py ++++ virt-manager-4.1.0/virtManager/object/domain.py @@ -706,15 +706,33 @@ class vmmDomain(vmmLibvirtObject): guest.memoryBacking.access_mode = access_mode @@ -129,10 +129,10 @@ def get_boot_order(self): legacy = not self.can_use_device_boot_order() return self.xmlobj.get_boot_order(legacy=legacy) -Index: virt-manager-4.0.0/virtinst/domain/memorybacking.py +Index: virt-manager-4.1.0/virtinst/domain/memorybacking.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/domain/memorybacking.py -+++ virt-manager-4.0.0/virtinst/domain/memorybacking.py +--- virt-manager-4.1.0.orig/virtinst/domain/memorybacking.py ++++ virt-manager-4.1.0/virtinst/domain/memorybacking.py @@ -27,6 +27,9 @@ class DomainMemoryBacking(XMLBuilder): XML_NAME = "memoryBacking" _XML_PROP_ORDER = ["hugepages", "nosharepages", "locked", "pages"] @@ -143,10 +143,10 @@ hugepages = XMLProperty("./hugepages", is_bool=True) nosharepages = XMLProperty("./nosharepages", is_bool=True) locked = XMLProperty("./locked", is_bool=True) -Index: virt-manager-4.0.0/virtinst/domcapabilities.py +Index: virt-manager-4.1.0/virtinst/domcapabilities.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/domcapabilities.py -+++ virt-manager-4.0.0/virtinst/domcapabilities.py +--- virt-manager-4.1.0.orig/virtinst/domcapabilities.py ++++ virt-manager-4.1.0/virtinst/domcapabilities.py @@ -93,6 +93,9 @@ def _make_capsblock(xml_root_name): class _SEV(XMLBuilder): XML_NAME = "sev" @@ -157,7 +157,7 @@ maxESGuests = XMLProperty("./maxESGuests") -@@ -402,6 +405,9 @@ class DomainCapabilities(XMLBuilder): +@@ -404,6 +407,9 @@ class DomainCapabilities(XMLBuilder): self.features.sev.maxESGuests) return bool(self.features.sev.supported) @@ -167,10 +167,10 @@ def supports_video_bochs(self): """ Returns False if either libvirt or qemu do not have support to bochs -Index: virt-manager-4.0.0/virtinst/domain/launch_security.py +Index: virt-manager-4.1.0/virtinst/domain/launch_security.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/domain/launch_security.py -+++ virt-manager-4.0.0/virtinst/domain/launch_security.py +--- virt-manager-4.1.0.orig/virtinst/domain/launch_security.py ++++ virt-manager-4.1.0/virtinst/domain/launch_security.py @@ -19,8 +19,12 @@ class DomainLaunchSecurity(XMLBuilder): kernelHashes = XMLProperty("./@kernelHashes", is_yesno=True) @@ -186,10 +186,10 @@ # The 'policy' is a mandatory 4-byte argument for the SEV firmware. 
# If missing, we use 0x03 for the original SEV implementation and -Index: virt-manager-4.0.0/virtinst/devices/interface.py +Index: virt-manager-4.1.0/virtinst/devices/interface.py =================================================================== ---- virt-manager-4.0.0.orig/virtinst/devices/interface.py -+++ virt-manager-4.0.0/virtinst/devices/interface.py +--- virt-manager-4.1.0.orig/virtinst/devices/interface.py ++++ virt-manager-4.1.0/virtinst/devices/interface.py @@ -287,6 +287,9 @@ class DeviceInterface(Device): self.type = nettype self.source = source @@ -200,11 +200,11 @@ ################## # Default config # -Index: virt-manager-4.0.0/virtManager/addhardware.py +Index: virt-manager-4.1.0/virtManager/addhardware.py =================================================================== ---- virt-manager-4.0.0.orig/virtManager/addhardware.py -+++ virt-manager-4.0.0/virtManager/addhardware.py -@@ -1438,6 +1438,9 @@ class vmmAddHardware(vmmGObjectUI): +--- virt-manager-4.1.0.orig/virtManager/addhardware.py ++++ virt-manager-4.1.0/virtManager/addhardware.py +@@ -1444,6 +1444,9 @@ class vmmAddHardware(vmmGObjectUI): mac = self.widget("create-mac-address").get_text() dev = self._netlist.build_device(mac, model) ++++++ virtman-disallow-adding-floppy-disk.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.261510896 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.261510896 +0100 @@ -4,11 +4,11 @@ added. -Index: virt-manager-3.3.0/virtManager/addhardware.py +Index: virt-manager-4.1.0/virtManager/addhardware.py =================================================================== ---- virt-manager-3.3.0.orig/virtManager/addhardware.py -+++ virt-manager-3.3.0/virtManager/addhardware.py -@@ -487,6 +487,9 @@ class vmmAddHardware(vmmGObjectUI): +--- virt-manager-4.1.0.orig/virtManager/addhardware.py ++++ virt-manager-4.1.0/virtManager/addhardware.py +@@ -490,6 +490,9 @@ class vmmAddHardware(vmmGObjectUI): buses = domcaps.devices.disk.get_enum("bus").get_values() else: buses = vmmAddHardware.disk_old_recommended_buses(guest) @@ -18,7 +18,7 @@ bus_map = { "disk": ["ide", "sata", "scsi", "sd", "usb", "virtio", "xen"], -@@ -506,6 +509,7 @@ class vmmAddHardware(vmmGObjectUI): +@@ -509,6 +512,7 @@ class vmmAddHardware(vmmGObjectUI): "usb": _("USB"), "virtio": _("VirtIO"), "xen": _("Xen"), @@ -26,7 +26,7 @@ } return bus_mappings.get(bus, bus) -@@ -673,8 +677,9 @@ class vmmAddHardware(vmmGObjectUI): +@@ -676,8 +680,9 @@ class vmmAddHardware(vmmGObjectUI): "drive-harddisk", _("Disk device")]) target_model.append([DeviceDisk.DEVICE_CDROM, "media-optical", _("CDROM device")]) ++++++ virtman-increase-setKeepAlive-count.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.281511016 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.281511016 +0100 @@ -2,11 +2,11 @@ For very large memory VMs Xen takes a long time scrubbing memory which causes the libvirt connection to timeout. 
Upstream was not interested in making this a preferences option (4/11/2015) -Index: virt-manager-3.3.0/virtManager/connection.py +Index: virt-manager-4.1.0/virtManager/connection.py =================================================================== ---- virt-manager-3.3.0.orig/virtManager/connection.py -+++ virt-manager-3.3.0/virtManager/connection.py -@@ -971,7 +971,7 @@ class vmmConnection(vmmGObject): +--- virt-manager-4.1.0.orig/virtManager/connection.py ++++ virt-manager-4.1.0/virtManager/connection.py +@@ -966,7 +966,7 @@ class vmmConnection(vmmGObject): self._add_conn_events() try: ++++++ virtman-register-delete-event-for-details-dialog.patch ++++++ --- /var/tmp/diff_new_pack.xBVVfq/_old 2022-11-08 10:53:42.297511110 +0100 +++ /var/tmp/diff_new_pack.xBVVfq/_new 2022-11-08 10:53:42.301511134 +0100 @@ -6,11 +6,11 @@ need to set the connection event. Not getting the event to cleanup leaves us in a bad state for when the details dialog is reopened. -Index: virt-manager-3.1.0/virtManager/vmwindow.py +Index: virt-manager-4.1.0/virtManager/vmwindow.py =================================================================== ---- virt-manager-3.1.0.orig/virtManager/vmwindow.py -+++ virt-manager-3.1.0/virtManager/vmwindow.py -@@ -133,6 +133,9 @@ class vmmVMWindow(vmmGObjectUI): +--- virt-manager-4.1.0.orig/virtManager/vmwindow.py ++++ virt-manager-4.1.0/virtManager/vmwindow.py +@@ -134,6 +134,9 @@ class vmmVMWindow(vmmGObjectUI): "on_details_menu_view_autoconnect_activate": self._autoconnect_ui_changed_cb, })
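As a closing illustration of the connect/disconnect pattern the last patch relies on, a
minimal PyGObject sketch; the class and signal names are invented for the example and assume
PyGObject is installed:

    from gi.repository import GObject

    class FakeConnection(GObject.Object):
        __gsignals__ = {
            "vm-removed": (GObject.SignalFlags.RUN_FIRST, None, (str,)),
        }

    class DetailsWindow:
        def __init__(self, conn):
            self.conn = conn
            # Keep the handler id so the window can unregister itself later.
            self._sig_id = conn.connect("vm-removed", self._vm_removed_cb)

        def _vm_removed_cb(self, conn, name):
            print("VM %s removed, cleaning up details window" % name)
            self.cleanup()

        def cleanup(self):
            if self._sig_id:
                self.conn.disconnect(self._sig_id)
                self._sig_id = None

    conn = FakeConnection()
    win = DetailsWindow(conn)
    conn.emit("vm-removed", "demo-vm")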
