Hello community,

here is the log from the commit of package yast2-update for openSUSE:Factory 
checked in at 2018-03-26 12:12:13
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/yast2-update (Old)
 and      /work/SRC/openSUSE:Factory/.yast2-update.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "yast2-update"

Mon Mar 26 12:12:13 2018 rev:119 rq:589116 version:4.0.12

Changes:
--------
--- /work/SRC/openSUSE:Factory/yast2-update/yast2-update.changes        2018-03-18 21:43:30.928586759 +0100
+++ /work/SRC/openSUSE:Factory/.yast2-update.new/yast2-update.changes   2018-03-26 12:12:15.235064434 +0200
@@ -1,0 +2,8 @@
+Mon Mar 19 16:24:35 UTC 2018 - [email protected]
+
+- Fixed a crash when upgrading systems with a separate /var
+  (bsc#1071454).
+- Better support for upgrading systems with a /var subvolume.
+- 4.0.12
+
+-------------------------------------------------------------------

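Background on the bsc#1071454 crash mentioned in the changelog above: an
/etc/fstab line for /var that is really a Btrfs subvolume used to be treated as
a separate device and mounted on its own, which broke the upgrade. The reworked
FindPartitionInFstab and the new mount_regular_fstab_entry? helper (see the
RootPart.rb diff further below) now skip such entries. A minimal illustrative
sketch of that check, not code from the package itself:

  # Hypothetical fstab entry for a /var subvolume; the UUID is the one used
  # in the new test fixture added below.
  entry = {
    "file"   => "/var",
    "spec"   => "UUID=0a0ebfa7-e1a8-45f2-ad53-495e192fcc8d",
    "mntops" => "subvol=/@/var"
  }

  # A "subvol=" mount option means /var lives inside the root filesystem,
  # so it is not a separate partition and must not be mounted on its own.
  separate_var = entry["file"] == "/var" && !entry.fetch("mntops", "").include?("subvol=")
  # => false
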
Old:
----
  yast2-update-4.0.11.tar.bz2

New:
----
  yast2-update-4.0.12.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ yast2-update.spec ++++++
--- /var/tmp/diff_new_pack.EnOocD/_old  2018-03-26 12:12:16.435021427 +0200
+++ /var/tmp/diff_new_pack.EnOocD/_new  2018-03-26 12:12:16.451020852 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           yast2-update
-Version:        4.0.11
+Version:        4.0.12
 Release:        0
 
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
@@ -39,9 +39,10 @@
 # Needed for tests
 BuildRequires:  rubygem(rspec)
 
-BuildRequires:  yast2-storage-ng >= 3.3.4
-# Y2Storage::Mountable#mount_path
-Requires:       yast2-storage-ng >= 4.0.90
+# Filesystems::Base#match_fstab_spec? and Filesystems::MountByType.from_fstab_spec
+BuildRequires:  yast2-storage-ng >= 4.0.137
+# Filesystems::Base#match_fstab_spec? and Filesystems::MountByType.from_fstab_spec
+Requires:       yast2-storage-ng >= 4.0.137
 # FSSnapshotStore
 Requires:       yast2 >= 3.1.126
 Requires:       yast2-installation

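The yast2-storage-ng version bump above is driven by the two methods named in
the spec comments: Filesystems::Base#match_fstab_spec? and
Filesystems::MountByType.from_fstab_spec. As a rough, illustrative sketch (the
real calls are in the RootPart.rb diff below), the updated code maps a raw
fstab spec to a mount-by type and to the filesystem it refers to:

  # Sketch only, assuming the yast2-storage-ng >= 4.0.137 API named above.
  require "y2storage"

  spec = "UUID=2f61fdb9-f82a-4052-8610-1eb090b82098"  # raw fs_spec field from fstab

  # Translate the fstab spec into a mount-by type (UUID, LABEL, DEVICE, ...)
  mount_by = Y2Storage::Filesystems::MountByType.from_fstab_spec(spec)

  # Find the filesystem the entry refers to, whatever naming scheme it uses
  probed = Y2Storage::StorageManager.instance.probed
  filesystem = probed.filesystems.find { |fs| fs.match_fstab_spec?(spec) }
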
++++++ yast2-update-4.0.11.tar.bz2 -> yast2-update-4.0.12.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/package/yast2-update.changes new/yast2-update-4.0.12/package/yast2-update.changes
--- old/yast2-update-4.0.11/package/yast2-update.changes        2018-03-15 13:34:54.000000000 +0100
+++ new/yast2-update-4.0.12/package/yast2-update.changes        2018-03-20 11:19:45.000000000 +0100
@@ -1,4 +1,12 @@
 -------------------------------------------------------------------
+Mon Mar 19 16:24:35 UTC 2018 - [email protected]
+
+- Fixed a crash when upgrading systems with a separate /var
+  (bsc#1071454).
+- Better support for upgrading systems with a /var subvolume.
+- 4.0.12
+
+-------------------------------------------------------------------
 Wed Mar 14 15:42:22 UTC 2018 - [email protected]
 
 - Properly restore the original installation repositories when
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/package/yast2-update.spec new/yast2-update-4.0.12/package/yast2-update.spec
--- old/yast2-update-4.0.11/package/yast2-update.spec   2018-03-15 13:34:54.000000000 +0100
+++ new/yast2-update-4.0.12/package/yast2-update.spec   2018-03-20 11:19:45.000000000 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           yast2-update
-Version:        4.0.11
+Version:        4.0.12
 Release:        0
 
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
@@ -41,9 +41,10 @@
 # Needed for tests
 BuildRequires:  rubygem(rspec)
 
-BuildRequires: yast2-storage-ng >= 3.3.4
-# Y2Storage::Mountable#mount_path
-Requires:      yast2-storage-ng >= 4.0.90
+# Filesystems::Base#match_fstab_spec? and Filesystems::MountByType.from_fstab_spec
+BuildRequires: yast2-storage-ng >= 4.0.137
+# Filesystems::Base#match_fstab_spec? and Filesystems::MountByType.from_fstab_spec
+Requires:      yast2-storage-ng >= 4.0.137
 # FSSnapshotStore
 Requires:      yast2 >= 3.1.126
 Requires:      yast2-installation
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/src/modules/RootPart.rb new/yast2-update-4.0.12/src/modules/RootPart.rb
--- old/yast2-update-4.0.11/src/modules/RootPart.rb     2018-03-15 13:34:54.000000000 +0100
+++ new/yast2-update-4.0.12/src/modules/RootPart.rb     2018-03-20 11:19:45.000000000 +0100
@@ -469,27 +469,16 @@
 
 
     # Mount partition on specified mount point
-    # @param [String] mount_point string mount point to mount the partition at
-    # @param [String] device string device to mount
-    # @param [String] mount_type string filesystem type to be specified while mounting
+    # @param mount_point [String] path to mount the partition at
+    # @param device [String] device to mount, in the format of the first field of fstab
+    # @param mount_type [String] filesystem type to be specified while mounting
     # @return [String] nil on success, error description on fail
     def MountPartition(mount_point, device, mount_type, fsopts = "")
       if mount_type == ""
 
-        # storage-ng
-        #
-        # FIXME
-        #
-        # Note that the code below will get passed unmodified fstab entries
-        # (like 'UUID=2f61fdb9-f82a-4052-8610-1eb090b82098') for device names
-        # and typically not match anything due to this.
-        #
-        # See also the comment in #update_staging_filesystem! below.
-        #
+        # Note that "device" comes from the unmodified fstab entry so it can be
+        # something like 'UUID=2f61fdb9-f82a-4052-8610-1eb090b82098'.
         mount_type = fstype_for_device(probed, device) || ""
-=begin
-        mount_type = FileSystems.GetMountString(Storage.DetectFs(device), "")
-=end
       end
 
       # #223878, do not call modprobe with empty mount_type
@@ -603,26 +592,25 @@
     # @param [String] mountpoint string a mount point to find
     # @return [String] the found partition
     def FindPartitionInFstab(fstab, mountpoint)
-      if Builtins.substring(
-          mountpoint,
-          Ops.subtract(Builtins.size(mountpoint), 1),
-          1
-        ) == "/"
-        mountpoint = Builtins.substring(
-          mountpoint,
-          0,
-          Ops.subtract(Builtins.size(mountpoint), 1)
-        )
-      end
+      # Removing the "/" and then adding it again in the comparison below looks
+      # weird, but let's don't change this ancient code too much.
+      mountpoint = mountpoint.chomp("/")
+
+      tmp = fstab.value.select do |entry|
+        file = entry.fetch("file", "")
+        mntops = entry.fetch("mntops", "")
+
+        # Discard Btrfs subvolumes, they are not really a separate device
+        if mntops.include?("subvol=")
+          log.info "FindPartitionInFstab: #{file} subvolume ignored"
+          next false
+        end
 
-      tmp = Builtins.filter(fstab.value) do |entry|
-        Ops.get_string(entry, "file", "") == mountpoint ||
-          Ops.get_string(entry, "file", "") == Ops.add(mountpoint, "/")
+        file == mountpoint || file == mountpoint + "/"
       end
+      return nil if tmp.size.zero?
 
-      return nil if Builtins.size(tmp) == 0
-
-      Ops.get_string(tmp, [0, "spec"], "")
+      tmp.first.fetch("spec", "")
     end
 
     def update_mount_options(options)
@@ -1007,34 +995,6 @@
       true
     end
 
-
-    # Check if specified mount point is mounted
-    # @param [String] mountpoint the mount point to be checked
-    # @return [Boolean] true if it is mounted
-    def IsMounted(mountpoint)
-      if Builtins.substring(
-          mountpoint,
-          Ops.subtract(Builtins.size(mountpoint), 1),
-          1
-        ) == "/"
-        mountpoint = Builtins.substring(
-          mountpoint,
-          0,
-          Ops.subtract(Builtins.size(mountpoint), 1)
-        )
-      end
-
-      ret = true
-      Builtins.foreach(@activated) do |e|
-        if Ops.get_string(e, :type, "") == "mount" &&
-            (Ops.get_string(e, :mntpt, "") == mountpoint ||
-              Ops.get_string(e, :mntpt, "") == Ops.add(mountpoint, "/"))
-          ret = true
-        end
-      end
-      ret
-    end
-
     # bugzilla #258563
     def CheckBootSize(bootpart)
       min_suggested_bootsize = 65536
@@ -1121,18 +1081,6 @@
     #
     def MountFSTab(fstab, message)
       fstab = deep_copy(fstab)
-      allowed_fs = [
-        "ext",
-        "ext2",
-        "ext3",
-        "ext4",
-        "btrfs",
-        "jfs",
-        "xfs",
-        "hpfs",
-        "vfat",
-        "auto",
-      ]
 
       # mount sysfs first
       if MountPartition("/sys", "sysfs", "sysfs") == nil
@@ -1154,9 +1102,8 @@
         mntops = Ops.get_string(mounts, "mntops", "")
         spec = Ops.get_string(mounts, "spec", "")
         fspath = Ops.get_string(mounts, "file", "")
-        if Builtins.contains(allowed_fs, vfstype) && fspath != "/" &&
-            (fspath != "/var" || !IsMounted("/var")) &&
-            !Builtins.issubstring(mntops, "noauto")
+
+        if mount_regular_fstab_entry?(mounts)
           Builtins.y2milestone("mounting %1 to %2", spec, fspath)
 
           if !Mode.test
@@ -1259,7 +1206,7 @@
 
               success = false if !CheckBootSize(checkspec)
             end
-          end # allowed_fs
+          end # mount_regular_fstab_entry?
         elsif vfstype == "swap" && fspath == "swap"
           Builtins.y2milestone("mounting %1 to %2", spec, fspath)
 
@@ -1472,6 +1419,7 @@
 
       manual_mount_successful
     end
+
     def MountVarIfRequired(fstab, root_device_current, manual_var_mount)
       fstab = deep_copy(fstab)
       var_device_fstab = (
@@ -1481,84 +1429,49 @@
         _FindPartitionInFstab_result
       )
 
+      # At this point, var_device_fstab contains the spec column of fstab
+      # for the /var mount point. E.g. "/dev/sda1", "/dev/system/var" or "UUID=00x00x00x"
+
       # No need to mount "/var", it's not separate == already mounted with "/"
-      if var_device_fstab == nil
+      if var_device_fstab.nil?
         Builtins.y2milestone("Not a separate /var...")
         return nil
       end
 
-      if !Storage.DeviceRealDisk(var_device_fstab)
-        Builtins.y2milestone(
-          "Device %1 is not a real disk, mounting...",
-          var_device_fstab
-        )
-        return MountVarPartition(var_device_fstab)
-      end
-
-      # BNC #494240: If a device name is not created by Kernel, we can use it for upgrade
-      if !Storage.IsKernelDeviceName(var_device_fstab)
-        Builtins.y2milestone(
-          "Device %1 is not a Kernel device name, mounting...",
-          var_device_fstab
-        )
+      # BNC #494240: all methods except kernel names should be stable enough
+      if !mounted_by_kernel_name?(var_device_fstab)
+        log.info "Device #{var_device_fstab} is not mounted by kernel name, 
mounting..."
         return MountVarPartition(var_device_fstab)
       end
 
-      tmp1 = Builtins.filter(fstab) do |entry|
-        Ops.get_string(entry, "file", "") == "/"
-      end
-      root_device_fstab = Ops.get_string(tmp1, [0, "spec"], "")
-      if !Storage.DeviceRealDisk(root_device_fstab)
+      # Mounting virtual devices by kernel name (e.g. /dev/md0 or /dev/system/swap_lv)
+      # is also considered to be safe
+      if virtual_device?(var_device_fstab)
+        log.info "Device #{var_device_fstab} is not a partition, mounting..."
         return MountVarPartition(var_device_fstab)
       end
 
-      root_info = Storage.GetDiskPartition(root_device_fstab)
-      var_info = Storage.GetDiskPartition(var_device_fstab)
-
-      if Ops.get_string(root_info, "disk", "") ==
-          Ops.get_string(var_info, "disk", "")
-        tmp2 = Storage.GetDiskPartition(root_device_current)
-        var_partition_current2 = Storage.GetDeviceName(
-          Ops.get_string(tmp2, "disk", ""),
-          Ops.get_integer(var_info, "nr", 0)
-        )
-
-        return MountVarPartition(var_partition_current2)
-      end
-
-      realdisks = []
-      Builtins.foreach(Storage.GetOndiskTarget) do |s, m|
-        # BNC #448577, checking device
-        if Storage.IsKernelDeviceName(s) && Storage.DeviceRealDisk(s)
-          realdisks = Builtins.add(realdisks, s)
+      # At this point, var_device_fstab points either to a device that is not
+      # longer available or to a plain partition.
+      #
+      # In the second case the name may not be reliable since the disk may have
+      # changed its name (e.g. it used to be recognized as /dev/sda or /dev/hdb in
+      # the system to update but it became /dev/sdb in the new system).
+      new_name = update_var_dev_name(var_device_fstab, fstab, root_device_current)
+      if new_name
+        if new_name != var_device_fstab
+          log.info "Partition #{var_device_fstab} seems to have turned into 
#{new_name}"
         end
+        log.info "Device #{new_name} is a partition, mounting..."
+        return MountVarPartition(new_name)
       end
 
-      if Builtins.size(realdisks) != 2
-        # <-- BNC #448577, Cannot find /var partition automatically
-        return nil if manual_var_mount && MountUserDefinedVarPartition()
+      # BNC #448577: cannot find /var partition automatically, so ask the user
+      return nil if manual_var_mount && MountUserDefinedVarPartition()
 
-        Builtins.y2error(
-          "don't know how to handle more than two disks at this point"
-        )
-        # error message
-        return Ops.add(
-          _("Unable to mount /var partition with this disk configuration.\n"),
-          @sdb
-        )
-      end
-
-      other_disk = Ops.get(
-        realdisks,
-        Ops.get(realdisks, 0, "") == Ops.get_string(root_info, "disk", "") ? 1 
: 0,
-        ""
-      )
-      var_partition_current = Storage.GetDeviceName(
-        other_disk,
-        Ops.get_integer(var_info, "nr", 0)
-      )
-
-      MountVarPartition(var_partition_current)
+      # Everything else failed, return error message
+      log.error "Unable to mount /var partition"
+      _("Unable to mount /var partition with this disk configuration.\n") + 
@sdb
     end
 
     def has_pam_mount
@@ -2204,6 +2117,8 @@
       false
     end
 
+    # The only value of this seems to be for yast-bootloader to locate the
+    # root & boot devices.
     def update_staging!
       log.info "start update_staging"
 
@@ -2227,40 +2142,16 @@
     def update_staging_filesystem!(name, mountpoint)
       log.info "Setting partition data: Device: #{name}, MountPoint: 
#{mountpoint}"
 
-      # storage-ng
-      #
-      # FIXME
-      #
-      # The code below does not work as one would expect as 'name' comes straight out of
+      # Take into account that 'name' comes straight out of
       # /etc/fstab and might look like 'UUID=2f61fdb9-f82a-4052-8610-1eb090b82098'.
-      #
-      # The only value of this seems to be for yast-bootloader to locate the
-      # root & boot devices.
-      #
-      # Note that this works magically atm because for the root device, the
-      # kernel device name is passed so the 'if' below actually matches.
-      #
-
-      if name.include?("/dev/disk/by-id")
-        mount_by = Y2Storage::Filesystems::MountByType::ID
-      elsif name.include?("/dev/")
-        mount_by = Y2Storage::Filesystems::MountByType::DEVICE
-      else
-        # existing code has the following lines here that don't make sense (afaics):
-        #
-        # mount_by = Y2Storage::Filesystems::MountByType::LABEL
-        # filesystem = staging.filesystems.with(label: name).first
-      end
-
+      mount_by = Y2Storage::Filesystems::MountByType.from_fstab_spec(name)
       return unless mount_by
 
       filesystem = fs_by_devicename(staging, name)
+      return unless filesystem
 
-      if filesystem
-        filesystem.mount_path = mountpoint
-        filesystem.mount_point.mount_by = mount_by
-      end
-
+      filesystem.mount_path = mountpoint
+      filesystem.mount_point.mount_by = mount_by
     end
 
     # FIXME
@@ -2273,38 +2164,161 @@
     # Return nil if there's no such device or device doesn't have a filesystem.
     #
     # @param devicegraph [Devicegraph]
-    # @param devicename [String]
+    # @param device_spec [String] fs_spec field of one entry from fstab
     #
     # @return [String, nil]
     #
-    def fstype_for_device(devicegraph, devicename)
-      fs = fs_by_devicename(devicegraph, devicename)
+    def fstype_for_device(devicegraph, device_spec)
+      fs = fs_by_devicename(devicegraph, device_spec)
       fs.type.to_s if fs
     end
 
-    # Look up filesystem object with matching device name.
+    # Look up filesystem object with matching device name, as specified in
+    # fstab.
     #
     # Return nil if there's no such device or the device doesn't have a
     # filesystem.
     #
     # @param devicegraph [Devicegraph]
-    # @param devicename [String]
-    # @return [Y2Storage::Filesystems::BlkFilesystem, nil]
+    # @param device_spec [String] fs_spec field of one entry from fstab
+    # @return [Y2Storage::Filesystems::Base, nil]
     #
-    def fs_by_devicename(devicegraph, devicename)
-      fs = devicegraph.blk_filesystems.find do |fs|
-        fs.blk_devices.any? { |dev| dev.name == devicename }
-      end
+    def fs_by_devicename(devicegraph, device_spec)
+      fs = devicegraph.filesystems.find { |fs| fs.match_fstab_spec?(device_spec) }
+      # If the previous search returned nil, there is still a last chance to
+      # find the device. Maybe 'device_spec' is one of the udev names discarded
+      # by libstorage-ng
+      fs ||= fs_by_udev_lookup(devicegraph, device_spec)
 
       # log which devicegraph we operate on
       graph = "?"
       graph = "probed" if devicegraph.object_id == probed.object_id
       graph = "staging#" + 
Y2Storage::StorageManager.instance.staging_revision.to_s if 
devicegraph.object_id == staging.object_id
-      log.info("fs_by_devicename(#{graph}, #{devicename}) = #{'sid#' + 
fs.sid.to_s if fs}")
+      log.info("fs_by_devicename(#{graph}, #{device_spec}) = #{'sid#' + 
fs.sid.to_s if fs}")
 
       fs
     end
 
+    # Finds a filesystem by udev name, using a direct lookup in the system
+    # (i.e. going beyond the udev names recognized by libstorage-ng) if needed
+    #
+    # @param devicegraph [Devicegraph]
+    # @param name [String] full udev name
+    # @return [Y2Storage::Filesystems::BlkFilesystem, nil]
+    def fs_by_udev_lookup(devicegraph, name)
+      dev = devicegraph.find_by_any_name(name)
+      return nil if dev.nil? || !dev.respond_to?(:filesystem)
+      dev.filesystem
+    end
+
+    # Whether the given fstab spec corresponds to a device mounted by its kernel
+    # device name.
+    #
+    # @param spec [String] content of the first column of an /etc/fstab entry
+    # @return [Boolean]
+    def mounted_by_kernel_name?(spec)
+      mount_by = Y2Storage::Filesystems::MountByType.from_fstab_spec(spec)
+      mount_by.is?(:device)
+    end
+
+    # Whether the device referenced by the given fstab spec is a virtual device
+    # (basically anything that is not a partition).
+    #
+    # This is somehow the inverse of the old Storage.DeviceRealDisk
+    #
+    # @param spec [String] content of the first column of an /etc/fstab entry
+    # @return [Boolean] true if the device was found and is not a partition
+    def virtual_device?(spec)
+      filesystem = fs_by_devicename(probed, spec)
+      # If 'filesystem' is nil, either the device is not longer there or it's a
+      # partition that now has a new name (names of virtual devices should be stable).
+      return false unless filesystem
+
+      # If this is not based on a block device (so far that means this is NFS),
+      # then it's virtual.
+      return true unless filesystem.respond_to?(:plain_blk_devices)
+
+      # To be more faithful to the original check on Storage.DeviceRealDisk
+      # let's consider everything but a partition to be virtual.
+      filesystem.plain_blk_devices.none? { |dev| dev.is?(:partition) }
+    end
+
+    # This method tries to infer the /var device name from its partition number
+    # and the name of the root device.
+    #
+    # @param var_name [String] spec of the /var partition in the old /etc/fstab
+    # @param fstab [Array<Hash>] content of the old /etc/fstab
+    # @param root_current_name [String] current kernel device name of the root partition
+    # @return [String, nil] new name of the device (best guess), nil if we know
+    #   the current name is outdated but we cannot infer the new one
+    def update_var_dev_name(var_name, fstab, root_current_name)
+      root_entry = fstab.find { |entry| entry["file"] == "/" }
+      root_spec = root_entry ? root_entry["spec"] : nil
+      root_device = probed.find_by_name(root_current_name)
+
+      # If /var was mounted by partition kernel name but the root device was
+      # not, we cannot apply the upcoming logic to make up the new /var device
+      # name. Let's simply use the one we already know.
+      if root_spec.nil? || !mounted_by_kernel_name?(root_spec) || !root_device.is?(:partition)
+        return var_name
+      end
+
+      # Regular expression to break a partition name. Second capture gets the
+      # partition number (as string). First capture gets the rest.
+      regexp = /(.*[^\d])(\d*)$/
+      var_name_no_number, var_name_number = regexp.match(var_name).captures
+      root_spec_no_number = regexp.match(root_spec)[1]
+
+      # If /var and / were partitions in the same disk...
+      if var_name_no_number == root_spec_no_number
+        root_current_no_number = regexp.match(root_current_name)[1]
+        return root_current_no_number + var_name_number
+      end
+
+      # If both partitions were not in the same disk, we assume '/' is in one
+      # disk and '/var' in the other one. Of course that logic only works if
+      # there are exactly two disks.
+      return nil if probed.disk_devices.size != 2
+
+      root_disk = root_device.partitionable
+      other_disk = probed.disk_devices.find { |dev| dev != root_disk }
+      partition = other_disk.partitions.find { |part| part.number == var_name_number.to_i }
+      partition.name
+    end
+
+    # @see #mount_regular_fstab_entry?
+    ALLOWED_FS = [
+      "ext",
+      "ext2",
+      "ext3",
+      "ext4",
+      "btrfs",
+      "jfs",
+      "xfs",
+      "hpfs",
+      "vfat",
+      "auto",
+    ].freeze
+    private_constant :ALLOWED_FS
+
+    # Whether a given fstab entry should be mounted by {#MountFSTab}
+    #
+    # @param entry [Hash] fstab entry
+    # @return [Boolean]
+    def mount_regular_fstab_entry?(entry)
+      vfstype = entry.fetch("vfstype", "")
+      mntops = entry.fetch("mntops", "")
+      path = entry.fetch("file", "")
+
+      return false if path == "/"
+      return false unless ALLOWED_FS.include?(vfstype)
+      return false if mntops.include?("noauto")
+
+      # The conditions above are enough for any mount point except /var.
+      # In the /var case, it should have been already processed by
+      # #MountVarIfRequired... except when /var is a subvolume
+      path != "/var" || mntops.include?("subvol=")
+    end
   end
 
   RootPart = RootPartClass.new
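
The new update_var_dev_name helper above guesses the current name of a /var
partition whose disk was renamed between the old and the new system: it keeps
the old partition number and swaps in the disk portion of the current root
device name. A standalone illustration of that regexp logic, with the values
used by the tests further below:

  # Illustration of the renaming heuristic in update_var_dev_name.
  regexp = /(.*[^\d])(\d*)$/

  var_name          = "/dev/sdb4"   # /var spec from the old fstab
  root_spec         = "/dev/sdb2"   # / spec from the old fstab
  root_current_name = "/dev/sda2"   # where / actually lives now

  var_base, var_number = regexp.match(var_name).captures  # => "/dev/sdb", "4"
  root_base = regexp.match(root_spec)[1]                   # => "/dev/sdb"

  if var_base == root_base
    # Same disk as "/": keep the partition number, take the disk name from
    # the current root device
    new_var = regexp.match(root_current_name)[1] + var_number  # => "/dev/sda4"
  end
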
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/test/data/devicegraphs/trivial-lvm.yml new/yast2-update-4.0.12/test/data/devicegraphs/trivial-lvm.yml
--- old/yast2-update-4.0.11/test/data/devicegraphs/trivial-lvm.yml      1970-01-01 01:00:00.000000000 +0100
+++ new/yast2-update-4.0.12/test/data/devicegraphs/trivial-lvm.yml      2018-03-20 11:19:45.000000000 +0100
@@ -0,0 +1,30 @@
+---
+- disk:
+    name: /dev/sda
+    size: 200 GiB
+    partition_table:  gpt
+    partitions:
+
+    - partition:
+        size:         unlimited
+        name:         /dev/sda1
+        id:           lvm
+
+- lvm_vg:
+    vg_name: vg0
+    lvm_pvs:
+        - lvm_pv:
+            blk_device: /dev/sda1
+
+    lvm_lvs:
+        - lvm_lv:
+            size:         20 GiB
+            lv_name:      root
+            file_system:  btrfs
+            uuid:         5a0a-3387
+
+        - lvm_lv:
+            size:         20 GiB
+            lv_name:      var
+            file_system:  xfs
+            uuid:         4b85-3de0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/test/data/devicegraphs/two-disks-two-btrfs.xml new/yast2-update-4.0.12/test/data/devicegraphs/two-disks-two-btrfs.xml
--- old/yast2-update-4.0.11/test/data/devicegraphs/two-disks-two-btrfs.xml      1970-01-01 01:00:00.000000000 +0100
+++ new/yast2-update-4.0.12/test/data/devicegraphs/two-disks-two-btrfs.xml      2018-03-20 11:19:45.000000000 +0100
@@ -0,0 +1,314 @@
+<?xml version="1.0"?>
+<!-- Update scenario: one disk with two existing Linux installations,
+     including several partitions -->
+
+<!-- sda1 is a btrfs filesystem with some subvolumes including @/var
+     sda2 is a btrfs filesystem with other subvolumes and no @/var
+     sda3 is swap
+     sda4 is xfs (to be used as /var)
+     sdb1 is another xfs (also to be used as /var) -->
+
+<!-- generated by libstorage-ng version 3.3.182 and modified by hand -->
+
+<Devicegraph>
+  <Devices>
+    <Disk>
+      <sid>42</sid>
+      <name>/dev/sda</name>
+      <sysfs-name>sda</sysfs-name>
+      <sysfs-path>/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sda</sysfs-path>
+      <region>
+        <length>104857600</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-path>pci-0000:00:01.1-ata-1</udev-path>
+      <udev-id>ata-QEMU_HARDDISK_QM00001</udev-id>
+      <udev-id>scsi-0ATA_QEMU_HARDDISK_QM00001</udev-id>
+      <udev-id>scsi-1ATA_QEMU_HARDDISK_QM00001</udev-id>
+      <udev-id>scsi-SATA_QEMU_HARDDISK_QM00001</udev-id>
+      <topology/>
+      <range>256</range>
+      <rotational>true</rotational>
+      <transport>ATA</transport>
+    </Disk>
+    <Msdos>
+      <sid>43</sid>
+    </Msdos>
+    <Partition>
+      <sid>44</sid>
+      <name>/dev/sda1</name>
+      <sysfs-name>sda1</sysfs-name>
+      <sysfs-path>/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda1</sysfs-path>
+      <region>
+        <start>2048</start>
+        <length>20971520</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-path>pci-0000:00:01.1-ata-1-part1</udev-path>
+      <udev-id>ata-QEMU_HARDDISK_QM00001-part1</udev-id>
+      <udev-id>scsi-0ATA_QEMU_HARDDISK_QM00001-part1</udev-id>
+      <udev-id>scsi-1ATA_QEMU_HARDDISK_QM00001-part1</udev-id>
+      <udev-id>scsi-SATA_QEMU_HARDDISK_QM00001-part1</udev-id>
+      <type>primary</type>
+      <id>131</id>
+      <boot>true</boot>
+    </Partition>
+    <Partition>
+      <sid>45</sid>
+      <name>/dev/sda2</name>
+      <sysfs-name>sda2</sysfs-name>
+      <sysfs-path>/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda2</sysfs-path>
+      <region>
+        <start>20973568</start>
+        <length>52008960</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-path>pci-0000:00:01.1-ata-1-part2</udev-path>
+      <udev-id>ata-QEMU_HARDDISK_QM00001-part2</udev-id>
+      <udev-id>scsi-0ATA_QEMU_HARDDISK_QM00001-part2</udev-id>
+      <udev-id>scsi-1ATA_QEMU_HARDDISK_QM00001-part2</udev-id>
+      <udev-id>scsi-SATA_QEMU_HARDDISK_QM00001-part2</udev-id>
+      <type>primary</type>
+      <id>131</id>
+    </Partition>
+    <Partition>
+      <sid>46</sid>
+      <name>/dev/sda3</name>
+      <sysfs-name>sda3</sysfs-name>
+      <sysfs-path>/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda3</sysfs-path>
+      <region>
+        <start>100663296</start>
+        <length>4194304</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-path>pci-0000:00:01.1-ata-1-part3</udev-path>
+      <udev-id>ata-QEMU_HARDDISK_QM00001-part3</udev-id>
+      <udev-id>scsi-0ATA_QEMU_HARDDISK_QM00001-part3</udev-id>
+      <udev-id>scsi-1ATA_QEMU_HARDDISK_QM00001-part3</udev-id>
+      <udev-id>scsi-SATA_QEMU_HARDDISK_QM00001-part3</udev-id>
+      <type>primary</type>
+      <id>130</id>
+    </Partition>
+    <Partition>
+      <sid>47</sid>
+      <name>/dev/sda4</name>
+      <sysfs-name>sda4</sysfs-name>
+      <sysfs-path>/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda4</sysfs-path>
+      <region>
+        <start>72982528</start>
+        <length>27680768</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-path>pci-0000:00:01.1-ata-1-part4</udev-path>
+      <udev-id>ata-QEMU_HARDDISK_QM00001-part4</udev-id>
+      <udev-id>scsi-0ATA_QEMU_HARDDISK_QM00001-part4</udev-id>
+      <udev-id>scsi-1ATA_QEMU_HARDDISK_QM00001-part4</udev-id>
+      <udev-id>scsi-SATA_QEMU_HARDDISK_QM00001-part4</udev-id>
+      <type>primary</type>
+      <id>131</id>
+    </Partition>
+    <Disk>
+      <sid>80</sid>
+      <name>/dev/sdb</name>
+      <sysfs-name>sdb</sysfs-name>
+      <sysfs-path>/devices/vio/30000003/host0/target0:0:1/0:0:1:0/block/sdb</sysfs-path>
+      <region>
+        <length>1342177280</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-id>scsi-SAIX_VDASD_0001da0a00007a000000015d3bb126fd.2</udev-id>
+      <topology/>
+      <range>256</range>
+      <rotational>true</rotational>
+    </Disk>
+    <Gpt>
+      <sid>81</sid>
+    </Gpt>
+    <Partition>
+      <sid>82</sid>
+      <name>/dev/sdb1</name>
+      <sysfs-name>sdb1</sysfs-name>
+      <sysfs-path>/devices/vio/30000003/host0/target0:0:1/0:0:1:0/block/sdb/sdb1</sysfs-path>
+      <region>
+        <start>2048</start>
+        <length>409600</length>
+        <block-size>512</block-size>
+      </region>
+      <udev-id>scsi-SAIX_VDASD_0001da0a00007a000000015d3bb126fd.2-part1</udev-id>
+      <type>primary</type>
+      <id>131</id>
+    </Partition>
+    <Btrfs>
+      <sid>48</sid>
+      <uuid>0a0ebfa7-e1a8-45f2-ad53-495e192fcc8d</uuid>
+    </Btrfs>
+    <BtrfsSubvolume>
+      <sid>49</sid>
+      <id>5</id>
+      <path></path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>50</sid>
+      <id>257</id>
+      <path>@</path>
+      <default-btrfs-subvolume>true</default-btrfs-subvolume>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>51</sid>
+      <id>258</id>
+      <path>@/var</path>
+      <nocow>true</nocow>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>59</sid>
+      <id>268</id>
+      <path>@/lib/machines</path>
+    </BtrfsSubvolume>
+    <Btrfs>
+      <sid>60</sid>
+      <uuid>d6e5c710-3067-48de-8363-433e54a9d0b5</uuid>
+    </Btrfs>
+    <BtrfsSubvolume>
+      <sid>61</sid>
+      <id>5</id>
+      <path></path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>62</sid>
+      <id>257</id>
+      <path>@</path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>63</sid>
+      <id>258</id>
+      <path>@/usr/local</path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>64</sid>
+      <id>259</id>
+      <path>@/tmp</path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>69</sid>
+      <id>264</id>
+      <path>@/.snapshots</path>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>70</sid>
+      <id>265</id>
+      <path>@/.snapshots/1/snapshot</path>
+      <default-btrfs-subvolume>true</default-btrfs-subvolume>
+    </BtrfsSubvolume>
+    <BtrfsSubvolume>
+      <sid>71</sid>
+      <id>267</id>
+      <path>@/.snapshots/2/snapshot</path>
+    </BtrfsSubvolume>
+    <Swap>
+      <sid>72</sid>
+      <uuid>a62e32ec-f58d-4bff-941b-6fb9ea45c016</uuid>
+    </Swap>
+    <Xfs>
+      <sid>73</sid>
+      <uuid>c9510dc7-fb50-4f7b-bd84-886965c821f6</uuid>
+    </Xfs>
+    <Xfs>
+      <sid>83</sid>
+      <uuid>kAKSqH-08vm-3xDY-Ycqe-GkgG-tTUQ-MtBE1F</uuid>
+    </Xfs>
+  </Devices>
+  <Holders>
+    <User>
+      <source-sid>42</source-sid>
+      <target-sid>43</target-sid>
+    </User>
+    <Subdevice>
+      <source-sid>43</source-sid>
+      <target-sid>44</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>43</source-sid>
+      <target-sid>45</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>43</source-sid>
+      <target-sid>46</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>43</source-sid>
+      <target-sid>47</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>48</source-sid>
+      <target-sid>49</target-sid>
+    </Subdevice>
+    <FilesystemUser>
+      <source-sid>44</source-sid>
+      <target-sid>48</target-sid>
+    </FilesystemUser>
+    <Subdevice>
+      <source-sid>49</source-sid>
+      <target-sid>50</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>50</source-sid>
+      <target-sid>51</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>50</source-sid>
+      <target-sid>59</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>60</source-sid>
+      <target-sid>61</target-sid>
+    </Subdevice>
+    <FilesystemUser>
+      <source-sid>45</source-sid>
+      <target-sid>60</target-sid>
+    </FilesystemUser>
+    <Subdevice>
+      <source-sid>61</source-sid>
+      <target-sid>62</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>62</source-sid>
+      <target-sid>63</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>62</source-sid>
+      <target-sid>64</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>62</source-sid>
+      <target-sid>69</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>69</source-sid>
+      <target-sid>70</target-sid>
+    </Subdevice>
+    <Subdevice>
+      <source-sid>69</source-sid>
+      <target-sid>71</target-sid>
+    </Subdevice>
+    <FilesystemUser>
+      <source-sid>46</source-sid>
+      <target-sid>72</target-sid>
+    </FilesystemUser>
+    <FilesystemUser>
+      <source-sid>47</source-sid>
+      <target-sid>73</target-sid>
+    </FilesystemUser>
+    <User>
+      <source-sid>80</source-sid>
+      <target-sid>81</target-sid>
+    </User>
+    <Subdevice>
+      <source-sid>81</source-sid>
+      <target-sid>82</target-sid>
+    </Subdevice>
+    <FilesystemUser>
+      <source-sid>82</source-sid>
+      <target-sid>83</target-sid>
+    </FilesystemUser>
+  </Holders>
+</Devicegraph>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/test/helpers.rb new/yast2-update-4.0.12/test/helpers.rb
--- old/yast2-update-4.0.11/test/helpers.rb     2018-03-15 13:34:54.000000000 +0100
+++ new/yast2-update-4.0.12/test/helpers.rb     2018-03-20 11:19:45.000000000 +0100
@@ -16,4 +16,13 @@
     allow(subject).to receive(:initialize_update_rootpart)
     allow(subject).to receive(:load_data)
   end
+
+  def stub_storage(devicegraph_file)
+    path = File.join(File.dirname(__FILE__), "data", "devicegraphs", devicegraph_file)
+    if path.end_with?(".xml")
+      Y2Storage::StorageManager.create_test_instance.probe_from_xml(path)
+    else
+      Y2Storage::StorageManager.create_test_instance.probe_from_yaml(path)
+    end
+  end
 end
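
The stub_storage helper added above chooses the probing method from the
fixture's extension (XML dumps versus YAML descriptions). A hedged usage
sketch, mirroring how root_part_test.rb below calls it; it assumes the spec is
run through the package's usual test setup so Y2Storage and these shared
helpers are loaded:

  describe Yast::RootPart do
    before do
      stub_storage("trivial-lvm.yml")           # YAML devicegraph fixture
      # stub_storage("two-disks-two-btrfs.xml") # or an XML fixture
    end
  end
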
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/yast2-update-4.0.11/test/root_part_test.rb new/yast2-update-4.0.12/test/root_part_test.rb
--- old/yast2-update-4.0.11/test/root_part_test.rb      2018-03-15 13:34:54.000000000 +0100
+++ new/yast2-update-4.0.12/test/root_part_test.rb      2018-03-20 11:19:45.000000000 +0100
@@ -5,4 +5,233 @@
 Yast.import "RootPart"
 
 describe Yast::RootPart do
+  describe "#MountVarIfRequired" do
+    before do
+      stub_storage(scenario)
+      # Mock the system lookup executed as last resort when the devicegraph
+      # doesn't contain the searched information
+      allow(Y2Storage::BlkDevice).to receive(:find_by_any_name)
+    end
+
+    let(:scenario) { "two-disks-two-btrfs.xml" }
+
+    let(:fstab_sda1) do
+      [
+        {
+          "file"=>"/", "mntops"=>"defaults", "vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"/lib/machines", "mntops"=>"subvol=/@/lib/machines", 
"vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"/var", "mntops"=>"subvol=/@/var", "vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"swap", "mntops"=>"defaults", "vfstype"=>"swap",
+          "spec"=>"/dev/disk/by-uuid/a62e32ec-f58d-4bff-941b-6fb9ea45c016"
+        }
+      ]
+    end
+
+    let(:fstab_sda2) do
+      [
+        {
+          "file"=>"/", "mntops"=>"defaults", "vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"/usr/local", "mntops"=>"subvol=/@/usr/local", 
"vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"/tmp", "mntops"=>"subvol=/@/tmp", "vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"/.snapshots", "mntops"=>"subvol=/@/.snapshots", 
"vfstype"=>"btrfs",
+          "spec"=> root_spec
+        },
+        {
+          "file"=>"swap", "mntops"=>"defaults", "vfstype"=>"swap",
+          "spec"=>"UUID=a62e32ec-f58d-4bff-941b-6fb9ea45c016"
+        }
+      ]
+    end
+
+    RSpec.shared_examples "mounting result" do
+      context "and mounting /var fails with an error message" do
+        before do
+          allow(subject).to receive(:FsckAndMount).with("/var", any_args)
+            .and_return "an error"
+        end
+
+        it "returns a string including the device and the error " do
+          result = subject.MountVarIfRequired(fstab, root_device, false)
+          expect(result).to be_a(String)
+          expect(result).to include("an error")
+          expect(result).to include(var_device)
+        end
+      end
+
+      context "and mounting /var succeeds" do
+        before do
+          allow(subject).to receive(:FsckAndMount).with("/var", 
any_args).and_return nil
+        end
+
+        it "returns nil" do
+          expect(subject.MountVarIfRequired(fstab, root_device, false)).to be_nil
+        end
+      end
+    end
+
+    context "if there is no separate partition" do
+      context "and no @/var subvolume" do
+        let(:fstab) { fstab_sda2 }
+        let(:root_device) { "/dev/sda2" }
+        let(:root_spec) { "UUID=d6e5c710-3067-48de-8363-433e54a9d0b5" }
+
+        it "does not try to mount /var" do
+          expect(subject).to_not receive(:FsckAndMount)
+          subject.MountVarIfRequired(fstab, root_device, false)
+        end
+
+        it "returns nil" do
+          expect(subject.MountVarIfRequired(fstab, root_device, false)).to be_nil
+        end
+      end
+
+      context "and there is a @/var subvolume" do
+        let(:fstab) { fstab_sda1 }
+        let(:root_device) { "/dev/sda1" }
+        let(:root_spec) { "UUID=0a0ebfa7-e1a8-45f2-ad53-495e192fcc8d" }
+
+        # The old code did not support Btrfs properly, so it mounted the /var
+        # subvolume as a partition, which produced big breakage.
+        it "does not try to mount /var" do
+          expect(subject).to_not receive(:FsckAndMount)
+          subject.MountVarIfRequired(fstab, root_device, false)
+        end
+
+        it "returns nil" do
+          expect(subject.MountVarIfRequired(fstab, root_device, false)).to be_nil
+        end
+      end
+    end
+
+    context "if /var is a separate partition" do
+      let(:fstab) do
+        fstab_sda2 + [
+          {
+            "file"=>"/var", "mntops"=>"defaults", "vfstype"=>"xfs",
+            "spec"=> var_spec
+          }
+        ]
+      end
+
+      context "that was mounted by UUID" do
+        let(:root_device) { "/dev/sda2" }
+        let(:root_spec) { "UUID=d6e5c710-3067-48de-8363-433e54a9d0b5" }
+
+        let(:var_spec) { "UUID=c9510dc7-fb50-4f7b-bd84-886965c821f6" }
+        let(:var_device) { var_spec }
+
+        it "tries to mount /var by its UUID" do
+          expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+          subject.MountVarIfRequired(fstab, root_device, false)
+        end
+
+        include_examples "mounting result"
+      end
+
+      context "that was mounted by kernel device name" do
+        # Let's simulate the situation in which the disk used to have another name
+        let(:root_spec) { "/dev/sdb2" }
+        let(:root_device) { "/dev/sda2" }
+
+        context "and is in the same disk than /" do
+          let(:var_spec) { "/dev/sdb4" }
+          let(:var_device) { "/dev/sda4" }
+
+          it "tries to mount /var by its adapted device name" do
+            expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+            subject.MountVarIfRequired(fstab, root_device, false)
+          end
+
+          include_examples "mounting result"
+        end
+
+        context "and is in a different disk than / (two disks in total)" do
+          let(:var_spec) { "/dev/sda1" }
+          let(:var_device) { "/dev/sdb1" }
+
+          it "tries to mount /var by its adapted device name" do
+            expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+            subject.MountVarIfRequired(fstab, root_device, false)
+          end
+
+          include_examples "mounting result"
+        end
+      end
+    end
+
+    context "if /var is a separate LVM logical volume" do
+      let(:scenario) { "trivial-lvm.yml" }
+
+      let(:fstab) do
+        fstab_sda2 + [
+          {
+            "file"=>"/var", "mntops"=>"defaults", "vfstype"=>"xfs",
+            "spec"=> var_spec
+          }
+        ]
+      end
+
+      context "that was mounted by UUID" do
+        let(:root_device) { "/dev/vg0/root" }
+        let(:root_spec) { "/dev/disk/by-uuid/5a0a-3387" }
+
+        let(:var_spec) { "/dev/disk/by-uuid/4b85-3de0" }
+        let(:var_device) { var_spec }
+
+        it "tries to mount /var by its UUID" do
+          expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+          subject.MountVarIfRequired(fstab, root_device, false)
+        end
+
+        include_examples "mounting result"
+      end
+
+      context "that was mounted by kernel device name" do
+        let(:root_spec) { "/dev/vg0/root" }
+        let(:root_device) { "/dev/vg0/root" }
+
+        context "and the LV is not longer there" do
+          let(:var_spec) { "/dev/vg0/none" }
+          let(:var_device) { "/dev/vg0/none" }
+
+          it "tries to mount /var by its old device name" do
+            expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+            subject.MountVarIfRequired(fstab, root_device, false)
+          end
+
+          include_examples "mounting result"
+        end
+
+        context "and the LV is still there" do
+          let(:var_spec) { "/dev/vg0/var" }
+          let(:var_device) { "/dev/vg0/var" }
+
+          it "tries to mount /var by its device name" do
+            expect(subject).to receive(:FsckAndMount).with("/var", var_device, 
"")
+            subject.MountVarIfRequired(fstab, root_device, false)
+          end
+
+          include_examples "mounting result"
+        end
+      end
+    end
+  end
 end

