udevadm info -e >/tmp/1828617-2.out

~# ls -l /var/lib/ceph/osd/ceph*
-rw------- 1 ceph ceph  69 May 21 08:44 
/var/lib/ceph/osd/ceph.client.osd-upgrade.keyring

/var/lib/ceph/osd/ceph-11:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-33de740d-bd8c-4b47-a601-3e6e634e489a/osd-block-33de740d-bd8c-4b47-a601-3e6e634e489a
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-18:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-eb5270dc-1110-420f-947e-aab7fae299c9/osd-block-eb5270dc-1110-420f-947e-aab7fae299c9
lrwxrwxrwx 1 ceph ceph 94 May 28 22:12 block.db -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-db-eb5270dc-1110-420f-947e-aab7fae299c9
lrwxrwxrwx 1 ceph ceph 95 May 28 22:12 block.wal -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-wal-eb5270dc-1110-420f-947e-aab7fae299c9
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-24:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-d38a7e91-cf06-4607-abbe-53eac89ac5ea/osd-block-d38a7e91-cf06-4607-abbe-53eac89ac5ea
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-31:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-053e000a-76ed-427e-98b3-e5373e263f2d/osd-block-053e000a-76ed-427e-98b3-e5373e263f2d
lrwxrwxrwx 1 ceph ceph 94 May 28 22:12 block.db -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-db-053e000a-76ed-427e-98b3-e5373e263f2d
lrwxrwxrwx 1 ceph ceph 95 May 28 22:12 block.wal -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-wal-053e000a-76ed-427e-98b3-e5373e263f2d
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-38:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-c2669da2-63aa-42e2-b049-cf00a478e076/osd-block-c2669da2-63aa-42e2-b049-cf00a478e076
lrwxrwxrwx 1 ceph ceph 94 May 28 22:12 block.db -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-db-c2669da2-63aa-42e2-b049-cf00a478e076
lrwxrwxrwx 1 ceph ceph 95 May 28 22:12 block.wal -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-wal-c2669da2-63aa-42e2-b049-cf00a478e076
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-4:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-7478edfc-f321-40a2-a105-8e8a2c8ca3f6/osd-block-7478edfc-f321-40a2-a105-8e8a2c8ca3f6
lrwxrwxrwx 1 ceph ceph 94 May 28 22:12 block.db -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-db-7478edfc-f321-40a2-a105-8e8a2c8ca3f6
lrwxrwxrwx 1 ceph ceph 95 May 28 22:12 block.wal -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-wal-7478edfc-f321-40a2-a105-8e8a2c8ca3f6
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 55 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  2 May 28 22:12 whoami

/var/lib/ceph/osd/ceph-45:
total 24
lrwxrwxrwx 1 ceph ceph 93 May 28 22:12 block -> 
/dev/ceph-12e68fcb-d2b6-459f-97f2-d3eb4e28c75e/osd-block-12e68fcb-d2b6-459f-97f2-d3eb4e28c75e
lrwxrwxrwx 1 ceph ceph 94 May 28 22:12 block.db -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-db-12e68fcb-d2b6-459f-97f2-d3eb4e28c75e
lrwxrwxrwx 1 ceph ceph 95 May 28 22:12 block.wal -> 
/dev/ceph-wal-4de27554-2d05-440e-874a-9921dfc6f47e/osd-wal-12e68fcb-d2b6-459f-97f2-d3eb4e28c75e
-rw------- 1 ceph ceph 37 May 28 22:12 ceph_fsid
-rw------- 1 ceph ceph 37 May 28 22:12 fsid
-rw------- 1 ceph ceph 56 May 28 22:12 keyring
-rw------- 1 ceph ceph  6 May 28 22:12 ready
-rw------- 1 ceph ceph 10 May 28 22:12 type
-rw------- 1 ceph ceph  3 May 28 22:12 whoami


** Attachment added: "1828617-2.out"
   
https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1828617/+attachment/5267247/+files/1828617-2.out

-- 
You received this bug notification because you are a member of Ubuntu
Touch seeded packages, which is subscribed to systemd in Ubuntu.
https://bugs.launchpad.net/bugs/1828617

Title:
  Hosts randomly 'losing' disks, breaking ceph-osd service enumeration

Status in systemd package in Ubuntu:
  New

Bug description:
  Ubuntu 18.04.2 Ceph deployment.

  Ceph OSD devices utilizing LVM volumes pointing to udev-based physical 
devices.
  LVM module is supposed to create PVs from devices using the links in 
the /dev/disk/by-dname/ folder that are created by udev.
  However on reboot it happens (not always, rather like a race condition) that 
Ceph services cannot start, and pvdisplay doesn't show any volumes created. The 
folder /dev/disk/by-dname/ however has all necessary devices created by the end 
of the boot process.

  The behaviour can be fixed manually by running "#/sbin/lvm pvscan
  --cache --activate ay /dev/nvme0n1" command for re-activating the LVM
  components and then the services can be started.

To manage notifications about this bug go to:
https://bugs.launchpad.net/ubuntu/+source/systemd/+bug/1828617/+subscriptions

-- 
Mailing list: https://launchpad.net/~touch-packages
Post to     : [email protected]
Unsubscribe : https://launchpad.net/~touch-packages
More help   : https://help.launchpad.net/ListHelp

Reply via email to