Following back up on this thread, as we were never able to find a resolution to
this issue.  There is no issue with the host disk size.  But there is some
additional information that we think might shed more light on what might be
causing lvcreate to hang:

We had to modify the 20-grub script in order for FAI to properly detect the 
virtual disks on the VM

Here is the script we used:

#! /bin/bash
# support for GRUB version 2
#
# This script was adapted from:
# https://github.com/faiproject/fai-config/blob/master/scripts/GRUB_PC/10-setup
# Customizations are noted below

# Remember the highest exit status of any failed command; it is reported
# via 'exit $error' at the end so FAI sees the worst failure.
error=0; trap 'error=$(($?>$error?$?:$error))' ERR # save maximum error code

set -a  # auto-export all variables so child processes inherit them
set -x  # trace every command into the log below, for debugging
exec > /tmp/20-grub.log 2>&1  # capture all script output in a log file

# do not set up grub during dirinstall
# do not set up grub during dirinstall
if [ "$FAI_ACTION" = "dirinstall" ] ; then
    exit 0
fi
# during softupdate use this file (it defines BOOT_DEVICE etc.)
[ -r "$LOGDIR/disk_var.sh" ] && . "$LOGDIR/disk_var.sh"

# without a boot device there is nothing to install grub on
if [ -z "$BOOT_DEVICE" ]; then
    exit 189
fi

# disable os-prober because of #802717
ainsl /etc/default/grub 'GRUB_DISABLE_OS_PROBER=true'

# efivars may still be mounted from the host system during fai-diskimage
if [ -d "$target/sys/firmware/efi/efivars" ]; then
    umount "$target/sys/firmware/efi/efivars"
fi

# skip the rest, if not an initial installation
# (quote $FAI_ACTION: the test breaks with "unary operator expected" if empty)
if [ "$FAI_ACTION" != "install" ]; then
    $ROOTCMD update-grub
    exit $error
fi

# CHANGES START
# Without the code below, this grub script was unable to detect /dev/sda
# These commands mount the local devices inside of the chroot of the FAI build
#
# Make sure /target has what grub/udevadm need: bind the host's /dev and
# /run into the chroot, and give it fresh proc/sysfs instances.
for vfs in dev proc sys run; do
    mountpoint -q "/target/$vfs" && continue
    case $vfs in
        proc) mount -t proc proc /target/proc ;;
        sys)  mount -t sysfs sys /target/sys ;;
        *)    mount --bind "/$vfs" "/target/$vfs" ;;
    esac
done
# Ensure udev has created by-id links inside the chroot
$ROOTCMD udevadm trigger --subsystem-match=block 2>/dev/null || true
$ROOTCMD udevadm settle 2>/dev/null || true

# This subroutine was updated to clean up the output to ensure that the grub
# script executes successfully
#
# Print the most stable persistent name for a block device, preferring
# by-id/scsi over by-id/ata over by-id/wwn; fall back to the raw device
# path when udev provides no persistent symlink (e.g. inside a VM).
get_stable_devname() {
    local dev="$1"
    local link
    local links
    local -a ranked=()

    # debug -> stderr only
    >&2 echo "get_stable_devname: ROOTCMD=$ROOTCMD"
    >&2 echo "get_stable_devname: _DEV=$dev"

    # Ask udev inside the chroot for persistent names; suppress errors
    links=$($ROOTCMD udevadm info -r --query=symlink "$dev" 2>/dev/null || true)

    # Rank candidates by sparse array index; lowest index wins below.
    for link in $links; do
        case $link in
            */by-id/scsi*) ranked[10]=$link ;;
            */by-id/ata*)  ranked[20]=$link ;;
            */by-id/wwn*)  ranked[99]=$link ;;
        esac
    done

    # If we found something, print the top candidate; otherwise fall back
    # to the raw device
    if [[ -n "${ranked[*]}" ]]; then
        echo "${ranked[@]::1}"
    else
        echo "$dev"
    fi
}
# END CHANGES

# handle /boot in lvm-on-md
# If BOOT_DEVICE resolves to a device-mapper node (an LV), ask lvs for the
# physical device backing it, so grub is installed on a real disk.
_bdev=$(readlink -f "$BOOT_DEVICE")
if [ "${_bdev%%-*}" = "/dev/dm" ]; then
  BOOT_DEVICE=$( lvs --noheadings -o devices "$BOOT_DEVICE" | sed -e 's/^\s*\([^(]*\)(.*$/\1/' )
fi

# Check if RAID is used for the boot device
if [[ $BOOT_DEVICE =~ '/dev/md' ]]; then
    raiddev=${BOOT_DEVICE#/dev/}
    # install grub on all members of RAID: parse /proc/mdstat for the member
    # devices of $raiddev, stripping the "N[M]" suffixes and nvme 'p' infix
    for device in $(LC_ALL=C perl -ne 'if(/^'$raiddev'\s.+raid\d+\s(.+)/){ $_=$1; s/\d+\[\d+\]//g; s/(nvme.+?)p/$1/g; print }' /proc/mdstat); do
        pdevice=$(get_stable_devname "/dev/$device")
        if [ -z "$pdevice" ]; then
            # if we cannot find a persistent name (for e.g. in a VM) use old name
            pdevice="/dev/$device"
        fi
        mbrdevices+="$pdevice, "
        echo "Installing grub on /dev/$device = $pdevice"
        $ROOTCMD grub-install --no-floppy "/dev/$device"
    done
    # remove last ,
    mbrdevices=${mbrdevices%, }
else
    mbrdevices=$(get_stable_devname "$BOOT_DEVICE")
    if [ -z "$mbrdevices" ]; then
        # if we cannot find a persistent name (for e.g. in a VM) use old name
        mbrdevices=$BOOT_DEVICE
    fi

# CHANGES START
# This cleans up and properly sets the mbrdevices variable so the following
# grub-install command is successful
    # if somehow mbrdevices contains newline/garbage, collapse and validate it
    mbrdevices=$(echo "$mbrdevices" | tr -d '\r' | tr '\n' ' ' | sed 's/  */ /g' | sed 's/^ *//; s/ *$//')

    # Ensure it's a usable path (if not, fall back)
    if [[ ! -e "$mbrdevices" && ! -b "$mbrdevices" ]]; then
        >&2 echo "WARNING: computed grub target '$mbrdevices' not found; using $BOOT_DEVICE"
        mbrdevices="$BOOT_DEVICE"
    fi
# END CHANGES

    echo "Installing grub on $BOOT_DEVICE = $mbrdevices"
    $ROOTCMD grub-install --no-floppy "$mbrdevices"
fi

# Record the install device(s) in debconf so later grub-pc package upgrades
# reinstall to the same place non-interactively.
echo "grub-pc grub-pc/install_devices multiselect $mbrdevices" | $ROOTCMD debconf-set-selections
$ROOTCMD dpkg-reconfigure grub-pc
exit $error



________________________________
From: linux-fai <[email protected]> on behalf of Thomas Lange 
<[email protected]>
Sent: Thursday, September 25, 2025 1:56 AM
To: fully automatic installation for Linux <[email protected]>
Subject: Re: trixie build hangs at lvcreate command during disk partitioning

>>>>> On Wed, 24 Sep 2025 15:15:15 -0700, [email protected] said:

    > Executing: lvcreate --yes -n root -L 12288 my_vg  <--- HANGS HERE
I cannot reproduce this error.

    > Any suggestions for troubleshooting steps?

    > More information below:

    > Here is the disk configuration:

    > disk_config disk1 align-at:4k fstabkey:uuid
    > primary /boot              512   ext4  rw,noatime
    > primary -                  4096- -     -
    > primary /var/cache/openafs 4096  ext2  defaults,noatime

    > disk_config lvm
    > vg            my_vg          sda2
I've changed sda2 to disk1.2, which is more elegant.
Then I've booted this FAI config in qemu with a fresh disk of size
40GB. It went smoothly through the disk partitioning steps.
Maybe your local storage is full and the qemu disk cannot grow to the
needed size? Pressing Alt-F2 gives you another root shell during the
FAI installation, so you can call commands to debug this.

But check your disk space on the host.

--
best regards Thomas

Antwort per Email an