Hello!
Trying to run 3 Ubuntu nodes with 3 disks in vagrant.
Has anyone managed to do this?
Below is a WORKING vagrantfile with 3 centos7 nodes with 3 disks.
But it DOESN'T WORK for ubuntu.
$sdb1 = <<-SCRIPT
parted /dev/sdb mklabel msdos
parted /dev/sdb mkpart primary 0% 100%
mkfs.xfs /dev/sdb1
mkdir /mnt/data1
if grep -Fxq "sdb1" /etc/fstab
then
echo 'sdb1 exist in fstab'
else
echo `blkid /dev/sdb1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data1 xfs
noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data1 > /dev/null; then
echo "/dev/sdb1 mounted /mnt/data1"
umount /mnt/data1
mount /mnt/data1
else
mount /mnt/data1
fi
SCRIPT
$sdc1 = <<-SCRIPT
parted /dev/sdc mklabel msdos
parted /dev/sdc mkpart primary 0% 100%
mkfs.xfs /dev/sdc1
mkdir /mnt/data2
if grep -Fxq "sdc1" /etc/fstab
then
echo 'sdc1 exist in fstab'
else
echo `blkid /dev/sdc1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data2 xfs
noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data2 > /dev/null; then
echo "/dev/sdc1 mounted /mnt/data2"
umount /mnt/data2
mount /mnt/data2
else
mount /mnt/data2
fi
SCRIPT
$sdd1 = <<-SCRIPT
parted /dev/sdd mklabel msdos
parted /dev/sdd mkpart primary 0% 100%
mkfs.xfs /dev/sdd1
mkdir /mnt/metadata1
if grep -Fxq "sdd1" /etc/fstab
then
echo 'sdd1 exist in fstab'
else
echo `blkid /dev/sdd1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/metadata1
xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/metadata1 > /dev/null; then
echo "/dev/sdd1 mounted /mnt/metadata1"
umount /mnt/metadata1
mount /mnt/metadata1
else
mount /mnt/metadata1
fi
SCRIPT
node1disk1 = "./tmp/node1disk1.vdi";
node1disk2 = "./tmp/node1disk2.vdi";
node1disk3 = "./tmp/node1disk3.vdi";
ip_node1 = "192.168.33.31";
Vagrant.configure("2") do |config|
config.vm.define "node1" do |node1|
node1.vm.network "private_network", ip: ip_node1
node1.vm.hostname = "node1"
node1.vm.define "node1"
node1.vm.box_download_insecure = true
node1.vm.box = "centos/7"
node1.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
if not File.exists?(node1disk1)
vb.customize ['createhd', '--filename', node1disk1, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
0, '--device', 1, '--type', 'hdd', '--medium', node1disk1]
end
if not File.exists?(node1disk2)
vb.customize ['createhd', '--filename', node1disk2, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
1, '--device', 0, '--type', 'hdd', '--medium', node1disk2]
end
if not File.exists?(node1disk3)
vb.customize ['createhd', '--filename', node1disk3, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
1, '--device', 1, '--type', 'hdd', '--medium', node1disk3]
end
end
node1.vm.provision "shell", inline: $sdb1
node1.vm.provision "shell", inline: $sdc1
node1.vm.provision "shell", inline: $sdd1
end
end
In the process of experimenting had to come to this Vagrantfile.
But the system cannot boot
$sdb1 = <<-SCRIPT
parted /dev/sdb mklabel msdos
parted /dev/sdb mkpart primary 0% 100%
mkfs.xfs /dev/sdb1
mkdir /mnt/data1
if grep -Fxq "sdb1" /etc/fstab
then
echo 'sdb1 exist in fstab'
else
echo `blkid /dev/sdb1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data1 xfs
noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data1 > /dev/null; then
echo "/dev/sdb1 mounted /mnt/data1"
umount /mnt/data1
mount /mnt/data1
else
mount /mnt/data1
fi
SCRIPT
$sdc1 = <<-SCRIPT
parted /dev/sdc mklabel msdos
parted /dev/sdc mkpart primary 0% 100%
mkfs.xfs /dev/sdc1
mkdir /mnt/data2
if grep -Fxq "sdc1" /etc/fstab
then
echo 'sdc1 exist in fstab'
else
echo `blkid /dev/sdc1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/data2 xfs
noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/data2 > /dev/null; then
echo "/dev/sdc1 mounted /mnt/data2"
umount /mnt/data2
mount /mnt/data2
else
mount /mnt/data2
fi
SCRIPT
$sdd1 = <<-SCRIPT
parted /dev/sdd mklabel msdos
parted /dev/sdd mkpart primary 0% 100%
mkfs.xfs /dev/sdd1
mkdir /mnt/metadata1
if grep -Fxq "sdd1" /etc/fstab
then
echo 'sdd1 exist in fstab'
else
echo `blkid /dev/sdd1 | awk '{print$2}' | sed -e 's/"//g'` /mnt/metadata1
xfs noatime,nobarrier 0 0 >> /etc/fstab
fi
if mount | grep /mnt/metadata1 > /dev/null; then
echo "/dev/sdd1 mounted /mnt/metadata1"
umount /mnt/metadata1
mount /mnt/metadata1
else
mount /mnt/metadata1
fi
SCRIPT
node1disk1 = "./tmp/node1disk1.vdi";
node1disk2 = "./tmp/node1disk2.vdi";
node1disk3 = "./tmp/node1disk3.vdi";
ip_node1 = "192.168.33.31";
Vagrant.configure("2") do |config|
config.vm.define "node1" do |node1|
node1.vm.network "private_network", ip: ip_node1
node1.vm.hostname = "node1"
node1.vm.define "node1"
node1.vm.box_download_insecure = true
node1.vm.box = "ubuntu/bionic64"
node1.vm.provider "virtualbox" do |vb|
vb.gui = true
vb.memory = "1024"
vb.customize ["storagectl", :id, "--name", "IDE", "--remove"]
vb.customize ["storagectl", :id, "--name", "IDE", "--add", "ide",
"--controller", "ICH6"]
if not File.exists?(node1disk1)
vb.customize ['createhd', '--filename', node1disk1, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
0, '--device', 1, '--type', 'hdd', '--medium', node1disk1]
end
if not File.exists?(node1disk2)
vb.customize ['createhd', '--filename', node1disk2, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
1, '--device', 0, '--type', 'hdd', '--medium', node1disk2]
end
if not File.exists?(node1disk3)
vb.customize ['createhd', '--filename', node1disk3, '--variant',
'Fixed', '--size', 1 * 1024]
vb.customize ['storageattach', :id, '--storagectl', 'IDE', '--port',
1, '--device', 1, '--type', 'hdd', '--medium', node1disk3]
end
end
node1.vm.provision "shell", inline: $sdb1
node1.vm.provision "shell", inline: $sdc1
node1.vm.provision "shell", inline: $sdd1
end
end
VBoxManage showvminfo says:
Storage Controller Name (0): SCSI
Storage Controller Type (0): LsiLogic
Storage Controller Instance Number (0): 0
Storage Controller Max Port Count (0): 16
Storage Controller Port Count (0): 16
Storage Controller Bootable (0): on
Storage Controller Name (1): IDE
Storage Controller Type (1): ICH6
Storage Controller Instance Number (1): 0
Storage Controller Max Port Count (1): 2
Storage Controller Port Count (1): 2
Storage Controller Bootable (1): on
SCSI (0, 0): /home/user/VirtualBox
VMs/vagrant-openio-multi-nodes_node1_1565541256124_28246/ubuntu-bionic-18.04-cloudimg.vmdk
(UUID: 9b9b05cc-d359-428e-a4c5-91391eb7e0e3)
SCSI (1, 0): /home/user/VirtualBox
VMs/vagrant-openio-multi-nodes_node1_1565541256124_28246/ubuntu-bionic-18.04-cloudimg-configdrive.vmdk
(UUID: 5e47924d-2ad2-4096-9a58-7b97d2ffcbd8)
IDE (0, 1): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk1.vdi
(UUID: d2ef2936-f296-483c-9336-04b5bbd417e9)
IDE (1, 0): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk2.vdi
(UUID: 2673732a-edf3-48f2-8ecb-50af82b1d2e5)
IDE (1, 1): /home/user/github/vagrant-openio-multi-nodes/tmp/node1disk3.vdi
(UUID: f2243189-ebba-496a-aab8-cb97f68b4038)
--
This mailing list is governed under the HashiCorp Community Guidelines -
https://www.hashicorp.com/community-guidelines.html. Behavior in violation of
those guidelines may result in your removal from this mailing list.
GitHub Issues: https://github.com/mitchellh/vagrant/issues
IRC: #vagrant on Freenode
---
You received this message because you are subscribed to the Google Groups
"Vagrant" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
To view this discussion on the web visit
https://groups.google.com/d/msgid/vagrant-up/b8596a25-43bf-43ac-a57a-a72dbcfa039b%40googlegroups.com.