Vagrant shell provisioner repeats five times

The shell provisioner is running five times, and I cannot find why this is happening.
My Vagrantfile:
Vagrant.configure("2") do |config|
config.vm.box = "papasmurf/win2016base"
config.vm.communicator = "winrm"
config.winrm.username = "vagrant"
config.winrm.password = "vagrant"
config.winrm.timeout = 180
config.vm.guest = :windows
config.windows.halt_timeout = 15
config.vm.provider "virtualbox" do |vb|
vb.linked_clone = true
vb.name = "DBSRV2016"
vb.memory = "4096"
vb.gui = false
vb.cpus = 2
config.vm.provision "shell" do |s|
s.privileged = "true"
s.inline = "echo Hello World"
end
end
end
The inline command was originally a cmd file, but while investigating this issue I found that even the echo Hello World is executed five times.
This is the debug log: https://www.dropbox.com/s/syl8f50xldu32xr/vagrant.log?dl=1
Any idea what is wrong here?

Your provision block is nested inside the provider block. Vagrant evaluates provider blocks more than once during a run, so a provisioner registered inside one gets added repeatedly. Move the provision block outside the provider block:
Vagrant.configure("2") do |config|
config.vm.box = "papasmurf/win2016base"
config.vm.communicator = "winrm"
config.winrm.username = "vagrant"
config.winrm.password = "vagrant"
config.winrm.timeout = 180
config.vm.guest = :windows
config.windows.halt_timeout = 15
config.vm.provider "virtualbox" do |vb|
vb.linked_clone = true
vb.name = "DBSRV2016"
vb.memory = "4096"
vb.gui = false
vb.cpus = 2
end
config.vm.provision "shell" do |s|
s.privileged = "true"
s.inline = "echo Hello World"
end
end
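
To verify the fix without recreating the VM, you can re-run just the provisioners against the running machine; Hello World should now appear exactly once:
vagrant provision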

Related

Vagrantfile with multi-OS machines?

I've been playing around with Vagrant for a while, and everything has worked as expected. But when I tried to create a few Linux and a few Windows nodes in the same file, the problems started. Usually it's with provisioning: either Vagrant tries to contact the Linux box over WinRM, or the Windows box tells me that apt is not something PowerShell can do. Each set of boxes is within its own configure block:
Vagrant.configure("2") do |config|
Vagrant.configure("2") do |windows|
Etc.
Any ideas how to tackle this?
It seems you're confusing the Vagrant configuration block (with its version argument) and the [Vagrant VM definition](https://www.vagrantup.com/docs/multi-machine#defining-multiple-machines).
You have one configuration block for your Vagrantfile and multiple VM definitions inside it.
Here is a sample I have used in the past with 4 VMs (3 CentOS-based and 1 Windows VM):
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'

settings = YAML.load_file File.join(File.dirname(__FILE__), "puppet/hieradata/common.yaml")
selenium_version = settings['selenium_version']

Vagrant.configure("2") do |config|
  config.vm.box = settings['host_box'] || "pws/centos65"
  config.ssh.username = settings['ariba_user']

  config.vm.define "db" do |db|
    db.vm.hostname = settings['db_hostname']
    db.vm.network "private_network", ip: settings['host_db_address']
    db.vm.synced_folder "dump/", "/dump"
    db.vm.provider "vmware_workstation" do |vm|
      vm.vmx["memsize"] = "3072"
    end
    db.vm.provider "virtualbox" do |vb|
      vb.memory = "3072"
    end
    db.vm.provision "shell", path: "puppet/script/install-puppet-modules-db.sh"
    db.vm.provision :puppet do |puppet|
      puppet.manifests_path = "puppet/manifests"
      puppet.manifest_file = "base-db.pp"
      puppet.module_path = "puppet/modules/db"
      puppet.hiera_config_path = "puppet/hiera.yaml"
      #puppet.options = "--verbose --debug"
    end
  end

  config.vm.define "app", primary: true do |app|
    app.vm.hostname = settings['ariba_hostname']
    app.vm.network "private_network", ip: settings['host_app_address']
    app.vm.synced_folder "puppet/install_ariba", "/home/ariba/install_sources"
    app.ssh.forward_agent = true
    app.ssh.forward_x11 = true
    app.vm.provider "vmware_workstation" do |vm|
      vm.vmx["memsize"] = "4096"
    end
    app.vm.provider "virtualbox" do |vb|
      vb.memory = "3072"
    end
    app.vm.provision "shell", path: "puppet/script/install-puppet-modules-app.sh"
    app.vm.provision :puppet do |puppet|
      puppet.manifests_path = "puppet/manifests"
      puppet.manifest_file = "base-app.pp"
      puppet.module_path = "puppet/modules"
      puppet.hiera_config_path = "puppet/hiera.yaml"
      #puppet.options = "--verbose --debug"
    end
    app.vm.provision "shell", path: "puppet/script/run-ariba-app.sh", privileged: false, run: 'always'
  end

  config.vm.define "win_10" do |win10|
    win10.vm.box = "windows_10"
    win10.vm.synced_folder "puppet/install_ariba/test", "/test"
    win10.vm.provision "shell", path: "puppet/install_ariba/test/install_win_jdk.ps1", privileged: false
    win10.vm.provision "shell", path: "puppet/install_ariba/test/install_browsers.ps1", privileged: false
    win10.vm.provision "shell", path: "puppet/install_ariba/test/start_win_selenium.bat", run: 'always', args: ["#{selenium_version}", settings['host_hub_address']]
  end

  config.vm.define "hub" do |hub|
    hub.vm.hostname = settings['hub_hostname']
    hub.vm.network "private_network", ip: settings['host_hub_address']
    hub.vm.provider "vmware_workstation" do |vm|
      vm.vmx["memsize"] = "1024"
    end
    hub.vm.provider "virtualbox" do |vb|
      vb.memory = "3072"
    end
    hub.vm.synced_folder "puppet/install_ariba/test", "/test"
    hub.vm.provision "shell", path: "puppet/script/install-puppet-modules-hub.sh"
    hub.vm.provision :puppet do |puppet|
      puppet.manifests_path = "puppet/manifests"
      puppet.manifest_file = "base-hub.pp"
      puppet.module_path = "puppet/modules/hub"
      puppet.hiera_config_path = "puppet/hiera.yaml"
      #puppet.options = "--verbose --debug"
    end
    hub.vm.provision "shell", path: "puppet/script/run-test.sh", privileged: false, run: 'always', args: "#{selenium_version}"
  end
end
Linux provisioning is done through shell scripts or Puppet, and Windows provisioning is done with PowerShell scripts or bat files. Pay attention to how the VMs are defined and how each block variable is used inside its corresponding block.
As mentioned in the Vagrant doc
For POSIX-like machines, the shell provisioner executes scripts with
SSH. For Windows guest machines that are configured to use WinRM, the
shell provisioner executes PowerShell and Batch scripts over WinRM.
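
As a minimal sketch of the same idea for one Linux plus one Windows machine (the box names below are placeholders; substitute your own): each define block carries its own communicator, so the Linux box is reached over SSH and the Windows box over WinRM.
Vagrant.configure("2") do |config|
  # Linux node: the default communicator is SSH, nothing to override
  config.vm.define "linux" do |linux|
    linux.vm.box = "generic/ubuntu1804"   # placeholder box
    linux.vm.provision "shell", inline: "apt-get update"
  end

  # Windows node: WinRM is set inside this define block only
  config.vm.define "windows" do |win|
    win.vm.box = "my/windows-box"         # placeholder box
    win.vm.communicator = "winrm"
    win.vm.guest = :windows
    win.vm.provision "shell", inline: "Write-Host 'hello from PowerShell'"
  end
end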

How to execute a playbook within a Vagrantfile under a certain condition?

I'm struggling with this Vagrantfile. My goal is to set up two virtual machines with Vagrant and also use Ansible to run a certain task in each machine after they have been created.
But how can I run the Ansible provisioner only after both machines have been created successfully?
# -*- mode: ruby -*-
# vi: set ft=ruby :

# variables
BOX_NAME = "debian/buster64"
PRIVATE_KEY_PATH_CONTROLLER = "~/.ssh/id_rsa"
PRIVATE_KEY_PATH_VAGRANT = "~/.vagrant.d/insecure_private_key"
PUBLIC_KEY_PATH_CONTROLLER = "~/.ssh/id_rsa.pub"
AUTHORIZED_KEYS_PATH = "~/.ssh/authorized_keys"

Vagrant.configure("2") do |config|
  # general configuration
  config.vm.box = BOX_NAME
  config.ssh.insert_key = false
  config.ssh.private_key_path = [PRIVATE_KEY_PATH_CONTROLLER, PRIVATE_KEY_PATH_VAGRANT]
  config.vm.provision "file", source: PUBLIC_KEY_PATH_CONTROLLER, destination: AUTHORIZED_KEYS_PATH
  config.vm.synced_folder "./shared", "/vagrant"

  # node 1 configuration
  config.vm.define "nodo_uno" do |nodo|
    nodo.vm.hostname = "nodo1"
    nodo.vm.network "private_network", ip: "192.168.50.10"
    nodo.vm.provider "virtualbox" do |vb|
      vb.name = "nodo1(50.10)"
      vb.memory = 512
      vb.cpus = 1
    end
  end

  # node 2 configuration
  config.vm.define "nodo_dos" do |nodo|
    nodo.vm.hostname = "nodo2"
    nodo.vm.network "private_network", ip: "192.168.50.20"
    nodo.vm.provider "virtualbox" do |vb|
      vb.name = "nodo2(50.20)"
      vb.memory = 512
      vb.cpus = 1
    end
  end

  # if nodo = "nodo_dos"
  config.vm.provision "ansible" do |ansible|
    # ansible.groups = {
    #   "nodo_uno" => ["nodos"],
    #   "nodo_dos" => ["nodos"]
    # }
    ansible.inventory_path = "./hosts"
    ansible.limit = "nodos"
    ansible.playbook = "grupos_usuarios.yml"
    ansible.become = true
  end
end
nodos is a group that contains nodo_uno and nodo_dos, so when I run vagrant up, Ansible throws an error because nodo_dos has not been created yet and therefore can't be provisioned.
I know I can run "vagrant up --no-provision" and then "vagrant provision", but I want to avoid that workflow and instead use some conditional statement in my Vagrantfile that triggers the playbook once nodo_dos has been successfully set up.
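One pattern from the Vagrant Ansible provisioner docs ("Ansible Parallel Execution") is to attach the provisioner only to the last defined machine and widen its scope with ansible.limit, so the playbook runs once, after every box is up. A sketch adapted to this Vagrantfile (only the nodo_dos block changes):
# node 2 configuration: Ansible is attached here because nodo_dos
# is the last machine Vagrant brings up
config.vm.define "nodo_dos" do |nodo|
  nodo.vm.hostname = "nodo2"
  nodo.vm.network "private_network", ip: "192.168.50.20"
  nodo.vm.provision "ansible" do |ansible|
    ansible.inventory_path = "./hosts"
    ansible.limit = "nodos"          # targets both nodes, not just nodo_dos
    ansible.playbook = "grupos_usuarios.yml"
    ansible.become = true
  end
end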

Vagrant: Enforce provisioning order in a multi-machine environment

My Vagrantfile creates three VMs: k8s-master, node-1, and node-2. How can I enforce that k8s-master is fully provisioned before the provisioning of node-1 and node-2 starts?
Here is my Vagrantfile:
IMAGE_NAME = "generic/ubuntu1804"
N = 2
Vagrant.configure("2") do |config|
config.ssh.insert_key = false
config.vm.provider "libvirt" do |v|
v.memory = 1024
v.cpus = 2
end
config.vm.define "k8s-master" do |master|
master.vm.box = IMAGE_NAME
master.vm.network "private_network", ip: "192.168.50.10"
master.vm.hostname = "k8s-master"
master.vm.provision "ansible" do |ansible|
ansible.playbook = "kubernetes-setup/master-playbook.yml"
ansible.extra_vars = {
node_ip: "192.168.50.10",
}
ansible.compatibility_mode = "2.0"
end
end
(1..N).each do |i|
config.vm.define "node-#{i}" do |node|
node.vm.box = IMAGE_NAME
node.vm.network "private_network", ip: "192.168.50.#{i + 10}"
node.vm.hostname = "node-#{i}"
node.vm.provision "ansible" do |ansible|
ansible.playbook = "kubernetes-setup/node-playbook.yml"
ansible.extra_vars = {
node_ip: "192.168.50.#{i + 10}",
}
ansible.compatibility_mode = "2.0"
end
end
end
end
The multi-machine documentation mentions outside-in ordering, so I tried putting the for-each loop inside the k8s-master's define-block, but then the nodes are not created at all.
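One thing worth checking (an assumption about your setup): the libvirt provider brings machines up in parallel by default, which interleaves provisioning. Since machines are otherwise processed in definition order and k8s-master is defined first, forcing a serial run guarantees its playbook finishes before node-1 starts, at the cost of slower startup:
vagrant up --no-parallel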

How to set VirtualBox machine name to be the same as the hostname in multi-machine Vagrantfile?

The following Vagrantfile configuration won't work, as the next VM gets the same name as the previous one. (BTW, not 100% sure, but I think this used to work in previous Vagrant versions.)
Vagrant.configure(2) do |config|
  config.vm.define "xfcevm" do |xfcevm|
    xfcevm.vm.box = "generic/ubuntu1904"
    xfcevm.vm.hostname = "xfcevm"
    config.vm.provider "virtualbox" do |vb|
      vb.name = "xfcevm"
    end
  end

  config.vm.define "gnomevm" do |gnomevm|
    gnomevm.vm.box = "generic/fedora30"
    gnomevm.vm.hostname = "gnomevm"
    config.vm.provider "virtualbox" do |vb|
      vb.name = "gnomevm"
    end
  end

  config.vm.provider "virtualbox" do |vb|
    # vb.name = config.vm.hostname
    vb.gui = true
    vb.memory = "3072"
    vb.cpus = 1
    vb.customize ["modifyvm", :id, "--vram", "32"]
  end

  config.vm.provision "ansible" do |ansible|
    ansible.verbose = "v"
    ansible.compatibility_mode = "2.0"
    ansible.playbook = "setup.yml"
  end

  config.vm.provision "ansible", run: 'always' do |ansible|
    ansible.verbose = "v"
    ansible.compatibility_mode = "2.0"
    ansible.playbook = "tests.yml"
  end
end
In the commented line vb.name = config.vm.hostname, the right-hand side returns an object (puts prints it as #<Object:0x0000000001c191d8>), and I'm not familiar enough with Ruby and Vagrant to get a string attribute out of it, if that's even possible.
P.S. A workaround (an alternative question to this one) would be to get the running VirtualBox machine name from the Ansible playbook, since the goal is to call VBoxManage on that virtual machine as a local_action from inside the playbook.
You are configuring the virtualbox provider on the global config object.
# Wrong for multi-VM:
# this sets a default name for all VMs
config.vm.provider "virtualbox" do |vb|
  vb.name = "gnomevm"
end
Call it on the specific VM you are defining instead (example for gnomevm):
# Correct:
# this sets the name for this VM only
gnomevm.vm.provider "virtualbox" do |vb|
  vb.name = "gnomevm"
end
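
Applied to the whole Vagrantfile, a sketch (using the same boxes as above) that keeps each VirtualBox name in sync with the hostname by looping over the machine definitions:
boxes = { "xfcevm" => "generic/ubuntu1904", "gnomevm" => "generic/fedora30" }

Vagrant.configure(2) do |config|
  boxes.each do |name, box|
    config.vm.define name do |machine|
      machine.vm.box = box
      machine.vm.hostname = name
      machine.vm.provider "virtualbox" do |vb|
        vb.name = name   # per-machine provider override, not the global one
      end
    end
  end
end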

How to halt a machine in multi-machine Vagrant setup immediately upon successful provisioning?

There's a multi-machine Vagrant setup (truncated here to two machines) like the following:
Vagrant.configure(2) do |config|
  config.vm.define "xfcevm" do |xfcevm|
    xfcevm.vm.box = "generic/ubuntu1904"
    xfcevm.vm.hostname = "xfcevm"
  end

  config.vm.define "gnomevm" do |gnomevm|
    gnomevm.vm.box = "generic/fedora30"
    gnomevm.vm.hostname = "gnomevm"
  end

  config.vm.provider "virtualbox" do |vb|
    vb.gui = true
    vb.memory = "2048"
    vb.cpus = 1
    vb.customize ["modifyvm", :id, "--vram", "32"]
  end

  config.vm.provision "ansible" do |ansible|
    ansible.verbose = "v"
    ansible.compatibility_mode = "2.0"
    ansible.playbook = "setup.yml"
  end

  config.vm.provision "ansible", run: 'always' do |ansible|
    ansible.verbose = "v"
    ansible.compatibility_mode = "2.0"
    ansible.playbook = "tests.yml"
  end

  # halt here
end
If the tests playbook passes without errors, I want that machine to be halted right after tests.yml finishes. How can I do that from the Vagrantfile or by creating another Ansible task?
You can issue a shutdown command at the end of your test playbook. It will only run if all the preceding tasks were successful.
- name: shutdown machine
  become: true
  command: shutdown -h now
See shutdown --help to adapt the command to your specific needs (e.g. halting instead of powering off).
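If you would rather keep the shutdown in the Vagrantfile itself, Vagrant 2.1+ supports triggers; a sketch, assuming a POSIX guest where shutdown is available:
# Runs on the guest after every provisioning pass; since a failed
# provisioner aborts the run, this only fires when provisioning succeeded
config.trigger.after :provision do |trigger|
  trigger.info = "Halting the machine after provisioning"
  trigger.run_remote = { inline: "shutdown -h now" }
end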
