Ansible eth1 IPv4 address in a Jinja2 template

I'm currently working on a school assignment where I have to configure HAProxy to load balance between my two web servers.
I'm deploying the machines via Vagrant in VirtualBox. After that, my Ansible playbook runs: it starts off by configuring the web servers, and once those are done it configures the load balancer.
Sadly, I can't manage to get the IPv4 address of both eth1 adapters added to the HAProxy config. I keep getting the message that Ansible can't find the variable inside hostvars.
TASK [Configuring haproxy]
fatal: [HDVLD-TEST-LB01]: FAILED! => {"changed": false, "msg":
"AnsibleUndefinedVariable: 'ansible.vars.hostvars.HostVarsVars object'
has no attribute 'ansible_eth1'"}
On top of this, HAProxy is not responding on 10.2.2.20:8080; Chrome gives me an
ERR_CONNECTION_REFUSED
I hope someone here can help me out. My code is below.
Vagrantfile
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
  # config.ssh.insert_key = false

  #webserver
  (1..2).each do |i|
    config.vm.define "HDVLD-TEST-WEB0#{i}" do |webserver|
      webserver.vm.box = "ubuntu/trusty64"
      webserver.vm.hostname = "HDVLD-TEST-WEB0#{i}"
      webserver.vm.network :private_network, ip: "10.2.2.1#{i}"
      webserver.vm.provider :virtualbox do |vb|
        vb.memory = "524"
        vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
      end
      webserver.vm.provision "shell" do |shell|
        ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub")
        shell.inline = <<-SHELL
          echo #{ssh_pub_key} >> /home/vagrant/.ssh/authorized_keys
          echo #{ssh_pub_key} >> /root/.ssh/authorized_keys
        SHELL
      end
    end

    # config.vm.define "HDVLD-TEST-DB01" do |db_server|
    #   db_server.vm.box = "ubuntu/trusty64"
    #   db_server.vm.hostname = "HDVLD-TEST-DB01"
    #   db_server.vm.network :private_network, ip: "10.2.2.30"
    # end

    config.vm.define "HDVLD-TEST-LB01" do |lb_server|
      lb_server.vm.box = "ubuntu/trusty64"
      lb_server.vm.hostname = "HDVLD-TEST-LB01"
      lb_server.vm.network :private_network, ip: "10.2.2.20"
      lb_server.vm.provider :virtualbox do |vb|
        vb.memory = "524"
        vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
      end
      lb_server.vm.provision "shell" do |shell|
        ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub")
        shell.inline = <<-SHELL
          echo #{ssh_pub_key} >> /home/vagrant/.ssh/authorized_keys
          echo #{ssh_pub_key} >> /root/.ssh/authorized_keys
        SHELL
      end
    end

    config.vm.provision :ansible do |ansible|
      ansible.playbook = "webserver_test.yml"
      ansible.groups = {
        "webservers" => ["HDVLD-TEST-WEB01", "HDVLD-TEST-WEB02"],
        "loadbalancer" => ["HDVLD-TEST-LB01"]
      }
    end
  end
end
Playbook.yml
- hosts: webservers
  become: true
  vars_files: vars/default.yml
  gather_facts: true
  tasks:
    # Getting the IP address of the eth1 interface
    - name: Gather facts from new server
      delegate_facts: true
      setup:
        filter: ansible_eth1.ipv4.address
    - name: Debug facts from Server
      delegate_facts: true
      debug:
        var: ansible_eth1.ipv4.address
    - name: UPurge
      apt: purge=yes
    - name: Install latest version of Apache
      apt: name=apache2 update_cache=yes state=latest
    - name: Install latest version of Facter
      apt: name=facter state=latest
    - name: Create document root for your domain
      file:
        path: /var/www/{{ http_host }}
        state: directory
        mode: '0755'
    - name: Copy your index page
      template:
        src: "files/index.html.j2"
        dest: "/var/www/{{ http_host }}/index.html"
    - name: Set up VirtualHost
      template:
        src: "files/apache.conf.j2"
        dest: "/etc/apache2/sites-available/{{ http_conf }}"
      notify: restart-apache
    - name: Enable new site {{ http_host }}
      command: a2ensite {{ http_host }}
    - name: Disable default site
      command: a2dissite 000-default
      when: disable_default
      notify: restart-apache
    - name: "UFW firewall allow HTTP on port {{ http_port }}"
      ufw:
        rule: allow
        port: "{{ http_port }}"
        proto: tcp
  handlers:
    - name: restart-apache
      service:
        name: apache2
        state: restarted

- hosts: loadbalancer
  become: true
  vars_files: vars/default.yml
  gather_facts: true
  tasks:
    - name: "Installing haproxy"
      package:
        name: "haproxy"
        state: present
    - name: "Starting haproxy"
      service:
        name: "haproxy"
        state: started
        enabled: yes
    - name: "Configuring haproxy"
      template:
        src: "files/haproxy.conf.j2"
        dest: "/etc/haproxy/haproxy.cfg"
      notify: restart-haproxy
    - name: "UFW firewall allow Proxy on port {{ proxy_port }}"
      ufw:
        rule: allow
        port: "{{ proxy_port }}"
        proto: tcp
    - name: "UFW firewall allow static port on port {{ staticlb_port }}"
      ufw:
        rule: allow
        port: "{{ staticlb_port }}"
        proto: tcp
    - name: Gather facts from new Server
      setup:
        filter: ansible_default_ipv4.address
  handlers:
    - name: restart-haproxy
      service:
        name: haproxy
        state: restarted
Haproxy.conf.j2
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.* /var/log/haproxy.log
    #
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 1000
    user haproxy
    group haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000
listen haproxy-monitoring *:{{ proxy_port }}
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main
    bind *:{{ http_port }}
    acl url_static path_beg -i /static /images /javascript /stylesheets
    acl url_static path_end -i .jpg .gif .png .css .js
    use_backend static if url_static
    default_backend app
#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance roundrobin
    server static 127.0.0.1:{{ staticlb_port }} check
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance roundrobin
{% for host in groups['webservers'] %}
    {{ hostvars[host].ansible_eth1.ipv4.address }}:{{ http_port }} check
{% endfor %}
defaults (vars file)
http_host: "hdvld"
http_conf: "hdvld.conf"
http_port: "80"
proxy_port: "8080"
disable_default: true
staticlb_port: "4331"
I'm doing something wrong, but I can't find the issue. I spent the whole day yesterday searching and trying things, so there are some leftover commented-out pieces of code inside the files; please ignore them.
Update: I've added the inventory file below.
# Generated by Vagrant
HDVLD-TEST-LB01 ansible_host=127.0.0.1 ansible_port=2200 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-LB01/virtualbox/private_key'
HDVLD-TEST-WEB02 ansible_host=127.0.0.1 ansible_port=2201 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB02/virtualbox/private_key'
HDVLD-TEST-WEB01 ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/home/web01/VM2022/template/.vagrant/machines/HDVLD-TEST-WEB01/virtualbox/private_key'
[webservers]
HDVLD-TEST-WEB01
HDVLD-TEST-WEB02
[loadbalancer]
HDVLD-TEST-LB01

In the first play (you could replace the first two tasks):
- name: N1
  hosts: webservers
  tasks:
    - name: Get eth1 address
      set_fact:
        ips: "{{ ips | d({}) | combine({_ho: _ip}) }}"
      loop: "{{ ansible_play_hosts }}"
      vars:
        _ho: "{{ item }}"
        _ip: "{{ hostvars[item].ansible_eth1.ipv4.address }}"
    - name: Add variables to dummy host
      add_host:
        name: "variable_holder"
        shared_variable: "{{ ips }}"
In the second play:
- name: N2
  hosts: loadbalancer
  gather_facts: true
  vars:
    ips: "{{ hostvars['variable_holder']['shared_variable'] }}"
  tasks:
    - name: Check the value of ips
      debug:
        var: ips
In the j2 file, change the backend section to:
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance roundrobin
{% for host in groups['webservers'] if ips[host] is defined %}
    server {{ host }} {{ ips[host] }}:{{ http_port }} check
{% endfor %}
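For reference, with the Vagrantfile above assigning 10.2.2.11 and 10.2.2.12 to the two web servers and http_port set to 80, the rendered backend section should come out roughly like this:

backend app
    balance roundrobin
    server HDVLD-TEST-WEB01 10.2.2.11:80 check
    server HDVLD-TEST-WEB02 10.2.2.12:80 check

Note that each backend entry needs the server keyword in front of the address; HAProxy will not accept a bare address:port check line.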

Related

Populate YAML variable from Jinja variable

I want to populate nifi.web.https.host below when I run the playbook. I am new to Ansible and Jinja/YAML, so I am not sure why it is not working.
{% set external_ip = 'curl 169.254.169.254/2009-04-04/meta-data/public-ipv4' %}
- hosts: localhost
  become: yes
  roles:
    - my.nifi
  vars:
    nifi_properties:
      # HTTPS properties
      nifi.web.https.host: 'external_ip'
      nifi.web.https.port: 8443
      nifi.web.https.network.interface.default: eth0
After I run the playbook I get this error:
{% set external_ip = 'curl 169.254.169.254/2009-04-04/meta-data/public-ipv4' %}
 ^ here
A Jinja {% set %} statement can't be used at the top level of a playbook (and even if it could, it would only assign the literal string, not the output of curl). Fetch the value in a task instead, register the result, and reference it:
- hosts: localhost
  become: yes
  roles:
    - my.nifi
  pre_tasks:
    - name: Get EC2 public IP
      raw: curl http://169.254.169.254/2009-04-04/meta-data/public-ipv4
      register: ec2_public_ip
  vars:
    nifi_properties:
      # HTTPS properties
      nifi.web.https.host: "{{ ec2_public_ip.stdout }}"
      nifi.web.https.port: 8443
      nifi.web.https.network.interface.default: eth0
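If you would rather not shell out to curl, the same value can be fetched with the uri module (a sketch under the same assumptions as above):

pre_tasks:
  - name: Get EC2 public IP from the metadata service
    uri:
      url: http://169.254.169.254/2009-04-04/meta-data/public-ipv4
      return_content: yes
    register: ec2_public_ip

and then reference "{{ ec2_public_ip.content }}" instead of "{{ ec2_public_ip.stdout }}" in nifi_properties.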

Ansible/VMware: How to loop over a task and supply a value to a key?

Many of the VMware modules for Ansible are structured a bit differently than a normal Ansible module. What I'm running into is needing to supply either a hostname or a cluster name to the module. This doesn't scale well, and I'm looking for a way to loop over a set of hosts, or even clusters, from a vars file (the VMware modules don't use the normal hosts file) and supply that host or cluster name to the module. In the code below, I would be supplying a hostname to "esxi_hostname".
As you can see from the commented code, I have tried the with_items option, which doesn't work because it's not available to the module. I have also tried Jinja like so: esxi_hostname: '{% for host in hosts %} {{ host }} {% endfor %}', as well as loop: '{{ hosts }}'.
---
- hosts: localhost
  vars_files:
    - credentials.yml
    - vars.yml
    - se1_hosts.yml
  tasks:
    - name: Manage Log level setting for an ESXi host
      vmware_host_config_manager:
        hostname: 'vcsa.host.se1.somewhere.com'
        username: '{{ vc_username }}'
        password: '{{ vc_pass }}'
        esxi_hostname: 'hostname'
        # with_items:
        #   - 'c05n06.esx.se1.csnzoo.com'
        # loop: '{{ hosts }}'
        validate_certs: False
        options:
          'Config.HostAgent.log.level': 'info'
      delegate_to: localhost
I would expect to be able to supply a variable to esxi_hostname, and I'm looking for a way to do that with a loop, so it runs against host1, host2, host3, etc.
Thanks in advance!
Loops can be applied to tasks (in this case to the vmware_host_config_manager module), so the loop keyword should be at the same indentation level as the module:
- name: Manage Log level setting for an ESXi host
  vmware_host_config_manager:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    esxi_hostname: '{{ item }}'
    options:
      'Config.HostAgent.log.level': 'info'
  loop: "{{ groups['esxi'] }}"
  delegate_to: localhost
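The groups['esxi'] lookup assumes an inventory group named esxi listing the ESXi hosts, e.g. (the second hostname here is just a made-up placeholder):

[esxi]
c05n06.esx.se1.csnzoo.com
c05n07.esx.se1.csnzoo.com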

Ansible re-provisions the same host even though the inventory file is set up correctly

I've been trying to debug this for a while now; I thought I had it working, but then I made some other changes and now I'm back where I started.
Basically, I have Vagrant looping over a list of machine definitions, and while my Ansible inventory looks perfectly fine, I find that only one host is actually being provisioned.
Generated Ansible Inventory -- The SSH ports are all different, groups are correct
# Generated by Vagrant
kafka.cp.vagrant ansible_host=127.0.0.1 ansible_port=2200 ansible_user='vagrant' ansible_ssh_private_key_file='/workspace/confluent/cp-ansible/vagrant/.vagrant/machines/kafka.cp.vagrant/virtualbox/private_key' kafka='{"broker": {"id": 1}}'
zk.cp.vagrant ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/workspace/confluent/cp-ansible/vagrant/.vagrant/machines/zk.cp.vagrant/virtualbox/private_key'
connect.cp.vagrant ansible_host=127.0.0.1 ansible_port=2201 ansible_user='vagrant' ansible_ssh_private_key_file='/workspace/confluent/cp-ansible/vagrant/.vagrant/machines/connect.cp.vagrant/virtualbox/private_key'
[preflight]
zk.cp.vagrant
kafka.cp.vagrant
connect.cp.vagrant
[zookeeper]
zk.cp.vagrant
[broker]
kafka.cp.vagrant
[schema-registry]
kafka.cp.vagrant
[connect-distributed]
connect.cp.vagrant
Generated hosts file -- IPs and hostnames are correct
## vagrant-hostmanager-start id: aca1499c-a63f-4747-b39e-0e71ae289576
192.168.100.101 zk.cp.vagrant
192.168.100.102 kafka.cp.vagrant
192.168.100.103 connect.cp.vagrant
## vagrant-hostmanager-end
Ansible playbook I want to run -- the plays correctly correspond to the groups in my inventory
- hosts: preflight
  tasks:
    - import_role:
        name: confluent.preflight
- hosts: zookeeper
  tasks:
    - import_role:
        name: confluent.zookeeper
- hosts: broker
  tasks:
    - import_role:
        name: confluent.kafka-broker
- hosts: schema-registry
  tasks:
    - import_role:
        name: confluent.schema-registry
- hosts: connect-distributed
  tasks:
    - import_role:
        name: confluent.connect-distributed
For any code missing here, see Confluent :: cp-ansible.
The following is a sample of my Vagrantfile. (I made a fork, but I haven't committed until I get this working...)
I know that the if index == machines.length - 1 pattern should work according to the Vagrant documentation, and it does start all the machines and then only runs Ansible against the last machine, but for some reason all the tasks end up being executed on the first one.
machines = {"zk"=>{"ports"=>{2181=>nil}, "groups"=>["preflight", "zookeeper"]}, "kafka"=>{"memory"=>3072, "cpus"=>2, "ports"=>{9092=>nil, 8081=>nil}, "groups"=>["preflight", "broker", "schema-registry"], "vars"=>{"kafka"=>"{\"broker\": {\"id\": 1}}"}}, "connect"=>{"ports"=>{8083=>nil}, "groups"=>["preflight", "connect-distributed"]}}
Vagrant.configure("2") do |config|
if Vagrant.has_plugin?("vagrant-hostmanager")
config.hostmanager.enabled = true
config.hostmanager.manage_host = true
config.hostmanager.ignore_private_ip = false
config.hostmanager.include_offline = true
end
# More info on http://fgrehm.viewdocs.io/vagrant-cachier/usage
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.scope = :box
end
if Vagrant.has_plugin?("vagrant-vbguest")
config.vbguest.auto_update = false
end
config.vm.box = VAGRANT_BOX
config.vm.box_check_update = false
config.vm.synced_folder '.', '/vagrant', disabled: true
machines.each_with_index do |(machine, machine_conf), index|
hostname = getFqdn(machine.to_s)
config.vm.define hostname do |v|
v.vm.network "private_network", ip: "192.168.100.#{101+index}"
v.vm.hostname = hostname
machine_conf['ports'].each do |guest_port, host_port|
if host_port.nil?
host_port = guest_port
end
v.vm.network "forwarded_port", guest: guest_port, host: host_port
end
v.vm.provider "virtualbox" do |vb|
vb.memory = machine_conf['memory'] || 1536 # Give overhead for 1G default java heaps
vb.cpus = machine_conf['cpus'] || 1
end
if index == machines.length - 1
v.vm.provision "ansible" do |ansible|
ansible.compatibility_mode = '2.0'
ansible.limit = 'all'
ansible.playbook = "../plaintext/all.yml"
ansible.become = true
ansible.verbose = "vv"
# ... defined host and group variables here
end # Ansible provisioner
end # If last machine
end # machine configuration
end # for each machine
end
I set up an Ansible task like this:
- debug:
    msg: "FQDN: {{ ansible_fqdn }}; Hostname: {{ inventory_hostname }}; IPv4: {{ ansible_default_ipv4.address }}"
Just with that task, notice that ansible_fqdn is always zk.cp.vagrant in the output below, which lines up with the fact that only that VM is getting provisioned by Ansible.
ok: [zk.cp.vagrant] => {
"msg": "FQDN: zk.cp.vagrant; Hostname: zk.cp.vagrant; IPv4: 10.0.2.15"
}
ok: [kafka.cp.vagrant] => {
"msg": "FQDN: zk.cp.vagrant; Hostname: kafka.cp.vagrant; IPv4: 10.0.2.15"
}
ok: [connect.cp.vagrant] => {
"msg": "FQDN: zk.cp.vagrant; Hostname: connect.cp.vagrant; IPv4: 10.0.2.15"
}
Update with a minimal example: hostname -f returns the same hostname on every machine, and I assume that's what gather_facts runs to populate ansible_fqdn.
ansible all --private-key=~/.vagrant.d/insecure_private_key --inventory-file=/workspace/confluent/cp-ansible/vagrant/.vagrant/provisioners/ansible/inventory -a 'hostname -f' -f1
zk.cp.vagrant | SUCCESS | rc=0 >>
kafka.cp.vagrant
connect.cp.vagrant | SUCCESS | rc=0 >>
kafka.cp.vagrant
kafka.cp.vagrant | SUCCESS | rc=0 >>
kafka.cp.vagrant
It turns out I can get around the problem by removing this section from my ansible.cfg:
[ssh_connection]
control_path = %(directory)s/%%h-%%r
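The likely reason this helps: every Vagrant guest is reached as 127.0.0.1 with only the forwarded SSH port differing, so a ControlPath keyed on host and user alone makes OpenSSH multiplex all of the hosts over the first connection it opened. If you want to keep connection sharing, an alternative worth trying (untested here) is to include the port in the socket name:

[ssh_connection]
control_path = %(directory)s/%%h-%%p-%%r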

How to delay running a playbook?

I have a playbook which creates a virtual machine in vCenter. During creation the VM mounts an ISO image and installs the OS automatically. After installation the VM gets its IP address. I want to continue running the playbook, but against that IP.
I read about wait_for, but I don't understand how to use it. Or maybe there is another way to do it?
Playbook below:
- hosts: localhost
  gather_facts: false
  connection: local
  user: ansible
  become: true
  vars_files:
    - ../roles/vm-create/vars/default.yml
  vars_prompt:
    - name: "name_VM"
      prompt: "VM name:"
      private: no
      default: "vm001"
    - name: "vcenter_user"
      prompt: "vCenter user"
      private: no
      default: "root"
    - name: "vcenter_pass"
      prompt: "Enter password vCenter"
      private: yes
  roles:
    - vm-create

# waiting for the installation and getting the IP (do-until loop or wait_for)

- name: setting VM
  become: true
  hosts: '{{ get_ip }}'
  roles:
    - { role: ldap-client, tags: [ 'ldap' ] }
You don't show how you assign the get_ip variable, so I assume you can reference it. It's not straightforward, because you would either have to access it with hostvars['localhost']['get_ip'] or create a dynamic inventory entry for the new machine.
Then you just need a very basic wait_for task, but because you want to apply a role to the machine, you need to define the task in the pre_tasks section.
Your second play should be:
- name: setting VM
  become: true
  hosts: '{{ get_ip }}'
  pre_tasks:
    - name: Ensure machine at {{ get_ip }} SSH port is listening
      wait_for:
        host: "{{ get_ip }}"
        port: 22   # SSH port
        delay: 60  # wait 1 minute before trying
  roles:
    - { role: ldap-client, tags: [ 'ldap' ] }
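If you go the dynamic-inventory route mentioned above instead, a minimal sketch (assuming the vm-create role leaves the new machine's address in get_ip) is to register the host with add_host at the end of the first play and then target that group:

  post_tasks:
    - name: Add the freshly created VM to the in-memory inventory
      add_host:
        name: "{{ get_ip }}"
        groups: new_vms

- name: setting VM
  become: true
  hosts: new_vms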

Ansible - Write variable to config file

We have some Redis configurations that differ only in the port and maxmemory settings, so I'm looking for a way to write a 'base' config file for Redis and then substitute the port and maxmemory values.
Can I do that with Ansible?
For such operations the lineinfile module usually works best; for example:
- name: Ensure maxmemory is set to 2 MB
  lineinfile:
    dest: /path/to/redis.conf
    regexp: maxmemory
    line: maxmemory 2mb
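One caveat: lineinfile's regexp is searched for anywhere in each line, so a bare maxmemory pattern will also match a commented # maxmemory line if one exists; anchoring the pattern is usually safer:

- name: Ensure maxmemory is set to 2 MB
  lineinfile:
    dest: /path/to/redis.conf
    regexp: '^maxmemory'
    line: maxmemory 2mb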
Or change multiple lines in one task with with_items:
- name: Ensure Redis parameters are configured
  lineinfile:
    dest: /path/to/redis.conf
    regexp: "{{ item.line_to_match }}"
    line: "{{ item.line_to_configure }}"
  with_items:
    - { line_to_match: "maxmemory", line_to_configure: "maxmemory 2mb" }
    - { line_to_match: "port", line_to_configure: "port 4096" }
Or, if you want to create a base config, write it in Jinja2 and use the template module:
vars:
  redis_maxmemory: 2mb
  redis_port: 4096
tasks:
  - name: Ensure Redis is configured
    template:
      src: redis.conf.j2
      dest: /path/to/redis.conf
with redis.conf.j2 containing:
maxmemory {{ redis_maxmemory }}
port {{ redis_port }}
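With those variables, the rendered /path/to/redis.conf simply contains:

maxmemory 2mb
port 4096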
The best way I've found to do this (and I use the same technique everywhere) is to create a redis role with a defaults vars file and then override the vars when you call the role.
So in roles/redis/defaults/main.yml:
redis_bind: 127.0.0.1
redis_memory: 2GB
redis_port: 1337
And in your playbook:
- name: Provision redis node
  hosts: redis1
  roles:
    - role: redis
      redis_port: 9999
      redis_memory: 4GB

- name: Provision redis node
  hosts: redis2
  roles:
    - role: redis
      redis_port: 8888
      redis_memory: 8GB
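Inside the role, a task then templates the config from those variables; a minimal sketch (the file paths are assumptions, adjust them to your layout) for roles/redis/tasks/main.yml:

- name: Ensure Redis is configured from role vars
  template:
    src: redis.conf.j2
    dest: /etc/redis/redis.conf

with roles/redis/templates/redis.conf.j2 built from the defaults shown above:

bind {{ redis_bind }}
maxmemory {{ redis_memory }}
port {{ redis_port }}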
