I'm building an AWS Network Load Balancer with target groups, and I'm stuck on aws_lb_listener: adding several target_group_arn values across several http_tcp_listeners.
I.e. I have two aws_lb_target_group resources for ports 80 and 443, and two http_tcp_listeners for those same ports.
But I get this error message:
in resource "aws_lb_listener" "frontend_http_tcp":
│ 172: target_group_arn = each.value.arn
│ ├────────────────
│ │ each.value is map of string with 4 elements
│
│ This map does not have an element with the key "arn".
variable "aws_lb_target_group" {
description = "aws_lb_target_group"
type = map(any)
default = {
http = {
name = "http"
target_type = "instance"
port = 80
protocol = "TCP"
protocol_version = "HTTP1"
type = "source_ip"
enabled = false
path_health_check = "/health.html"
matcher_health_check = "200" # has to be HTTP 200 or fails
},
https = {
name = "https"
target_type = "instance"
port = 443
protocol = "TCP"
protocol_version = "HTTP2"
type = "source_ip"
enabled = false
path_health_check = "/health.html"
matcher_health_check = "200" # has to be HTTP 200 or fails
}
}
}
variable "http_tcp_listeners" {
description = "aws_lb_listener"
type = map(any)
default = {
http = {
port = "80"
protocol = "TCP"
action_type = "forward"
alpn_policy = "HTTP1Only"
},
https = {
port = "443"
protocol = "TCP"
action_type = "forward"
certificate_arn = "data.terraform_remote_state.acm.outputs.acm_certificate_arn"
alpn_policy = "HTTP2Preferred"
}
}
}
resource "aws_lb_target_group" "main" {
for_each = var.aws_lb_target_group
name = "test-group-${random_pet.this.id}-${each.value.name}"
target_type = each.value.target_type
port = each.value.port
protocol = each.value.protocol
protocol_version = each.value.protocol_version
vpc_id = local.vpc_id
stickiness {
type = "source_ip"
enabled = false
}
health_check {
path = each.value.path_health_check
port = each.value.port
healthy_threshold = 3
unhealthy_threshold = 3
interval = 30
}
depends_on = [
aws_lb.main,
]
}
resource "aws_lb_listener" "frontend_http_tcp" {
for_each = var.http_tcp_listeners
load_balancer_arn = aws_lb.main.arn
port = each.value.port
protocol = each.value.protocol
certificate_arn = data.terraform_remote_state.acm.outputs.acm_certificate_arn
alpn_policy = each.value.alpn_policy
dynamic "default_action" {
for_each = aws_lb_target_group.main
content {
type = "forward"
target_group_arn = each.value.arn
}
}
depends_on = [
aws_lb.main,
aws_lb_target_group.main,
]
}
When you use dynamic blocks, the iterator is not each but the name of the block (here default_action). So I think it should be:
target_group_arn = default_action.value.arn
To have only one default_action, try:
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.main[each.key].arn
}
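Putting both fixes together, a minimal sketch of the corrected listener (assuming the listener map and target-group map share the same keys, http and https, as in the variables above):

resource "aws_lb_listener" "frontend_http_tcp" {
  for_each          = var.http_tcp_listeners
  load_balancer_arn = aws_lb.main.arn
  port              = each.value.port
  protocol          = each.value.protocol

  # One default_action per listener, forwarding to the
  # target group created under the same map key.
  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.main[each.key].arn
  }
}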
I want to allow access to a virtual machine from my IP address and from another EC2 instance's IP address, so I need to put the public IP of the EC2 instance into the security group's cidr_blocks. I tried different options and didn't find a solution:
# cidr_blocks = "${aws_instance.name.public_ip}"
# cidr_blocks = ["${formatlist("%v/32", aws_instance.name.public_ip)}"]
# cidr_blocks = [aws_instance.name.public_ip,"32"]
# cidr_blocks = ["${aws_instance.name.public_ip}/32"]
# cidr_blocks = join("/",[aws_instance.kibana.public_ip,"32"])
# cidr_blocks = ["${aws_instance.kibana.public_ip}/32"]
aws_security_group.elasticsearch_sg
resource "aws_security_group" "elasticsearch_sg" {
vpc_id = aws_vpc.elastic_vpc.id
ingress {
description = "ingress rules"
cidr_blocks = [var.access_ip] # my IP
from_port = 22
protocol = "tcp"
to_port = 22
}
ingress {
description = "ingress rules2"
# cidr_blocks = "${aws_instance.kibana.public_ip}"
# cidr_blocks = [aws_instance.kibana.public_ip,"32"]
# cidr_blocks = ["${aws_instance.kibana.public_ip}/32"]
from_port = 9200
protocol = "tcp"
to_port = 9300
self = true
}
egress {
description = "egress rules"
cidr_blocks = [ "0.0.0.0/0" ]
from_port = 0
protocol = "-1"
to_port = 0
}
tags={
Name="elasticsearch_sg"
}
}
null_resource.start_es
resource "null_resource" "start_es" {
depends_on = [
null_resource.move_elasticsearch_file
]
count = 3
connection {
type = "ssh"
user = "ec2-user"
private_key = "${tls_private_key.pk.private_key_pem}"
host = aws_instance.elastic_nodes[count.index].public_ip
}
provisioner "remote-exec" {
inline = [
"sudo yum update -y",
"sudo rpm -i https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.5.1-x86_64.rpm",
"sudo systemctl daemon-reload",
"sudo systemctl enable elasticsearch.service",
# "sudo sed -i 's#-Xms1g#-Xms${aws_instance.elastic_nodes[count.index].root_block_device[0].volume_size/2}g#g' /etc/elasticsearch/jvm.options",
# "sudo sed -i 's#-Xmx1g#-Xmx${aws_instance.elastic_nodes[count.index].root_block_device[0].volume_size/2}g#g' /etc/elasticsearch/jvm.options",
"sudo rm /etc/elasticsearch/elasticsearch.yml",
"sudo cp elasticsearch.yml /etc/elasticsearch/",
"sudo systemctl start elasticsearch.service"
]
}
}
aws_instance.kibana
resource "aws_instance" "kibana" {
depends_on = [
null_resource.start_es
]
ami = "ami-04d29b6f966df1537"
instance_type = "t2.medium"
iam_instance_profile = "${aws_iam_instance_profile.test_profile.name}"
subnet_id = aws_subnet.elastic_subnet[var.az_name[0]].id
vpc_security_group_ids = [aws_security_group.kibana_sg.id]
key_name = aws_key_pair.kp.key_name
associate_public_ip_address = true
tags = {
Name = "kibana"
}
}
aws_security_group.elasticsearch_sg (identical to the security group shown above)
aws_instance.elastic_nodes
resource "aws_instance" "elastic_nodes" {
count = 3
ami = "ami-04d29b6f966df1537"
instance_type = "t2.medium"
iam_instance_profile = "${aws_iam_instance_profile.test_profile.name}"
subnet_id = aws_subnet.elastic_subnet[var.az_name[count.index]].id
vpc_security_group_ids = [aws_security_group.elasticsearch_sg.id]
key_name = aws_key_pair.kp.key_name
associate_public_ip_address = true
tags = {
Name = "elasticsearch_${count.index}"
}
}
null_resource.move_elasticsearch_file
resource "null_resource" "move_elasticsearch_file" {
depends_on = [aws_instance.elastic_nodes]
count = 3
connection {
type = "ssh"
user = "ec2-user"
private_key = "${tls_private_key.pk.private_key_pem}"
host = aws_instance.elastic_nodes[count.index].public_ip
}
provisioner "file" {
# content = data.template_file.init_elasticsearch[count.index].rendered
content = templatefile("./templates/elasticsearch_config.tpl", {
cluster_name = "cluster1"
node_name = "node_${count.index}"
node = aws_instance.elastic_nodes[count.index].private_ip
node1 = aws_instance.elastic_nodes[0].private_ip
node2 = aws_instance.elastic_nodes[1].private_ip
node3 = aws_instance.elastic_nodes[2].private_ip
})
destination = "elasticsearch.yml"
}
}
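For reference, the "${aws_instance.kibana.public_ip}/32" interpolation is valid HCL on its own; the trouble is that referencing aws_instance.kibana from inside aws_security_group.elasticsearch_sg creates a cycle (the Elasticsearch instances use that security group, and kibana depends on them through null_resource.start_es). A hedged sketch of one way around this, moving the rule into a standalone aws_security_group_rule (the resource name kibana_to_es is hypothetical):

# Standalone rule: elasticsearch_sg itself no longer references
# aws_instance.kibana, so the dependency cycle disappears.
resource "aws_security_group_rule" "kibana_to_es" {
  type              = "ingress"
  description       = "kibana -> elasticsearch"
  from_port         = 9200
  to_port           = 9300
  protocol          = "tcp"
  cidr_blocks       = ["${aws_instance.kibana.public_ip}/32"]
  security_group_id = "${aws_security_group.elasticsearch_sg.id}"
}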
It seems the HTTP health check is not occurring. I've come to this conclusion because the HTTP debug log does not show any regular periodic requests.
Is there any additional configuration required for a health check to occur?
job "example" {
datacenters = ["dc1"]
type = "service"
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "3m"
progress_deadline = "10m"
auto_revert = false
canary = 0
}
migrate {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
}
group "app" {
count = 1
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
ephemeral_disk {
size = 300
}
task "app" {
driver = "docker"
config {
image = "localhost:5000/myhub:latest"
command = "python"
args = [
"manage.py",
"runserver",
"0.0.0.0:8001"
]
port_map {
app = 8001
}
network_mode = "host"
}
resources {
cpu = 500
memory = 256
network {
mbits = 10
port "app" {}
}
}
service {
name = "myhub"
port = "app"
check {
name = "alive"
type = "http"
port = "app"
path = "/"
interval = "10s"
timeout = "3s"
}
}
}
}
}
It seems Consul must be installed for the health check to occur.
Also make sure to install Consul v1.4.2 or later, as v1.4.1 seems to have a bug: https://github.com/hashicorp/consul/issues/5270
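In practice that means running a Consul agent on each Nomad client; Nomad registers the service and its checks with the local agent. A minimal sketch of the corresponding stanza in the Nomad agent configuration (this is also Nomad's default, so it is usually enough just to have Consul running at that address):

# Nomad agent configuration: where the local Consul agent listens.
consul {
  address = "127.0.0.1:8500"
}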
I want to create a Windows Azure VM, copy some files to it, and run some simple commands on it using a Terraform script.
The problem: I am able to create the VM, but I'm not able to connect via WinRM.
provider "azurerm" {
subscription_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
tenant_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
}
resource "azurerm_virtual_network" "vnet" {
name = "cmTFVnet"
address_space = ["10.0.0.0/16"]
location = "South India"
resource_group_name = "cservice"
}
resource "azurerm_subnet" "subnet" {
name = "cmTFSubnet"
resource_group_name = "cservice"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
address_prefix = "10.0.2.0/24"
}
resource "azurerm_public_ip" "publicip" {
name = "cmTFPublicIP"
location = "South India"
resource_group_name = "cservice"
public_ip_address_allocation = "dynamic"
}
resource "azurerm_network_security_group" "nsg" {
name = "cmTFNSG"
location = "South India"
resource_group_name = "cservice"
security_rule {
name = "SSH"
priority = 340
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "winrm"
priority = 1010
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "5985"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "winrm-out"
priority = 100
direction = "Outbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "5985"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_network_interface" "nic" {
name = "cmNIC"
location = "South India"
resource_group_name = "cservice"
network_security_group_id = "${azurerm_network_security_group.nsg.id}"
ip_configuration {
name = "compilerNICConfg"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "dynamic"
public_ip_address_id = "${azurerm_public_ip.publicip.id}"
}
}
resource "azurerm_virtual_machine" "vm" {
name = "cmTFVM"
location = "South India"
resource_group_name = "cservice"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
vm_size = "Standard_D2s_v3"
storage_image_reference {
id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
}
storage_os_disk {
name = "cmOsDisk"
managed_disk_type = "Premium_LRS"
create_option = "FromImage"
}
os_profile {
computer_name = "hostname"
admin_username = "test"
admin_password = "test#123"
}
os_profile_windows_config {
enable_automatic_upgrades = "true"
provision_vm_agent ="true"
winrm {
protocol = "http"
}
}
provisioner "remote-exec" {
connection {
type = "winrm"
user = "test"
password = "test#123"
agent = "false"
https = false
insecure = true
}
inline = [
"cd..",
"cd..",
"cd docker",
"mkdir test"
]
}
}
The VM is created successfully, but I'm not able to connect by WinRM; I get the following error in "remote-exec":
azurerm_virtual_machine.vm: timeout - last error: unknown error Post
http://:5985/wsman: dial tcp :5985: connectex: A connection attempt
failed because the connected party did not properly respond after a
period of time, or established connection failed because connected
host has failed to respond.
or http response error: 401 - invalid content type
When you create a Windows Azure VM, WinRM is not configured by default. So if you want to connect to the VM through WinRM, you should configure it after VM creation, or at creation time.
You can follow the steps in Configure WinRM after virtual machine creation. To configure it at creation time instead, there is an example that shows how through an Azure template, which may also help: Deploys a Windows VM and Configures a WinRM Https listener.
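On the Terraform side, note that winrm inside os_profile_windows_config is a block, not an assignment. A minimal sketch of an HTTP listener (everything other than the block syntax mirrors the code above):

os_profile_windows_config {
  provision_vm_agent        = true
  enable_automatic_upgrades = true
  winrm {
    # "HTTP" needs no certificate; "HTTPS" also requires certificate_url.
    protocol = "HTTP"
  }
}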
I have taken the GitHub code below as a reference:
https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/vm-from-managed-image
I modified the scripts and executed terraform init, and received the error below.
Error reading config for azurerm_network_interface[main]: parse error at 1:18: expected ")" but found "."
My Script :
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "xxxxxxxx"
client_id = "xxxxxxxx"
client_secret = "xxxxxxxx"
tenant_id = "xxxxxxxx"
}
# Locate the existing custom/golden image
data "azurerm_image" "search" {
name = "AZLXSPTDEVOPS01_Image"
resource_group_name = "RG-PLATFORM"
}
output "image_id" {
value = "/subscriptions/4f5c9f2a-3584-4bbd-a26e-bbf69ffbfbe6/resourceGroups/RG-EASTUS-SPT-PLATFORM/providers/Microsoft.Compute/images/AZLXSPTDEVOPS01_Image"
}
# Create a Resource Group for the new Virtual Machine.
resource "azurerm_resource_group" "main" {
name = "RG-TEST"
location = "eastus"
}
# Create a Virtual Network within the Resource Group
resource "azurerm_virtual_network" "main" {
name = "RG-Vnet"
address_space = ["10.100.0.0/16"]
resource_group_name = "${azurerm_resource_group.main.name}"
location = "${azurerm_resource_group.main.location}"
}
# Create a Subnet within the Virtual Network
resource "azurerm_subnet" "internal" {
name = "RG-Terraform-snet-in"
virtual_network_name = "${azurerm_virtual_network.main.name}"
resource_group_name = "${azurerm_resource_group.main.name}"
address_prefix = "10.100.2.0/24"
}
# Create a Network Security Group with some rules
resource "azurerm_network_security_group" "main" {
name = "RG-QA-Test-Web-NSG"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
security_rule {
name = "allow_SSH"
description = "Allow SSH access"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# Create a network interface for VMs and attach the PIP and the NSG
resource "azurerm_network_interface" "main" {
name = "myNIC"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
network_security_group_id = "${azurerm_network_security_group.main.id}"
ip_configuration {
name = "primary"
subnet_id = "${azurerm_subnet.internal.id}"
private_ip_address_allocation = "static"
private_ip_address = "${cidrhost("10.100.1.8/24", 4)}"
}
}
# Create a new Virtual Machine based on the Golden Image
resource "azurerm_virtual_machine" "vm" {
name = "AZLXSPTDEVOPS01"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
network_interface_ids = ["${azurerm_network_interface.main.id}"]
vm_size = "Standard_DS12_v2"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
storage_image_reference {
id = "${data.azurerm_image.search.id}"
}
storage_os_disk {
name = "AZLXSPTDEVOPS01-OS"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}
os_profile {
computer_name = "APPVM"
admin_username = "admin"
admin_password = "admin#2019"
}
os_profile_linux_config {
disable_password_authentication = false
}
}
Below script is working fine
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "xxxx"
client_id = "xxxx"
client_secret = "xxxx"
tenant_id = "xxxx"
}
# Locate the existing custom/golden image
data "azurerm_image" "search" {
name = "AZDEVOPS01_Image"
resource_group_name = "RG-PLATFORM"
}
output "image_id" {
value = "/subscriptions/xxxxxx/resourceGroups/RG-EASTUS-SPT-PLATFORM/providers/Microsoft.Compute/images/AZLXDEVOPS01_Image"
}
# Create a Resource Group for the new Virtual Machine.
resource "azurerm_resource_group" "main" {
name = "RG-OPT-QA-TEST"
location = "eastus"
}
# Create a Subnet within the Virtual Network
resource "azurerm_subnet" "internal" {
name = "RG-Terraform-snet-in"
virtual_network_name = "RG-OPT-QA-Vnet"
resource_group_name = "${azurerm_resource_group.main.name}"
address_prefix = "10.100.2.0/24"
}
# Create a Network Security Group with some rules
resource "azurerm_network_security_group" "main" {
name = "RG-QA-Test-Dev-NSG"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
security_rule {
name = "allow_SSH"
description = "Allow SSH access"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# Create a network interface for VMs and attach the PIP and the NSG
resource "azurerm_network_interface" "main" {
name = "NIC"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
network_security_group_id = "${azurerm_network_security_group.main.id}"
ip_configuration {
name = "nicconfig"
subnet_id = "${azurerm_subnet.internal.id}"
private_ip_address_allocation = "static"
private_ip_address = "${cidrhost("10.100.2.16/24", 4)}"
}
}
# Create a new Virtual Machine based on the Golden Image
resource "azurerm_virtual_machine" "vm" {
name = "AZLXDEVOPS01"
location = "${azurerm_resource_group.main.location}"
resource_group_name = "${azurerm_resource_group.main.name}"
network_interface_ids = ["${azurerm_network_interface.main.id}"]
vm_size = "Standard_DS12_v2"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
storage_image_reference {
id = "${data.azurerm_image.search.id}"
}
storage_os_disk {
name = "AZLXDEVOPS01-OS"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}
os_profile {
computer_name = "APPVM"
admin_username = "devopsadmin"
admin_password = "Cssladmin#2019"
}
os_profile_linux_config {
disable_password_authentication = false
}
}
Well, given the errors in your comment, I think you should set the subnet like this:
resource "azurerm_subnet" "internal" {
name = "RG-Terraform-snet-in"
virtual_network_name = "${azurerm_virtual_network.main.name}"
resource_group_name = "${azurerm_resource_group.main.name}"
address_prefix = "10.100.1.0/24"
}
As for the virtual network error: I do not see a virtual network named "RG-Vnet" anywhere in the code, even though the error mentions it. So you should check whether the code you are running really is the code you posted.
To create an Azure VM from an image in the Azure Marketplace, you can follow the tutorial Create a complete Linux virtual machine infrastructure in Azure with Terraform. You do not need to create an image resource in your Terraform code. Just set the OS disk like this in the azurerm_virtual_machine resource:
storage_os_disk {
name = "myOsDisk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
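And point storage_image_reference at the Marketplace image itself rather than at an azurerm_image data source. A sketch using the Ubuntu image from that tutorial (swap publisher/offer/sku/version for the image you actually want):

storage_image_reference {
  publisher = "Canonical"
  offer     = "UbuntuServer"
  sku       = "16.04-LTS"
  version   = "latest"
}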
Also, when you refer to other resources in the same code, you should do it like this:
virtual_network_name = "${azurerm_virtual_network.main.name}"
not with a literal string such as "RG-Vnet"; that is not the correct way.
I have multiple docker_container resources:
resource "docker_container" "headerdebug" {
name = "headerdebug"
image = "${docker_image.headerdebug.latest}"
labels {
"traefik.frontend.rule" = "Host:debug.in.bb8.fun"
"traefik.port" = 8080
"traefik.enable" = "true"
"traefik.frontend.passHostHeader" = "true"
"traefik.frontend.headers.SSLTemporaryRedirect" = "true"
"traefik.frontend.headers.STSSeconds" = "2592000"
"traefik.frontend.headers.STSIncludeSubdomains" = "false"
"traefik.frontend.headers.customResponseHeaders" = "${var.xpoweredby}"
"traefik.frontend.headers.customFrameOptionsValue" = "${var.xfo_allow}"
}
}
And another one:
resource "docker_container" "cadvisor" {
name = "cadvisor"
image = "${docker_image.cadvisor.latest}"
labels {
"traefik.frontend.rule" = "Host:cadvisor.bb8.fun"
"traefik.port" = 8080
"traefik.enable" = "true"
"traefik.frontend.headers.SSLTemporaryRedirect" = "true"
"traefik.frontend.headers.STSSeconds" = "2592000"
"traefik.frontend.headers.STSIncludeSubdomains" = "false"
"traefik.frontend.headers.contentTypeNosniff" = "true"
"traefik.frontend.headers.browserXSSFilter" = "true"
"traefik.frontend.headers.customFrameOptionsValue" = "${var.xfo_allow}"
"traefik.frontend.headers.customResponseHeaders" = "${var.xpoweredby}"
}
}
I'm trying to use locals to re-use the common labels between the two containers. I have the following local defined:
locals {
traefik_common_labels {
"traefik.frontend.passHostHeader" = "true"
"traefik.frontend.headers.SSLTemporaryRedirect" = "true"
"traefik.frontend.headers.STSSeconds" = "2592000"
"traefik.frontend.headers.STSIncludeSubdomains" = "false"
"traefik.frontend.headers.customResponseHeaders" = "${var.xpoweredby}"
"traefik.frontend.headers.customFrameOptionsValue" = "${var.xfo_allow}"
}
}
But the documentation doesn't mention how to use locals for merging entire blocks, only maps.
I've tried the following:
labels "${merge(
local.traefik_common_labels,
map(
"traefik.frontend.rule", "Host:debug.in.bb8.fun",
"traefik.port", 8080,
"traefik.enable", "true",
)
)}"
which gives the following error:
tf11 plan
Error: Failed to load root config module: Error loading modules: module docker: Error parsing .terraform/modules/2f3785083ce0d0ac2dd3346cf129e795/main.tf: key 'labels "${merge(
local.traefik_common_labels,
map(
"traefik.frontend.rule", "Host:debug.in.bb8.fun",
"traefik.port", 8080,
"traefik.enable", "true",
)
)}"' expected start of object ('{') or assignment ('=')
There is a pretty diff of my attempts at this PR: https://git.captnemo.in/nemo/nebula/pulls/4/files
In Terraform 0.12 and later you can use a dynamic block to achieve this:
variable "xpoweredby" { default = "" }
variable "xfo_allow" { default = "" }
locals {
traefik_common_labels = {
"traefik.frontend.passHostHeader" = "true"
"traefik.frontend.headers.SSLTemporaryRedirect" = "true"
"traefik.frontend.headers.STSSeconds" = "2592000"
"traefik.frontend.headers.STSIncludeSubdomains" = "false"
"traefik.frontend.headers.customResponseHeaders" = var.xpoweredby
"traefik.frontend.headers.customFrameOptionsValue" = var.xfo_allow
}
}
resource "docker_image" "cadvisor" {
name = "google/cadvisor:latest"
}
resource "docker_container" "cadvisor" {
name = "cadvisor"
image = docker_image.cadvisor.latest
dynamic "labels" {
for_each = merge(local.traefik_common_labels,
{
"traefik.frontend.rule" = "Host:debug.in.bb8.fun",
"traefik.port" = 8080,
"traefik.enable" = "true",
}
)
content {
label = labels.key
value = labels.value
}
}
}
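Note that merge() gives precedence to maps later in the argument list, so the per-container labels here override any keys they share with local.traefik_common_labels.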
In Terraform 0.11 etc., this could be accomplished with the following. You need to assign the value to labels (labels = ..., not a bare labels block) like so:
locals {
traefik_common_labels = {
"traefik.frontend.passHostHeader" = "true"
"traefik.frontend.headers.SSLTemporaryRedirect" = "true"
"traefik.frontend.headers.STSSeconds" = "2592000"
"traefik.frontend.headers.STSIncludeSubdomains" = "false"
"traefik.frontend.headers.customResponseHeaders" = "${var.xpoweredby}"
"traefik.frontend.headers.customFrameOptionsValue" = "${var.xfo_allow}"
}
}
resource "docker_container" "cadvisor" {
name = "cadvisor"
image = "${docker_image.cadvisor.latest}"
labels = "${merge(
local.traefik_common_labels,
map(
"traefik.frontend.rule", "Host:debug.in.bb8.fun",
"traefik.port", 8080,
"traefik.enable", "true",
))}"
}