EC2 + CodePipeline/CodeDeploy: how to make the application ready on instance refresh

Please forgive me, I am learning DevOps.
I am using CodeDeploy/CodePipeline to deploy a Node.js app onto an EC2 instance, and Terraform to manage the infrastructure.
The pipeline works great, but...
Question 1: Deploy on Refresh
If I perform an instance refresh on the Auto Scaling group, the replacement instances come up without the application. Is this normal?
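For reference, this is the kind of refresh I mean (an AWS CLI sketch; the group name follows the Terraform below and the preference values are only examples):

  aws autoscaling start-instance-refresh \
    --auto-scaling-group-name "prod-app" \
    --preferences MinHealthyPercentage=50,InstanceWarmup=240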
Question 2: Clean on Deploy
As a workaround for Question 1, I wrote a script that downloads the latest build from the CodeBuild S3 bucket, unzips it, and starts the application, and I added logic to the ASG launch configuration's user data to run it (see the Terraform EC2 definition below).
So far, so good: when I refresh the instances, the app boots.
However, the problem arises when I then trigger a CodePipeline deployment.
It fails in the CodeDeploy step because the project folder already contains the files from the bootstrap.
In my appspec.yml I have already tried to clean out the application directory during the ApplicationStop phase, but without success.
Any tips/guidance would be most welcome.
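One thing I have seen mentioned but not yet verified against this setup is the AppSpec-level file_exists_behavior key, which the CodeDeploy AppSpec reference documents for EC2/on-premises deployments as an alternative to cleaning the directory by hand. A minimal sketch (it sits at the top level of appspec.yml, alongside files and hooks):

  # sketch only, not tested here; allowed values are DISALLOW (default), OVERWRITE and RETAIN
  file_exists_behavior: OVERWRITE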
Code
app_dir/appspec.yml
version: 0.0
os: linux
files:
  - source: /
    destination: /home/ec2-user/app_dir/
    overwrite: true
permissions:
  - object: /home/ec2-user/app_dir/
    pattern: "**"
    owner: ec2-user
    group: ec2-user
hooks:
  ApplicationStop:
    - location: infrastructure/cleanup.sh
      timeout: 60
      runas: ec2-user
  AfterInstall:
    - location: infrastructure/install_dependencies.sh
      timeout: 180
      runas: ec2-user
    - location: infrastructure/install_root_dependencies.sh
      timeout: 30
      runas: root
  ApplicationStart:
    - location: infrastructure/start.sh
      timeout: 45
      runas: ec2-user
app_dir/infrastructure/cleanup.sh
#!/bin/bash
export HOME=/home/ec2-user
pm2 stop all
pm2 delete all
rm -rf /home/ec2-user/app_dir
mkdir -p /home/ec2-user/app_dir
sudo chown ec2-user:ec2-user /home/ec2-user/app_dir
cd /home/ec2-user/app_dir
app_dir/infrastructure/install_root_dependencies.sh
#!/bin/bash
export HOME=/home/ec2-user
cd /home/ec2-user/app_dir
npm install -g pm2
app_dir/infrastructure/start.sh
#!/bin/bash
export HOME=/home/ec2-user
mkdir -p /home/ec2-user/Lexstep
. /home/ec2-user/Lexstep/infrastructure/parameter_store/load_ssm_parameters.sh
cd /home/ec2-user/app_dir || exit
sudo chown -R ec2-user:ec2-user /home/ec2-user/app_dir
mkdir -p /home/ec2-user/logs/app
sudo chown -R ec2-user:ec2-user /home/ec2-user/logs/app
sudo chmod a+rwx /home/ec2-user/logs/app
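# pm2 ls prints a table of registered processes; grabbing column 2 (the id) tells us whether anything is registered yet
# pm2 startup prints a "sudo env PATH=... pm2 startup ..." command; capture it so it can be run below to register the boot hook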
output=$(pm2 ls | awk '{print $2}')
startup_cmd=$(pm2 startup | awk '/^sudo/')
if [[ $output == *"0"* ]]; then
echo -e "\nProcess already defined. Restarting existing processes."
pm2 restart ecosystem.config.js --update-env
else
echo -e "\nNo process is running currently. Creating a new process..\n"
if [[ "$DEPLOYMENT_GROUP_NAME" == "lexstep-prod"* ]] || [[ "$DEPLOYMENT_GROUP_NAME" == "lexstep-stage"* ]]; then
pm2 start ecosystem.config.js --env production
else
pm2 start ecosystem.config.js --env development
fi
eval "$startup_cmd"
fi
pm2 save #save current PM2 process list to start upon reboot
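For context, the pm2 startup + pm2 save pair above is what lets the process list survive a reboot. A quick sanity check on the instance (the unit name is an assumption; pm2 prints the real one when pm2 startup runs):

  systemctl status pm2-ec2-user --no-pager   # systemd unit registered by the captured "pm2 startup" command
  ls -l /home/ec2-user/.pm2/dump.pm2         # process dump written by "pm2 save"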
CodePipeline Terraform definitions
resource "aws_codebuild_project" "codebuild_project" {
name = "MY_APP-${var.env}-build-project"
description = "${var.env} Build Project for MY_APP"
build_timeout = "10"
service_role = aws_iam_role.codebuild_role.arn
source {
type = "CODEPIPELINE"
buildspec = "infrastructure/buildspec.yml"
git_clone_depth = 0
git_submodules_config {
fetch_submodules = false
}
}
environment {
compute_type = "BUILD_GENERAL1_SMALL"
image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0"
type = "LINUX_CONTAINER"
image_pull_credentials_type = "CODEBUILD"
privileged_mode = false
}
vpc_config {
vpc_id = aws_vpc.vpc.id
subnets = [
aws_subnet.subnets_app[0].id,
aws_subnet.subnets_app[1].id,
]
security_group_ids = [
aws_security_group.app_sg.id,
]
}
artifacts {
type = "CODEPIPELINE"
artifact_identifier = "BuildArtifact"
}
secondary_artifacts {
type = "S3"
artifact_identifier = "TestArtifacts"
location = aws_s3_bucket.codepipeline_bucket.id
path = "MY_APP-${var.env}-pipeline/TestArtifacts/artifacts.zip"
name = "TestArtifacts"
packaging = "ZIP"
}
logs_config {
cloudwatch_logs {
group_name = local.app_build_log_group_name
stream_name = "MY_APP-${var.env}-build-app-log-stream"
}
}
cache {
type = "LOCAL"
modes = ["LOCAL_DOCKER_LAYER_CACHE", "LOCAL_SOURCE_CACHE"]
}
tags = merge(
var.additional_tags,
{
Name = "MY_APP-${var.env}-build-project"
},
)
}
resource "aws_codebuild_project" "codebuild_test_project" {
name = "MY_APP-${var.env}-build-test-project"
description = "Unit Test Build Project for MY_APP"
build_timeout = "10"
service_role = aws_iam_role.codebuild_test_role.arn
source {
type = "CODEPIPELINE"
buildspec = "infrastructure/buildspec_unit_test.yml"
git_clone_depth = 0
git_submodules_config {
fetch_submodules = false
}
}
environment {
compute_type = "BUILD_GENERAL1_SMALL"
image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0"
type = "LINUX_CONTAINER"
image_pull_credentials_type = "CODEBUILD"
privileged_mode = false
}
vpc_config {
vpc_id = aws_vpc.vpc.id
subnets = [
aws_subnet.subnets_app[0].id,
aws_subnet.subnets_app[1].id,
]
security_group_ids = [
aws_security_group.app_sg.id,
]
}
artifacts {
type = "CODEPIPELINE"
}
logs_config {
cloudwatch_logs {
group_name = local.test_build_log_group_name
stream_name = "MY_APP-${var.env}-build-unit-test-log-stream"
}
}
cache {
type = "LOCAL"
modes = ["LOCAL_DOCKER_LAYER_CACHE", "LOCAL_SOURCE_CACHE"]
}
tags = merge(
var.additional_tags,
{
Name = "MY_APP-${var.env}-build-test-project"
},
)
}
resource "aws_codebuild_project" "codebuild_e2e_test_project" {
name = "MY_APP-${var.env}-build-e2e-test-project"
description = "Integration Test Build Project for MY_APP"
build_timeout = "10"
service_role = aws_iam_role.codebuild_test_role.arn
source {
type = "CODEPIPELINE"
buildspec = "infrastructure/buildspec_e2e_test.yml"
git_clone_depth = 0
git_submodules_config {
fetch_submodules = false
}
}
environment {
compute_type = "BUILD_GENERAL1_SMALL"
image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0"
type = "LINUX_CONTAINER"
image_pull_credentials_type = "CODEBUILD"
privileged_mode = false
}
vpc_config {
vpc_id = aws_vpc.vpc.id
subnets = [
aws_subnet.subnets_app[0].id,
aws_subnet.subnets_app[1].id,
]
security_group_ids = [
aws_security_group.app_sg.id,
]
}
artifacts {
type = "CODEPIPELINE"
}
logs_config {
cloudwatch_logs {
group_name = local.e2e_test_build_log_group_name
stream_name = "MY_APP-${var.env}-build-test-e2e-log-stream"
}
}
cache {
type = "LOCAL"
modes = ["LOCAL_DOCKER_LAYER_CACHE", "LOCAL_SOURCE_CACHE"]
}
tags = merge(
var.additional_tags,
{
Name = "MY_APP-${var.env}-build-test-e2e-project"
},
)
}
resource "aws_codedeploy_app" "codedeploy_app" {
name = "MY_APP-${var.env}-deploy-app"
compute_platform = "Server"
}
resource "aws_codedeploy_deployment_group" "codedeploy_group" {
app_name = aws_codedeploy_app.codedeploy_app.name
deployment_group_name = "MY_APP-${var.env}-deploy-group"
service_role_arn = aws_iam_role.codedeploy_role.arn
autoscaling_groups = [aws_autoscaling_group.app_asg.name]
# deployment_config_name = "CodeDeployDefault.OneAtATime"
# TODO: change this to OneAtATime for production
deployment_config_name = "CodeDeployDefault.AllAtOnce" # possible option: "CodeDeployDefault.OneAtATime"
deployment_style {
deployment_type = "IN_PLACE"
}
auto_rollback_configuration {
enabled = true
events = ["DEPLOYMENT_FAILURE"]
}
}
resource "aws_codedeploy_deployment_group" "codedeploy_schedule_group" {
app_name = aws_codedeploy_app.codedeploy_app.name
deployment_group_name = "MY_APP-${var.env}-schedule-deploy-group"
service_role_arn = aws_iam_role.codedeploy_role.arn
autoscaling_groups = [aws_autoscaling_group.app_schedule_asg.name]
deployment_config_name = "CodeDeployDefault.OneAtATime" # possible option: "CodeDeployDefault.AllAtOnce"
deployment_style {
deployment_type = "IN_PLACE"
}
auto_rollback_configuration {
enabled = true
events = ["DEPLOYMENT_FAILURE"]
}
}
resource "aws_codepipeline" "codepipeline" {
name = "MY_APP-${var.env}-pipeline"
role_arn = aws_iam_role.codepipeline_role.arn
artifact_store {
location = aws_s3_bucket.codepipeline_bucket.bucket
type = "S3"
}
stage {
name = "Source"
action {
name = "Source"
category = "Source"
owner = "AWS"
provider = "CodeStarSourceConnection"
version = "1"
output_artifacts = ["SourceArtifacts"]
namespace = "SourceVariables"
configuration = {
ConnectionArn = var.connection_arn
FullRepositoryId = "MY_APP/MY_APP-nest"
BranchName = "master" # change branch here "master"
}
}
}
stage {
name = "Build"
action {
name = "Build"
category = "Build"
owner = "AWS"
provider = "CodeBuild"
version = "1"
input_artifacts = ["SourceArtifacts"]
output_artifacts = ["BuildArtifact", "TestArtifacts"]
namespace = "BuildVariables"
configuration = {
ProjectName = aws_codebuild_project.codebuild_project.id
}
}
}
/* stage {
name = "Test"
action {
name = "UnitTest"
category = "Test"
owner = "AWS"
provider = "CodeBuild"
version = "1"
run_order = "1"
input_artifacts = ["TestArtifacts"]
configuration = {
ProjectName = aws_codebuild_project.codebuild_test_project.id
}
}
action {
name = "IntegrationTest"
category = "Test"
owner = "AWS"
provider = "CodeBuild"
version = "1"
run_order = "2"
input_artifacts = ["TestArtifacts"]
configuration = {
ProjectName = aws_codebuild_project.codebuild_e2e_test_project.id
}
}
} */
stage {
name = "Deploy"
dynamic "action" {
# a fake map for a conditional block
for_each = var.deployment_requires_approval ? { name = "ManualApproval" } : {}
content {
name = "ManualApproval"
category = "Approval"
owner = "AWS"
provider = "Manual"
version = "1"
run_order = "1"
configuration = {
NotificationArn = var.sns_notification_arn
}
}
}
action {
name = "Deploy"
category = "Deploy"
owner = "AWS"
provider = "CodeDeploy"
input_artifacts = ["BuildArtifact"]
version = "1"
run_order = "1"
namespace = "DeployVariables"
configuration = {
ApplicationName = aws_codedeploy_app.codedeploy_app.name
DeploymentGroupName = "MY_APP-${var.env}-deploy-group"
}
}
action {
name = "DeploySchedule"
category = "Deploy"
owner = "AWS"
provider = "CodeDeploy"
input_artifacts = ["BuildArtifact"]
version = "1"
run_order = "2"
namespace = "DeployScheduleVariables"
configuration = {
ApplicationName = aws_codedeploy_app.codedeploy_app.name
DeploymentGroupName = "MY_APP-${var.env}-schedule-deploy-group"
}
}
}
tags = merge(
var.additional_tags,
{
Name = "MY_APP-${var.env}-pipeline"
},
)
depends_on = [aws_codebuild_project.codebuild_project]
}
Terraform EC2 definition
#############################
####### EC2 INSTANCES #######
#############################
### APP instance Launch Configuration ###
resource "aws_launch_configuration" "app_lc" {
name_prefix = "app-${var.env}-lc"
image_id = var.app_ec2_ami
instance_type = var.app_instance_type
iam_instance_profile = aws_iam_instance_profile.app_instance_profile.name
key_name = var.key_name
enable_monitoring = true
ebs_optimized = false
security_groups = [aws_security_group.app_sg.id]
root_block_device {
volume_size = 25
volume_type = "gp3"
}
user_data = <<-EOF
#!/bin/bash
sudo yum update -y
sudo yum install git jq -y
amazon-linux-extras install epel -y
#############
# Node.js #
#############
${file("gists/install_node.sh")}
#############
# App #
#############
cat <<-'ENVFILE' | tee /home/ec2-user/.env
export DEPLOYMENT_GROUP_NAME="${var.env}-deploy-group"
export region="eu-west-2"
ENVFILE
source /home/ec2-user/.env
function get_app() {
sudo mkdir -p /home/ec2-user/logs/app
sudo chown -R ec2-user:ec2-user /home/ec2-user/logs
sudo chmod 777 -R /home/ec2-user/logs
sudo chmod -R a+rwx /home/ec2-user/logs
echo "get_app: starting"
mkdir -p /home/ec2-user/app_dir
cd /home/ec2-user/app_dir
BUCKET="${aws_s3_bucket.codepipeline_bucket.bucket}"
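# "latest build" here means the most recently modified object anywhere in the bucket (hence the sort on the ls output)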
BUCKET_KEY=$(aws s3 ls "$BUCKET" --recursive | sort | tail -n 1 | awk '{print $4}')
echo "downloading app.zip"
aws s3 cp s3://$BUCKET/$BUCKET_KEY ./app.zip
echo "unzipping app.zip"
unzip app.zip
echo "removing app.zip"
rm -rf app.zip
sudo chown -R ec2-user:ec2-user /home/ec2-user/Lexstep
echo "installing root dependencies"
bash /home/ec2-user/app_dir/infrastructure/install_root_dependencies.sh
echo "installing root dependencies"
bash /home/ec2-user/app_dir/infrastructure/install_dependencies.sh
echo "STARTING APP"
export NODE_ENV=production
export region=eu-west-2
bash /home/ec2-user/app_dir/infrastructure/start.sh
sudo chown ec2-user:ec2-user /home/ec2-user/.pm2/rpc.sock /home/ec2-user/.pm2/pub.sock
cd /home/ec2-user
echo "FINISHED get_app SCRIPT"
}
mkdir -p /home/ec2-user/logs && touch /home/ec2-user/logs/get_app.log && get_app 2>&1 | tee /home/ec2-user/logs/get_app.log
echo "Finished init of app_lc"
EOF
lifecycle {
create_before_destroy = true
}
}
### APP Autoscaling Group ###
resource "aws_autoscaling_group" "app_asg" {
name = "${var.env}-app"
launch_configuration = aws_launch_configuration.app_lc.name
min_size = 1
max_size = 2
desired_capacity = 2
health_check_type = "EC2"
health_check_grace_period = 240
vpc_zone_identifier = [aws_subnet.subnets_app[0].id, aws_subnet.subnets_app[1].id]
service_linked_role_arn = data.aws_iam_role.aws_service_linked_role.arn
target_group_arns = [aws_lb_target_group.tg.arn]
lifecycle {
create_before_destroy = true
}
tags = concat(
[
{
"key" = "Name"
"value" = "${var.env}-app"
"propagate_at_launch" = true
},
{
"key" = "Project"
"value" = var.additional_tags.Project
"propagate_at_launch" = true
},
{
"key" = "CreatedBy"
"value" = var.additional_tags.CreatedBy
"propagate_at_launch" = true
},
{
"key" = "Environment"
"value" = var.additional_tags.Environment
"propagate_at_launch" = true
},
])
}

Related

Terraform: Lambda Change Based On Image Change

I'm trying to create a Lambda with Terraform based on a Docker image, and I want the Lambda to be updated every time I push a new image. I'm trying to use depends_on inside the Lambda resource, but it doesn't work; the Lambda just stays in the same state as before. Is there any other solution or any way to make it work?
This is the code I have right now:
resource "docker_registry_image" "registry_image" {
name = docker_image.image.name
triggers = {
dir_sha1 = sha1(join("", [for f in fileset("../AWS/", "**") : filesha1("../AWS/${f}")]))
}
}
resource "docker_image" "image" {
name = "${aws_ecr_repository.repo.repository_url}:latest"
triggers = {
dir_sha1 = sha1(join("", [for f in fileset("../AWS/", "**") : filesha1("../AWS/${f}")]))
}
build {
context = "../AWS/"
dockerfile = "Dockerfile"
}
}
resource "aws_ecr_repository" "repo" {
name = "repo"
force_delete = true
}
resource "aws_ecr_repository_policy" "repo_policy" {
repository = aws_ecr_repository.repo.name
policy = <<EOF
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "Set the permission for ECR",
"Effect": "Allow",
"Principal": "*",
"Action": [
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:CompleteLayerUpload",
"ecr:GetDownloadUrlForLayer",
"ecr:GetLifecyclePolicy",
"ecr:InitiateLayerUpload",
"ecr:PutImage",
"ecr:UploadLayerPart"
]
}
]
}
EOF
}
resource "aws_lambda_function" "lambda1" {
depends_on = [docker_image.image]
function_name = "lambda1"
role = aws_iam_role.lambda_role.arn
image_uri = "${aws_ecr_repository.repo.repository_url}:latest"
image_config {
command = ["lambda1.handler"]
working_directory = "/var/task"
}
package_type = "Image"
memory_size = 2048 # Min 128 MB and the Max 10,240 MB, there are some files of 300 MB
timeout = 180
environment {
variables = {
TZ = "Europe/Madrid"
}
}
}
Update
It seems this is an issue with the Amazon CDK as well. I recommend reading
this thread: updating-lambda-using-cdk-doesnt-deploy-latest-image.
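A pattern that is sometimes suggested for this (a sketch, not a confirmed fix; it assumes the kreuzwerker/docker provider's docker_registry_image resource exposes a sha256_digest attribute) is to reference the image by digest instead of the mutable :latest tag, so each new push changes image_uri and forces an update:

  resource "aws_lambda_function" "lambda1" {
    function_name = "lambda1"
    role          = aws_iam_role.lambda_role.arn
    package_type  = "Image"
    # a rebuilt image produces a new digest and therefore a plan diff
    image_uri     = "${aws_ecr_repository.repo.repository_url}@${docker_registry_image.registry_image.sha256_digest}"
  }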

How to deploy an escloud extension in Terraform

I deploy Elastic Cloud (escloud) with Terraform.
I want to add an existing extension, analysis-icu. How can I configure it?
resource "ec_deployment_extension" "icu" {
name = "analysis-icu"
version = "*"
extension_type = "bundle"
download_url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-nori/analysis-nori-8.6.1.zip"
}
module "escloud_default" {
source = "./escloud"
name = "${var.environment}-test"
...
elasticsearch_config = {
topologies = [
{
id = "hot_content"
size = var.environment == "prod" ? "2g" : "1g"
size_resource = "memory"
zone_count = var.environment == "prod" ? 2 : 1
autoscaling = {
min_size = ""
min_size_resource = ""
max_size = "116g"
max_size_resource = "memory"
}
},
]
extensions = [
{
name = ec_deployment_extension.nori.name
type = "bundle"
version = "*"
url = ec_deployment_extension.nori.url
}
]
}
...
This code does not enable the existing ICU plugin; it just creates a custom bundle.
I solved it: there is a config.plugins argument.
https://registry.terraform.io/providers/elastic/ec/latest/docs/resources/ec_deployment#plugins
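For anyone else landing here, a minimal sketch of what that argument looks like (assuming the escloud module passes config through to the ec_deployment resource as documented):

  resource "ec_deployment" "example" {
    # ... name, region, version, deployment_template_id ...
    elasticsearch {
      config {
        # official plugins are enabled by name; no custom bundle/extension required
        plugins = ["analysis-icu"]
      }
    }
  }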

HashiCorp Nomad: why is my app not connecting to the database? (Postgres, Golang API, provider = "nomad")

Help required: I have tried many approaches, but unfortunately the app is not connecting to the database.
DB_CONN is the env variable the app expects.
The service provider is Nomad, so this should be simple service discovery.
I created two groups in the same job file (in fact I tried other layouts as well), but it did not work.
variable "datacenters" {
description = "A list of datacenters in the region which are eligible for task placement."
type = list(string)
default = ["dc1"]
}
variable "region" {
description = "The region where the job should be placed."
type = string
default = "global"
}
variable "postgres_db" {
description = "Postgres DB name"
default = "vehicle_master_bd"
}
variable "postgres_user" {
description = "Postgres DB User"
default = "postgres"
}
variable "postgres_password" {
description = "Postgres DB Password"
default = "postgres"
}
# Begin Job Spec
job "cres-vehicleMaster" {
type = "service"
region = var.region
datacenters = var.datacenters
group "db" {
network {
port "db" {
to = 5432
}
}
task "postgres" {
driver = "docker"
meta {
service = "database"
}
service {
name = "database"
port = "db"
provider = "nomad"
}
config {
image = "postgres"
ports = ["db"]
}
resources {
cpu = 500
memory = 500
}
env {
POSTGRES_DB = var.postgres_db
POSTGRES_USER = var.postgres_user
POSTGRES_PASSWORD = var.postgres_password
}
}
}
group "vehicleMaster-api" {
count = 1
network {
port "api" {
to = 50080
}
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
task "vehicleMaster-api" {
driver = "docker"
service {
name = "vehicleMaster-api"
tags = ["vehicleMaster", "RESTAPI"]
port = "api"
provider = "nomad"
}
template {
data = <<EOH
{{ range nomadService "database" }}
DB_CONN="host={{ .Address }} port={{ .Port }} user=${var.postgres_user} password=${var.postgres_password} dbname=${var.postgres_db} sslmode=disable"
{{ end }}
EOH
destination = "local/env.txt"
env = true
}
env {
// DB_CONN = "host=127.0.0.1 port=5432 user=postgres password=postgres dbname=vehicle_master_bd sslmode=disable"
// DB_CONN = "host=${NOMAD_IP_postgres} port=${NOMAD_PORT_postgres} user=${var.postgres_user} password=${var.postgres_password} dbname=${var.postgres_db} sslmode=disable"
PORT = "50080"
}
config {
image = "jpalaparthi/vehiclemaster:v0.0.3"
ports = ["api"]
}
resources {
cpu = 500 # 500 MHz
memory = 512 # 512 MB
}
}
}
}
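When debugging this kind of template wiring, it helps to confirm what was actually rendered into the task environment. A sketch using the Nomad CLI (the allocation ID is a placeholder):

  nomad job status cres-vehicleMaster                              # find the allocation ID of the api group
  nomad alloc exec -task vehicleMaster-api <alloc-id> env | grep DB_CONN
  nomad alloc fs <alloc-id> vehicleMaster-api/local/env.txt        # the rendered template file itself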

Error: Unknown post-processor type "ami-copy"

I am using the packer plugin from https://github.com/martinbaillie/packer-plugin-ami-copy
and followed the examples in that repository
post-processor "ami-copy" {
ami_users = "${var.ami_users}"
encrypt_boot = false
role_name = "AMICopyRole"
// ... other settings.
}
during the build I see that the plugin is installed
Installed plugin github.com/hashicorp/amazon v1.0.6 in "/root/.config/packer/plugins/github.com/hashicorp/amazon/packer-plugin-amazon_v1.0.6_x5.0_linux_amd64"
Installed plugin github.com/martinbaillie/ami-copy v1.8.0 in "/root/.config/packer/plugins/github.com/martinbaillie/ami-copy/packer-plugin-ami-copy_v1.8.0_x5.0_linux_amd64"
but later on I get the following error
Error: Unknown post-processor type "ami-copy"
on images/ubuntu20-java-17/build.pkr.hcl line 75:
(source code not available)
known post-processors: [shell-local docker-import manifest alicloud-import
googlecompute-import ucloud-import vsphere-template googlecompute-export vagrant
vagrant-cloud digitalocean-import vsphere checksum yandex-export docker-tag
yandex-import artifice docker-save amazon-import compress docker-push]
I have tried to find a solution to the issue but have had no luck.
I am running Packer in Docker, using the packer:full-1.7.82.0 image.
Any help would be appreciated a lot.
Here is the build.pkr.hcl file:
variable "ami_users" {
type = list(string)
default = ["xxxxxxxxxx","xxxxxxxxxx","xxxxxxxxxx","xxxxxxxxxx"]
description = "AMI Users"
sensitive = false
}
variable "name" {
type = string
default = "bmj-ubuntu20-java-17"
description = "AMI Name"
sensitive = false
}
source "amazon-ebs" "ubuntu" {
instance_type = "t2.micro"
region = "eu-west-1"
source_ami_filter {
filters = {
name = "ubuntu/images/*ubuntu-focal-20.04-amd64-server-*"
root-device-type = "ebs"
virtualization-type = "hvm"
}
most_recent = true
owners = ["099720109477"]
}
ssh_username = "ubuntu"
subnet_id = "subnet-xxxxxxxxxx"
ssh_interface = "private_ip"
ami_name = "${var.name}-{{timestamp}}"
ami_description = "Ubuntu 20.04 Base AMI Image"
ena_support = true
ami_users = var.ami_users
snapshot_users = ["xxxxxxxxxx","xxxxxxxxxx","xxxxxxxxxx","xxxxxxxxxx"]
shutdown_behavior = "terminate"
tags = {
os_name = "Ubuntu"
os_release = "20.04"
base_ami_name = var.name
name = var.name
build = "base"
}
}
build {
name = "bmj-packer"
sources = [
"source.amazon-ebs.ubuntu"
]
provisioner "shell" {
inline = ["sudo apt-get update -y && sudo apt-get install --no-install-recommends -y "]
}
# install basics
provisioner "ansible" {
user = "ubuntu"
playbook_file = "./common/focal/basics.yaml"
}
# install java
provisioner "ansible" {
user = "ubuntu"
playbook_file = "./common/focal/java_17.yaml"
}
# setup users
provisioner "ansible" {
user = "ubuntu"
playbook_file = "./common/focal/users_and_groups.yaml"
}
# setup users
provisioner "ansible" {
user = "ubuntu"
playbook_file = "./common/ssh_keys.yaml"
}
post-processor "ami-copy" {
ami_users = "${var.ami_users}"
encrypt_boot = false
role_name = "AMICopyRole"
}
}
Hope that makes it clearer.
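For comparison, the plugin README declares it in a required_plugins block so that packer init can resolve it; a sketch (the version pins are assumptions):

  packer {
    required_plugins {
      amazon = {
        version = ">= 1.0.6"
        source  = "github.com/hashicorp/amazon"
      }
      ami-copy = {
        version = ">= 1.8.0"
        source  = "github.com/martinbaillie/ami-copy"
      }
    }
  }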

Finding the Proxmox IP address with Terraform and Ansible

I have this code:
terraform {
required_providers {
proxmox = {
source = "telmate/proxmox"
version = "2.8.0"
}
}
}
provider "proxmox" {
pm_api_url = "https://url/api2/json"
pm_user = "user"
pm_password = "pass"
pm_tls_insecure = true
}
resource "proxmox_vm_qemu" "test" {
count = 1
name = "test-${count.index + 1}"
target_node = "prm01"
clone = "image-here"
guest_agent_ready_timeout = 60
os_type = "cloud-init"
cores = 2
sockets = 1
cpu = "host"
memory = 4048
scsihw = "virtio-scsi-pci"
bootdisk = "scsi0"
disk {
slot = 0
size = "32G"
type = "scsi"
storage = "local-lvm"
iothread = 1
}
network {
model = "virtio"
bridge = "vmbr0"
}
lifecycle {
ignore_changes = [
network,
]
}
}
output "proxmox_ip_address_default" {
description = "Current IP Default"
value = proxmox_vm_qemu.test.*.default_ipv4_address
}
This is created via an Ansible playbook. What I'm trying to find is the IP assigned to the machine, because I then run another playbook to provision it. The problem is that I haven't found any way to get the assigned IP address of the machine.
The output is empty!
Any help?
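As a side note, once the output is populated it can be read in machine-readable form for the follow-up playbook (run from the same Terraform directory):

  terraform output -json proxmox_ip_address_default   # a JSON list of IPs; empty if the provider saw none

As far as I know, the Telmate provider can only fill default_ipv4_address when the QEMU guest agent is running inside the cloned template, so an empty output often points at the guest agent rather than at Terraform itself.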
